src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp

   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "gc/shared/tlab_globals.hpp"
  28 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  29 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  30 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  31 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  32 #include "gc/shenandoah/shenandoahSimpleBitMap.hpp"
  33 #include "gc/shenandoah/shenandoahSimpleBitMap.inline.hpp"
  34 #include "logging/logStream.hpp"
  35 #include "memory/resourceArea.hpp"
  36 #include "runtime/orderAccess.hpp"
  37 
  38 static const char* partition_name(ShenandoahFreeSetPartitionId t) {
  39   switch (t) {
  40     case ShenandoahFreeSetPartitionId::NotFree: return "NotFree";
  41     case ShenandoahFreeSetPartitionId::Mutator: return "Mutator";
  42     case ShenandoahFreeSetPartitionId::Collector: return "Collector";
  43     default:
  44       ShouldNotReachHere();
  45       return "Unrecognized";
  46   }
  47 }
  48 
  49 #ifndef PRODUCT
  50 void ShenandoahRegionPartitions::dump_bitmap() const {
  51   log_debug(gc)("Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "], Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
  52                 _leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)],
  53                 _rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)],
  54                 _leftmosts[int(ShenandoahFreeSetPartitionId::Collector)],
  55                 _rightmosts[int(ShenandoahFreeSetPartitionId::Collector)]);
  56   log_debug(gc)("Empty Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT
  57                 "], Empty Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
  58                 _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
  59                 _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
  60                 _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
  61                 _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)]);
  62 
  63   log_debug(gc)("%6s: %18s %18s %18s", "index", "Mutator Bits", "Collector Bits", "NotFree Bits");
  64   dump_bitmap_range(0, _max-1);
  65 }
  66 
  67 void ShenandoahRegionPartitions::dump_bitmap_range(idx_t start_region_idx, idx_t end_region_idx) const {
  68   assert((start_region_idx >= 0) && (start_region_idx < (idx_t) _max), "precondition");
  69   assert((end_region_idx >= 0) && (end_region_idx < (idx_t) _max), "precondition");
  70   idx_t aligned_start = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(start_region_idx);
  71   idx_t aligned_end = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(end_region_idx);
  72   idx_t alignment = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].alignment();
  73   while (aligned_start <= aligned_end) {
  74     dump_bitmap_row(aligned_start);
  75     aligned_start += alignment;
  76   }
  77 }
  78 
  79 void ShenandoahRegionPartitions::dump_bitmap_row(idx_t region_idx) const {
  80   assert((region_idx >= 0) && (region_idx < (idx_t) _max), "precondition");
  81   idx_t aligned_idx = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(region_idx);
  82   uintx mutator_bits = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].bits_at(aligned_idx);
  83   uintx collector_bits = _membership[int(ShenandoahFreeSetPartitionId::Collector)].bits_at(aligned_idx);
  84   uintx free_bits = mutator_bits | collector_bits;
  85   uintx notfree_bits = ~free_bits;
  86   log_debug(gc)(SSIZE_FORMAT_W(6) ": " SIZE_FORMAT_X_0 " " SIZE_FORMAT_X_0 " " SIZE_FORMAT_X_0,
  87                 aligned_idx, mutator_bits, collector_bits, notfree_bits);
  88 }
  89 #endif
  90 
  91 ShenandoahRegionPartitions::ShenandoahRegionPartitions(size_t max_regions, ShenandoahFreeSet* free_set) :
  92     _max(max_regions),
  93     _region_size_bytes(ShenandoahHeapRegion::region_size_bytes()),
  94     _free_set(free_set),
  95     _membership{ ShenandoahSimpleBitMap(max_regions), ShenandoahSimpleBitMap(max_regions) }
  96 {
  97   make_all_regions_unavailable();
  98 }
  99 
 100 inline bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) const {
 101   return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
 102 }
 103 
 104 inline bool ShenandoahFreeSet::can_allocate_from(size_t idx) const {
 105   ShenandoahHeapRegion* r = _heap->get_region(idx);
 106   return can_allocate_from(r);
 107 }
 108 
 109 inline size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) const {
 110   if (r->is_trash()) {
 111     // This would be recycled on allocation path
 112     return ShenandoahHeapRegion::region_size_bytes();
 113   } else {
 114     return r->free();
 115   }
 145   // which_partition is shrinking after the region that used to be leftmost is retired.
 146   return idx;
 147 }
 148 
 149 void ShenandoahRegionPartitions::make_all_regions_unavailable() {
 150   for (size_t partition_id = 0; partition_id < IntNumPartitions; partition_id++) {
 151     _membership[partition_id].clear_all();
 152     _leftmosts[partition_id] = _max;
 153     _rightmosts[partition_id] = -1;
 154     _leftmosts_empty[partition_id] = _max;
 155     _rightmosts_empty[partition_id] = -1;
 156     _capacity[partition_id] = 0;
 157     _used[partition_id] = 0;
 158   }
 159   _region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = _region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
 160 }
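
// Editorial sketch (illustrative only, not part of this file): the reset above leaves every
// partition with the canonical "empty interval" encoding, leftmost == _max and rightmost == -1,
// which later interval maintenance can recognize without extra state:
//
//   static bool interval_is_empty(idx_t leftmost, idx_t rightmost, idx_t max) {
//     return (leftmost == max) && (rightmost == -1);  // equivalently, leftmost > rightmost
//   }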
 161 
 162 void ShenandoahRegionPartitions::establish_mutator_intervals(idx_t mutator_leftmost, idx_t mutator_rightmost,
 163                                                              idx_t mutator_leftmost_empty, idx_t mutator_rightmost_empty,
 164                                                              size_t mutator_region_count, size_t mutator_used) {
 166   _leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_leftmost;
 167   _rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_rightmost;
 168   _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_leftmost_empty;
 169   _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_rightmost_empty;
 170 
 171   _region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_region_count;
 172   _used[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_used;
 173   _capacity[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_region_count * _region_size_bytes;
 174 
 175   _leftmosts[int(ShenandoahFreeSetPartitionId::Collector)] = _max;
 176   _rightmosts[int(ShenandoahFreeSetPartitionId::Collector)] = -1;
 177   _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)] = _max;
 178   _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)] = -1;
 179 
 180   _region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
 181   _used[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
 182   _capacity[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
 183 }
 184 
 185 void ShenandoahRegionPartitions::increase_used(ShenandoahFreeSetPartitionId which_partition, size_t bytes) {
 186   assert (which_partition < NumPartitions, "Partition must be valid");
 187   _used[int(which_partition)] += bytes;
 188   assert (_used[int(which_partition)] <= _capacity[int(which_partition)],
 189           "Must not use (" SIZE_FORMAT ") more than capacity (" SIZE_FORMAT ") after increase by " SIZE_FORMAT,
 190           _used[int(which_partition)], _capacity[int(which_partition)], bytes);
 191 }
 192 
 193 inline void ShenandoahRegionPartitions::shrink_interval_if_range_modifies_either_boundary(
 194   ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx) {
 195   assert((low_idx <= high_idx) && (low_idx >= 0) && (high_idx < _max), "Range must span legal index values");
 196   if (low_idx == leftmost(partition)) {
 197     assert (!_membership[int(partition)].is_set(low_idx), "Do not shrink interval if region not removed");
 198     if (high_idx + 1 == _max) {
 199       _leftmosts[int(partition)] = _max;
 200     } else {
 201       _leftmosts[int(partition)] = find_index_of_next_available_region(partition, high_idx + 1);
 202     }
 203     if (_leftmosts_empty[int(partition)] < _leftmosts[int(partition)]) {
 204       // This gets us closer to where we need to be; we'll scan further when leftmosts_empty is requested.
 205       _leftmosts_empty[int(partition)] = leftmost(partition);
 206     }
 207   }
 208   if (high_idx == _rightmosts[int(partition)]) {
 209     assert (!_membership[int(partition)].is_set(high_idx), "Do not shrink interval if region not removed");
 210     if (low_idx == 0) {
 211       _rightmosts[int(partition)] = -1;
 212     } else {
 213       _rightmosts[int(partition)] = find_index_of_previous_available_region(partition, low_idx - 1);
 214     }
 215     if (_rightmosts_empty[int(partition)] > _rightmosts[int(partition)]) {
 216       // This gets us closer to where we need to be; we'll scan further when rightmosts_empty is requested.
 217       _rightmosts_empty[int(partition)] = _rightmosts[int(partition)];
 218     }
 219   }
 220   if (_leftmosts[int(partition)] > _rightmosts[int(partition)]) {
 221     _leftmosts[int(partition)] = _max;
 222     _rightmosts[int(partition)] = -1;
 223     _leftmosts_empty[int(partition)] = _max;
 224     _rightmosts_empty[int(partition)] = -1;
 225   }
 272 
 273   if (used_bytes < _region_size_bytes) {
 274     // Count the alignment pad remnant of memory as used when we retire this region
 275     increase_used(partition, _region_size_bytes - used_bytes);
 276   }
 277   _membership[int(partition)].clear_bit(idx);
 278   shrink_interval_if_boundary_modified(partition, idx);
 279   _region_counts[int(partition)]--;
 280 }
 281 
 282 void ShenandoahRegionPartitions::make_free(idx_t idx, ShenandoahFreeSetPartitionId which_partition, size_t available) {
 283   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 284   assert (membership(idx) == ShenandoahFreeSetPartitionId::NotFree, "Cannot make free if already free");
 285   assert (which_partition < NumPartitions, "selected free partition must be valid");
 286   assert (available <= _region_size_bytes, "Available cannot exceed region size");
 287 
 288   _membership[int(which_partition)].set_bit(idx);
 289   _capacity[int(which_partition)] += _region_size_bytes;
 290   _used[int(which_partition)] += _region_size_bytes - available;
 291   expand_interval_if_boundary_modified(which_partition, idx, available);
 292 
 293   _region_counts[int(which_partition)]++;
 294 }
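
// Editorial example of the accounting above (hypothetical numbers, 4 MiB regions): making a fully
// empty region free adds 4 MiB to _capacity and nothing to _used, while making a region with only
// 1 MiB still available free adds 4 MiB to _capacity and 3 MiB to _used.  Either way the
// partition's used total can never exceed its capacity total.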
 295 
 296 void ShenandoahRegionPartitions::move_from_partition_to_partition(idx_t idx, ShenandoahFreeSetPartitionId orig_partition,
 297                                                                   ShenandoahFreeSetPartitionId new_partition, size_t available) {
 298   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 299   assert (orig_partition < NumPartitions, "Original partition must be valid");
 300   assert (new_partition < NumPartitions, "New partition must be valid");
 301   assert (available <= _region_size_bytes, "Available cannot exceed region size");
 302 
 303   // Expected transitions:
 304   //  During rebuild:         Mutator => Collector
 305   //  During flip_to_gc:      Mutator empty => Collector
 306   // At start of update refs: Collector => Mutator
 307   assert (((available <= _region_size_bytes) &&
 308            (((orig_partition == ShenandoahFreeSetPartitionId::Mutator)
 309              && (new_partition == ShenandoahFreeSetPartitionId::Collector)) ||
 310             ((orig_partition == ShenandoahFreeSetPartitionId::Collector)
 311              && (new_partition == ShenandoahFreeSetPartitionId::Mutator)))) ||
 312           ((available == _region_size_bytes) &&
 313            ((orig_partition == ShenandoahFreeSetPartitionId::Mutator)
 314             && (new_partition == ShenandoahFreeSetPartitionId::Collector))), "Unexpected movement between partitions");
 315 
 316   size_t used = _region_size_bytes - available;
 317 
 318   _membership[int(orig_partition)].clear_bit(idx);
 319   _membership[int(new_partition)].set_bit(idx);
 320 
 321   _capacity[int(orig_partition)] -= _region_size_bytes;
 322   _used[int(orig_partition)] -= used;
 323   shrink_interval_if_boundary_modified(orig_partition, idx);
 324 
 325   _capacity[int(new_partition)] += _region_size_bytes;
 326   _used[int(new_partition)] += used;
 327   expand_interval_if_boundary_modified(new_partition, idx, available);
 328 
 329   _region_counts[int(orig_partition)]--;
 330   _region_counts[int(new_partition)]++;
 331 }
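
// Editorial usage sketch (mirrors the call made by flip_to_gc() below; illustrative only):
//
//   size_t ac = alloc_capacity(r);  // the full region size for an empty or trash region
//   _partitions.move_from_partition_to_partition(r->index(), ShenandoahFreeSetPartitionId::Mutator,
//                                                ShenandoahFreeSetPartitionId::Collector, ac);
//
// Capacity, used bytes, interval bounds, and region counts all follow the region to its new partition.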
 332 
 333 const char* ShenandoahRegionPartitions::partition_membership_name(idx_t idx) const {
 334   return partition_name(membership(idx));
 335 }
 336 
 465   idx_t leftmosts[UIntNumPartitions];
 466   idx_t rightmosts[UIntNumPartitions];
 467   idx_t empty_leftmosts[UIntNumPartitions];
 468   idx_t empty_rightmosts[UIntNumPartitions];
 469 
 470   for (uint i = 0; i < UIntNumPartitions; i++) {
 471     leftmosts[i] = _max;
 472     empty_leftmosts[i] = _max;
 473     rightmosts[i] = -1;
 474     empty_rightmosts[i] = -1;
 475   }
 476 
 477   for (idx_t i = 0; i < _max; i++) {
 478     ShenandoahFreeSetPartitionId partition = membership(i);
 479     switch (partition) {
 480       case ShenandoahFreeSetPartitionId::NotFree:
 481         break;
 482 
 483       case ShenandoahFreeSetPartitionId::Mutator:
 484       case ShenandoahFreeSetPartitionId::Collector:
 485       {
 486         size_t capacity = _free_set->alloc_capacity(i);
 487         bool is_empty = (capacity == _region_size_bytes);
 488         assert(capacity > 0, "free regions must have allocation capacity");
 489         if (i < leftmosts[int(partition)]) {
 490           leftmosts[int(partition)] = i;
 491         }
 492         if (is_empty && (i < empty_leftmosts[int(partition)])) {
 493           empty_leftmosts[int(partition)] = i;
 494         }
 495         if (i > rightmosts[int(partition)]) {
 496           rightmosts[int(partition)] = i;
 497         }
 498         if (is_empty && (i > empty_rightmosts[int(partition)])) {
 499           empty_rightmosts[int(partition)] = i;
 500         }
 501         break;
 502       }
 503 
 504       default:
 554 
 555   // If the Collector partition is empty, both leftmosts will equal _max and both rightmosts will equal -1.
 556   // Likewise for the empty-region intervals.
 557   beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
 558   end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
 559   assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::Collector),
 560           "free regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 561           beg_off, leftmost(ShenandoahFreeSetPartitionId::Collector));
 562   assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::Collector),
 563           "free regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 564           end_off, rightmost(ShenandoahFreeSetPartitionId::Collector));
 565 
 566   beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
 567   end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
 568   assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
 569           "free empty regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 570           beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Collector));
 571   assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
 572           "free empty regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 573           end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Collector));
 574 }
 575 #endif
 576 
 577 ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
 578   _heap(heap),
 579   _partitions(max_regions, this),
 580   _trash_regions(NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, max_regions, mtGC)),
 581   _right_to_left_bias(false),
 582   _alloc_bias_weight(0)
 583 {
 584   clear_internal();
 585 }
 586 
 587 HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
 588   shenandoah_assert_heaplocked();
 589 
 590   // Scan the bitmap looking for a first fit.
 591   //
 592   // Leftmost and rightmost bounds provide enough caching to quickly find a region from which to allocate.
 593   //
 594   // Allocations are biased: GC allocations are taken from the high end of the heap.  Regular (and TLAB)
 595   // mutator allocations are taken from the middle of the heap, below the memory reserved for the Collector.
 596   // Humongous mutator allocations are taken from the bottom of the heap.
 597   //
 598   // Free set maintains mutator and collector partitions.  Mutator can only allocate from the
 599   // Mutator partition.  Collector prefers to allocate from the Collector partition, but may steal
 600   // regions from the Mutator partition if the Collector partition has been depleted.
 601 
 602   switch (req.type()) {
 603     case ShenandoahAllocRequest::_alloc_tlab:
 604     case ShenandoahAllocRequest::_alloc_shared: {
 605       // Try to allocate in the mutator view
 606       if (_alloc_bias_weight-- <= 0) {
 607         // We have observed that regions not collected in previous GC cycle tend to congregate at one end or the other
 608         // of the heap.  Typically, these are the more recently engaged regions and the objects in these regions have not
 609         // yet had a chance to die (and/or are treated as floating garbage).  If we use the same allocation bias on each
 610         // GC pass, these "most recently" engaged regions for GC pass N will also be the "most recently" engaged regions
 611         // for GC pass N+1, and the relatively large amount of live data and/or floating garbage introduced
 612         // during the most recent GC pass may once again prevent the region from being collected.  We have found that
 613         // alternating the allocation behavior between GC passes improves evacuation performance by 3-7% on certain
 614         // benchmarks.  In the best case, this has the effect of consuming these partially consumed regions before
 615         // the start of the next mark cycle so all of their garbage can be efficiently reclaimed.
 616         //
 617         // First, finish consuming regions that are already partially consumed so as to more tightly limit ranges of
 618         // available regions.  Other potential benefits:
 619         //  1. Eventual collection set has fewer regions because we have packed newly allocated objects into fewer regions
 620         //  2. We preserve the "empty" regions longer into the GC cycle, reducing likelihood of allocation failures
 621         //     late in the GC cycle.
 622         idx_t non_empty_on_left = (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator)
 623                                      - _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator));
 624         idx_t non_empty_on_right = (_partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator)
 625                                       - _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator));
 626         _right_to_left_bias = (non_empty_on_right > non_empty_on_left);
 627         _alloc_bias_weight = _InitialAllocBiasWeight;
 628       }
 629       if (_right_to_left_bias) {
 630         // Allocate within mutator free from high memory to low so as to preserve low memory for humongous allocations
 631         if (!_partitions.is_empty(ShenandoahFreeSetPartitionId::Mutator)) {
 632           // Use signed idx.  Otherwise, loop will never terminate.
 633           idx_t leftmost = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator);
 634           for (idx_t idx = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator); idx >= leftmost; ) {
 635             assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx),
 636                    "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, idx);
 637             ShenandoahHeapRegion* r = _heap->get_region(idx);
 638             // try_allocate_in() increases used if the allocation is successful.
 639             HeapWord* result;
 640             size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab)? req.min_size(): req.size();
 641             if ((alloc_capacity(r) >= min_size) && ((result = try_allocate_in(r, req, in_new_region)) != nullptr)) {
 642               return result;
 643             }
 644             idx = _partitions.find_index_of_previous_available_region(ShenandoahFreeSetPartitionId::Mutator, idx - 1);
 645           }
 646         }
 647       } else {
 648         // Allocate from low to high memory.  This keeps the range of fully empty regions more tightly packed.
 649         // Note that the most recently allocated regions tend not to be evacuated in a given GC cycle.  So this
 650         // tends to accumulate "fragmented" uncollected regions in high memory.
 651         if (!_partitions.is_empty(ShenandoahFreeSetPartitionId::Mutator)) {
 652           // Use signed idx.  Otherwise, loop will never terminate.
 653           idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
 654           for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); idx <= rightmost; ) {
 655             assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx),
 656                    "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, idx);
 657             ShenandoahHeapRegion* r = _heap->get_region(idx);
 658             // try_allocate_in() increases used if the allocation is successful.
 659             HeapWord* result;
 660             size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab)? req.min_size(): req.size();
 661             if ((alloc_capacity(r) >= min_size) && ((result = try_allocate_in(r, req, in_new_region)) != nullptr)) {
 662               return result;
 663             }
 664             idx = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, idx + 1);
 665           }
 666         }
 667       }
 668       // There is no recovery. Mutator does not touch collector view at all.
 669       break;
 670     }
 671     case ShenandoahAllocRequest::_alloc_gclab:
 672       // GCLABs are for evacuation so we must be in evacuation phase.
 673 
 674     case ShenandoahAllocRequest::_alloc_shared_gc: {
 675       // Fast-path: try to allocate in the collector view first
 676       idx_t leftmost_collector = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector);
 677       for (idx_t idx = _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector); idx >= leftmost_collector; ) {
 678         assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, idx),
 679                "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, idx);
 680         HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
 681         if (result != nullptr) {
 682           return result;
 683         }
 684         idx = _partitions.find_index_of_previous_available_region(ShenandoahFreeSetPartitionId::Collector, idx - 1);
 685       }
 686 
 687       // No dice. Can we borrow space from mutator view?
 688       if (!ShenandoahEvacReserveOverflow) {
 689         return nullptr;
 690       }
 691 
 692       // Try to steal an empty region from the mutator view.
 693       idx_t leftmost_mutator_empty = _partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator);
 694       for (idx_t idx = _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator); idx >= leftmost_mutator_empty; ) {
 695         assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx),
 696                "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, idx);
 697         ShenandoahHeapRegion* r = _heap->get_region(idx);
 698         if (can_allocate_from(r)) {
 699           flip_to_gc(r);
 700           HeapWord *result = try_allocate_in(r, req, in_new_region);
 701           if (result != nullptr) {
 702             log_debug(gc)("Flipped region " SIZE_FORMAT " to gc for request: " PTR_FORMAT, idx, p2i(&req));
 703             return result;
 704           }
 705         }
 706         idx = _partitions.find_index_of_previous_available_region(ShenandoahFreeSetPartitionId::Mutator, idx - 1);
 707       }
 708 
 709       // No dice. Do not try to mix mutator and GC allocations, because adjusting the region's update
 710       // watermark (UWM) due to GC allocations would expose unparsable mutator allocations.
 711       break;
 712     }
 713     default:
 714       ShouldNotReachHere();
 715   }
 716   return nullptr;
 717 }
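
// Editorial example of the bias decision above (hypothetical numbers): if the Mutator interval is
// [10, 90] and its empty subinterval is [30, 70], then non_empty_on_left == 30 - 10 == 20 and
// non_empty_on_right == 90 - 70 == 20; since non_empty_on_right is not greater, the next
// _InitialAllocBiasWeight mutator allocations proceed from low memory to high.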
 718 
 719 HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
 720   assert (has_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());
 721   if (_heap->is_concurrent_weak_root_in_progress() && r->is_trash()) {
 722     return nullptr;
 723   }
 724 
 725   HeapWord* result = nullptr;
 726   try_recycle_trashed(r);
 727   in_new_region = r->is_empty();
 728 
 729   if (in_new_region) {
 730     log_debug(gc)("Using new region (" SIZE_FORMAT ") for %s (" PTR_FORMAT ").",
 731                        r->index(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(&req));
 732   }
 733 
 734   // req.size() is in words, r->free() is in bytes.
 735   if (req.is_lab_alloc()) {
 736     // This is a GCLAB or a TLAB allocation
 737     size_t adjusted_size = req.size();
 738     size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
 739     if (adjusted_size > free) {
 740       adjusted_size = free;
 741     }
 742     if (adjusted_size >= req.min_size()) {
 743       result = r->allocate(adjusted_size, req.type());
 744       log_debug(gc)("Allocated " SIZE_FORMAT " words (adjusted from " SIZE_FORMAT ") for %s @" PTR_FORMAT
 745                           " from %s region " SIZE_FORMAT ", free bytes remaining: " SIZE_FORMAT,
 746                           adjusted_size, req.size(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(result),
 747                           _partitions.partition_membership_name(r->index()), r->index(), r->free());
 748       assert (result != nullptr, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, adjusted_size);
 749       req.set_actual_size(adjusted_size);
 750     } else {
 751       log_trace(gc, free)("Failed to shrink TLAB or GCLAB request (" SIZE_FORMAT ") in region " SIZE_FORMAT " to " SIZE_FORMAT
 752                           " because min_size() is " SIZE_FORMAT, req.size(), r->index(), adjusted_size, req.min_size());
 753     }
 754   } else {
 755     size_t size = req.size();
 756     result = r->allocate(size, req.type());
 757     if (result != nullptr) {
 758       // Record actual allocation size
 759       log_debug(gc)("Allocated " SIZE_FORMAT " words for %s @" PTR_FORMAT
 760                           " from %s region " SIZE_FORMAT ", free bytes remaining: " SIZE_FORMAT,
 761                           size, ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(result),
 762                           _partitions.partition_membership_name(r->index()),  r->index(), r->free());
 763       req.set_actual_size(size);
 764     }
 765   }
 766 
 767   if (result != nullptr) {
 768     // Allocation successful, bump stats:
 769     if (req.is_mutator_alloc()) {
 770       _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, req.actual_size() * HeapWordSize);
 771     } else {
 772       assert(req.is_gc_alloc(), "Should be gc_alloc since req wasn't mutator alloc");
 773 
 774       // For GC allocations, we advance update_watermark because the objects relocated into this memory during
 775       // evacuation are not updated during evacuation.
 776       r->set_update_watermark(r->top());
 777     }
 778   }
 779 
 780   static const size_t min_capacity = (size_t) (ShenandoahHeapRegion::region_size_bytes() * (1.0 - 1.0 / ShenandoahEvacWaste));
 781   size_t ac = alloc_capacity(r);
 782 
 783   if (((result == nullptr) && (ac < min_capacity)) || (ac < PLAB::min_size() * HeapWordSize)) {
 784     // Regardless of whether this allocation succeeded, if the remaining memory is less than PLAB::min_size(), retire this region.
 785     // Note that retire_from_partition() increases used to account for waste.
 786 
 787     // Also, if this allocation request failed and the memory consumed within this region, scaled by ShenandoahEvacWaste,
 788     // exceeds the region size, then retire the region so that subsequent searches can find available memory more quickly.
 789 
 790     size_t idx = r->index();
 791     _partitions.retire_from_partition(req.is_mutator_alloc()?
 792                                       ShenandoahFreeSetPartitionId::Mutator: ShenandoahFreeSetPartitionId::Collector,
 793                                       idx, r->used());
 794     _partitions.assert_bounds();
 795   }
 796   return result;
 797 }
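
// Editorial example of the LAB shrinking above (hypothetical numbers): a TLAB request with
// size() == 2048 words and min_size() == 256 words, tried in a region with 1024 aligned words
// free, is shrunk to adjusted_size == 1024 and succeeds; in a region with only 128 words free it
// fails here, because 128 < min_size(), and the search moves on to another region.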
 798 
 799 HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
 800   assert(req.is_mutator_alloc(), "All humongous allocations are performed by mutator");
 801   shenandoah_assert_heaplocked();
 802 
 803   size_t words_size = req.size();
 804   idx_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
 805 
 806   // Check if there are enough regions left to satisfy allocation.
 807   if (num > (idx_t) _partitions.count(ShenandoahFreeSetPartitionId::Mutator)) {
 808     return nullptr;
 809   }
 810 
 811   idx_t start_range = _partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator);
 812   idx_t end_range = _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator) + 1;
 813   idx_t last_possible_start = end_range - num;
 814 
 815   // Find the contiguous interval of $num regions, starting from $beg and ending at $end,
 816   // inclusive. Contiguous allocations are biased to the beginning.
 817   idx_t beg = _partitions.find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId::Mutator,
 818                                                                             start_range, num);
 819   if (beg > last_possible_start) {
 820     // Hit the end, goodbye
 821     return nullptr;
 822   }
 823   idx_t end = beg;
 824 
 825   while (true) {
 826     // We've confirmed num contiguous regions belonging to Mutator partition, so no need to confirm membership.
 827     // If region is not completely free, the current [beg; end] is useless, and we may fast-forward.  If we can extend
 828     // the existing range, we can exploit that certain regions are already known to be in the Mutator free set.
 829     while (!can_allocate_from(_heap->get_region(end))) {
 830       // region[end] is not empty, so we restart our search after region[end]
 831       idx_t slide_delta = end + 1 - beg;
 832       if (beg + slide_delta > last_possible_start) {
 833         // no room to slide
 834         return nullptr;
 835       }
 836       for (idx_t span_end = beg + num; slide_delta > 0; slide_delta--) {
 837         if (!_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, span_end)) {
 838           beg = _partitions.find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId::Mutator,
 865     ShenandoahHeapRegion* r = _heap->get_region(i);
 866     try_recycle_trashed(r);
 867 
 868     assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous");
 869     assert(r->is_empty(), "Should be empty");
 870 
 871     if (i == beg) {
 872       r->make_humongous_start();
 873     } else {
 874       r->make_humongous_cont();
 875     }
 876 
 877     // Trailing region may be non-full, record the remainder there
 878     size_t used_words;
 879     if ((i == end) && (remainder != 0)) {
 880       used_words = remainder;
 881     } else {
 882       used_words = ShenandoahHeapRegion::region_size_words();
 883     }
 884 
 885     r->set_top(r->bottom() + used_words);
 886   }
 887 
 888   if (remainder != 0) {
 889     // Record this remainder as allocation waste
 890     _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);
 891   }
 892 
 893   // retire_range_from_partition() will adjust bounds on Mutator free set if appropriate
 894   _partitions.retire_range_from_partition(ShenandoahFreeSetPartitionId::Mutator, beg, end);
 895 
 896   size_t total_humongous_size = ShenandoahHeapRegion::region_size_bytes() * num;
 897   _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, total_humongous_size);
 898   _partitions.assert_bounds();
 899   req.set_actual_size(words_size);
 900   return _heap->get_region(beg)->bottom();
 901 }
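
// Editorial example of the sizing above (hypothetical numbers, 4 MiB regions and 8-byte words):
// a 9 MiB humongous request has words_size == 1,179,648 and needs num == 3 regions; the trailing
// remainder is 1,179,648 - 2 * 524,288 == 131,072 words (1 MiB), so the last region's top is set
// 1 MiB above its bottom and the remaining 3 MiB is reported as allocation waste.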
 902 
 903 void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion* r) {
 904   if (r->is_trash()) {
 905     _heap->decrease_used(r->used());
 906     r->recycle();
 907   }
 908 }
 909 
 910 void ShenandoahFreeSet::recycle_trash() {
 911   // The heap lock is not reentrant; check that we do not already hold it
 912   shenandoah_assert_not_heaplocked();
 913   size_t count = 0;
 914   for (size_t i = 0; i < _heap->num_regions(); i++) {
 915     ShenandoahHeapRegion* r = _heap->get_region(i);
 916     if (r->is_trash()) {
 917       _trash_regions[count++] = r;
 918     }
 919   }
 920 
 921   size_t total_batches = 0;
 922   jlong batch_start_time = 0;
 923   jlong recycle_trash_start_time = os::javaTimeNanos();    // This value will be treated as the initial batch_start_time
 924   jlong batch_end_time = recycle_trash_start_time;
 925   // Process as many batches as can be processed within 10 us.
 943       // to be processed between yields most of the time.
 944       //
 945       // Note that the deadline is measured from the end of the previous batch.  In the case that yield() or acquisition of the heap lock
 946       // takes a "long time", we will have less time to process regions, but we will always process at least one batch between
 947       // yields.  Yielding more frequently when there is heavy contention for the heap lock or for CPU cores is considered the
 948       // right thing to do.
 949       const size_t REGIONS_PER_BATCH = 32;
 950       size_t max_idx = MIN2(count, idx + REGIONS_PER_BATCH);
 951       while (idx < max_idx) {
 952         try_recycle_trashed(_trash_regions[idx++]);
 953       }
 954       total_batches++;
 955       batch_end_time = os::javaTimeNanos();
 956       // Estimate includes historic combination of yield times and heap lock acquisition times.
 957       batch_process_time_estimate = (batch_end_time - recycle_trash_start_time) / total_batches;
 958       predicted_next_batch_end_time = batch_end_time + batch_process_time_estimate;
 959     } while ((idx < count) && (predicted_next_batch_end_time < deadline));
 960   }
 961 }
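
// Editorial example of the batching heuristic above (hypothetical timings): if the first three
// 32-region batches finish 30 us after recycle_trash_start_time, then
// batch_process_time_estimate == 10 us, so a fourth batch is attempted only if
// batch_end_time + 10 us still falls before the current deadline; otherwise the heap lock is
// yielded before further batches are processed.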
 962 
 963 void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
 964   size_t idx = r->index();
 965 
 966   assert(_partitions.partition_id_matches(idx, ShenandoahFreeSetPartitionId::Mutator), "Should be in mutator view");
 967   assert(can_allocate_from(r), "Should not be allocated");
 968 
 969   size_t ac = alloc_capacity(r);
 970   _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
 971                                                ShenandoahFreeSetPartitionId::Collector, ac);
 972   _partitions.assert_bounds();
 973 
 974   // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
 975   // to recycle trash before attempting to allocate anything in the region.
 976 }
 977 
 978 void ShenandoahFreeSet::clear() {
 979   shenandoah_assert_heaplocked();
 980   clear_internal();
 981 }
 982 
 983 void ShenandoahFreeSet::clear_internal() {
 984   _partitions.make_all_regions_unavailable();
 985 }
 986 
 987 void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &cset_regions) {
 988 
 989   cset_regions = 0;
 990   clear_internal();
 991   size_t region_size_bytes = _partitions.region_size_bytes();
 992   size_t max_regions = _partitions.max_regions();
 993 
 994   size_t mutator_leftmost = max_regions;
 995   size_t mutator_rightmost = 0;
 996   size_t mutator_leftmost_empty = max_regions;
 997   size_t mutator_rightmost_empty = 0;
 998 
 999   size_t mutator_regions = 0;
1000   size_t mutator_used = 0;
1001 
1002   for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
1003     ShenandoahHeapRegion* region = _heap->get_region(idx);
1004     if (region->is_trash()) {
1005       // Trashed regions represent regions that had been in the collection set but have not yet been "cleaned up".
1006       // The cset regions are not "trashed" until we have finished update refs.
1007       cset_regions++;
1008     }
1009     if (region->is_alloc_allowed() || region->is_trash()) {
1010 
1011       // Do not add regions that would almost surely fail allocation
1012       size_t ac = alloc_capacity(region);
1013       if (ac > PLAB::min_size() * HeapWordSize) {
1014         _partitions.raw_assign_membership(idx, ShenandoahFreeSetPartitionId::Mutator);
1015 
1016         if (idx < mutator_leftmost) {
1017           mutator_leftmost = idx;
1018         }
1019         if (idx > mutator_rightmost) {
1020           mutator_rightmost = idx;
1021         }
1022         if (ac == region_size_bytes) {
1023           if (idx < mutator_leftmost_empty) {
1024             mutator_leftmost_empty = idx;
1025           }
1026           if (idx > mutator_rightmost_empty) {
1027             mutator_rightmost_empty = idx;
1028           }
1029         }
1030         mutator_regions++;
1031         mutator_used += (region_size_bytes - ac);
1032 
1033         log_debug(gc)(
1034           "  Adding Region " SIZE_FORMAT " (Free: " SIZE_FORMAT "%s, Used: " SIZE_FORMAT "%s) to mutator partition",
1035           idx, byte_size_in_proper_unit(region->free()), proper_unit_for_byte_size(region->free()),
1036           byte_size_in_proper_unit(region->used()), proper_unit_for_byte_size(region->used()));
1037       }
1038     }
1039   }
1040   idx_t rightmost_idx = (mutator_leftmost == max_regions)? -1: (idx_t) mutator_rightmost;
1041   idx_t rightmost_empty_idx = (mutator_leftmost_empty == max_regions)? -1: (idx_t) mutator_rightmost_empty;
1042   _partitions.establish_mutator_intervals(mutator_leftmost, rightmost_idx, mutator_leftmost_empty, rightmost_empty_idx,
1043                                           mutator_regions, mutator_used);
1044 }
1045 
1046 void ShenandoahFreeSet::move_regions_from_collector_to_mutator(size_t max_xfer_regions) {
1047   size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1048   size_t collector_empty_xfer = 0;
1049   size_t collector_not_empty_xfer = 0;
1050 
1051   // Process empty regions within the Collector free partition
1052   if ((max_xfer_regions > 0) &&
1053       (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Collector)
1054        <= _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Collector))) {
1055     ShenandoahHeapLocker locker(_heap->lock());
1056     idx_t rightmost = _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Collector);
1057     for (idx_t idx = _partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Collector);
1058          (max_xfer_regions > 0) && (idx <= rightmost); ) {
1059       assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, idx),
1060              "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, idx);
1061       // Note: can_allocate_from() denotes that region is entirely empty
1062       if (can_allocate_from(idx)) {
1063         _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Collector,
1064                                                      ShenandoahFreeSetPartitionId::Mutator, region_size_bytes);
1065         max_xfer_regions--;
1066         collector_empty_xfer += region_size_bytes;
1067       }
1068       idx = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Collector, idx + 1);
1069     }
1070   }
1071 
1072   // If there are any non-empty regions within Collector partition, we can also move them to the Mutator free partition
1073   if ((max_xfer_regions > 0) && (_partitions.leftmost(ShenandoahFreeSetPartitionId::Collector)
1074                                  <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector))) {
1075     ShenandoahHeapLocker locker(_heap->lock());
1076     idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector);
1077     for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector);
1078          (max_xfer_regions > 0) && (idx <= rightmost); ) {
1079       assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, idx),
1080              "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, idx);
1081       size_t ac = alloc_capacity(idx);
1082       if (ac > 0) {
1083         _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Collector,
1084                                                      ShenandoahFreeSetPartitionId::Mutator, ac);
1085         max_xfer_regions--;
1086         collector_not_empty_xfer += ac;
1087       }
1088       idx = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Collector, idx + 1);
1089     }
1090   }
1091 
1092   size_t collector_xfer = collector_empty_xfer + collector_not_empty_xfer;
1093   log_info(gc, ergo)("At start of update refs, moving " SIZE_FORMAT "%s to Mutator free partition from Collector Reserve",
1094                      byte_size_in_proper_unit(collector_xfer), proper_unit_for_byte_size(collector_xfer));
1095 }
1096 
1097 void ShenandoahFreeSet::prepare_to_rebuild(size_t &cset_regions) {
1098   shenandoah_assert_heaplocked();
1099 
1100   log_debug(gc)("Rebuilding FreeSet");
1101 
1102   // This places regions that have alloc_capacity into the mutator partition.
1103   find_regions_with_alloc_capacity(cset_regions);
1104 }
1105 
1106 void ShenandoahFreeSet::finish_rebuild(size_t cset_regions) {
1107   shenandoah_assert_heaplocked();
1108 
1109   // Our desire is to reserve this much memory for future evacuation.  We may end up reserving less, if
1110   // memory is in short supply.
1111 
1112   size_t reserve = _heap->max_capacity() * ShenandoahEvacReserve / 100;
1113   size_t available_in_collector_partition = (_partitions.capacity_of(ShenandoahFreeSetPartitionId::Collector)
1114                                              - _partitions.used_by(ShenandoahFreeSetPartitionId::Collector));
1115   size_t additional_reserve;
1116   if (available_in_collector_partition < reserve) {
1117     additional_reserve = reserve - available_in_collector_partition;
1118   } else {
1119     additional_reserve = 0;
1120   }
1121 
1122   reserve_regions(reserve);
1123   _partitions.assert_bounds();
1124   log_status();
1125 }
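
// Editorial example of the reserve sizing above (hypothetical numbers): with a 1024 MiB heap and
// ShenandoahEvacReserve == 5, reserve is roughly 51 MiB; if the Collector partition already has
// 20 MiB available, additional_reserve is roughly 31 MiB, the shortfall that reserve_regions()
// covers by moving regions out of the Mutator partition.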
1126 
1127 void ShenandoahFreeSet::rebuild() {
1128   size_t cset_regions;
1129   prepare_to_rebuild(cset_regions);
1130   finish_rebuild(cset_regions);
1131 }
1132 
1133 void ShenandoahFreeSet::reserve_regions(size_t to_reserve) {
1134   for (size_t i = _heap->num_regions(); i > 0; i--) {
1135     size_t idx = i - 1;
1136     ShenandoahHeapRegion* r = _heap->get_region(idx);
1137 
1138     if (!_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx)) {
1139       continue;
1140     }
1141 
1142     size_t ac = alloc_capacity(r);
1143     assert (ac > 0, "Membership in free partition implies has capacity");
1144 
1145     if (_partitions.available_in(ShenandoahFreeSetPartitionId::Collector) >= to_reserve) {
1146       // We've satisfied to_reserve
1147       break;
1148     }
1150 
1151     // Note: In a previous implementation, regions were only placed into the survivor space (collector_is_free) if
1152     // they were entirely empty.  This has the effect of causing new Mutator allocation to reside next to objects
1153     // that have already survived at least one GC, mixing ephemeral with longer-lived objects in the same region.
1154     // Any objects that have survived a GC are less likely to immediately become garbage, so a region that contains
1155     // survivor objects is less likely to be selected for the collection set.  This alternative implementation allows
1156     // survivor regions to continue accumulating other survivor objects, and makes it more likely that ephemeral objects
1157     // occupy regions comprised entirely of ephemeral objects.  These regions are highly likely to be included in the next
1158     // collection set, and they are easily evacuated because they have low density of live objects.
1159     _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
1160                                                  ShenandoahFreeSetPartitionId::Collector, ac);
1161     log_debug(gc)("  Shifting region " SIZE_FORMAT " from mutator_free to collector_free", idx);
1164   }
1165 
1166   if (LogTarget(Info, gc, free)::is_enabled()) {
1167     size_t reserve = _partitions.available_in(ShenandoahFreeSetPartitionId::Collector);
1168     if (reserve < to_reserve) {
1169       log_info(gc, free)("Wanted " PROPERFMT " for young reserve, but only reserved: " PROPERFMT,
1170                          PROPERFMTARGS(to_reserve), PROPERFMTARGS(reserve));
1171     }
1172   }
1173 }
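
// Editorial example of the walk above (hypothetical numbers, 4 MiB regions): a 51 MiB to_reserve
// claims the 13 highest-indexed available Mutator regions (13 * 4 MiB >= 51 MiB), so the
// Collector reserve accumulates in high memory, matching the right-to-left search that
// allocate_single() uses for GC allocations.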
1174 
1175 void ShenandoahFreeSet::log_status_under_lock() {
1176   // Must not be called with the heap lock held; the lock is acquired only when logging is enabled
1177   shenandoah_assert_not_heaplocked();
1178   if (LogTarget(Info, gc, free)::is_enabled()
1179       DEBUG_ONLY(|| LogTarget(Debug, gc, free)::is_enabled())) {
1180     ShenandoahHeapLocker locker(_heap->lock());
1181     log_status();
1182   }
1183 }
1184 
1185 void ShenandoahFreeSet::log_status() {
1186   shenandoah_assert_heaplocked();
1187 
1188 #ifdef ASSERT
1189   // Dump of the FreeSet details is only enabled if assertions are enabled
1190   if (LogTarget(Debug, gc, free)::is_enabled()) {
1191 #define BUFFER_SIZE 80
1192     size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1193     size_t consumed_collector = 0;
1194     size_t available_collector = 0;
1195     size_t consumed_mutator = 0;
1196     size_t available_mutator = 0;
1197 
1198     char buffer[BUFFER_SIZE];
1199     for (uint i = 0; i < BUFFER_SIZE; i++) {
1200       buffer[i] = '\0';
1201     }
1202     log_debug(gc)("FreeSet map legend: M/m:mutator_free C/c:collector_free h:humongous _:retired (upper case: empty)");
1203     log_debug(gc)(" mutator free range [" SIZE_FORMAT ".." SIZE_FORMAT "],"
1204                   " collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "]",
1205                   _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
1206                   _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
1207                   _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector),
1208                   _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector));
1209 
1210     for (uint i = 0; i < _heap->num_regions(); i++) {
1211       ShenandoahHeapRegion *r = _heap->get_region(i);
1212       uint idx = i % 64;
1213       if ((i != 0) && (idx == 0)) {
1214         log_debug(gc)(" %6u: %s", i-64, buffer);
1215       }
1216       if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, i)) {
1217         size_t capacity = alloc_capacity(r);
1218         available_mutator += capacity;
1219         consumed_mutator += region_size_bytes - capacity;
1220         buffer[idx] = (capacity == region_size_bytes)? 'M': 'm';
1221       } else if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, i)) {
1222         size_t capacity = alloc_capacity(r);
1223         available_collector += capacity;
1224         consumed_collector += region_size_bytes - capacity;
1225         buffer[idx] = (capacity == region_size_bytes)? 'C': 'c';
1226       } else if (r->is_humongous()) {
1227         buffer[idx] = 'h';
1228       } else {
1229         buffer[idx] = '_';
1230       }
1231     }
1232     uint remnant = _heap->num_regions() % 64;
1233     if (remnant > 0) {
1234       buffer[remnant] = '\0';
1235     } else {
1236       remnant = 64;
1237     }
1238     log_debug(gc)(" %6u: %s", (uint) (_heap->num_regions() - remnant), buffer);
1239   }
1240 #endif
1241 
1242   LogTarget(Info, gc, free) lt;
1243   if (lt.is_enabled()) {
1244     ResourceMark rm;
1245     LogStream ls(lt);
1246 
1247     {
1248       idx_t last_idx = 0;
1249       size_t max = 0;
1267             } else {
1268               empty_contig = 1;
1269             }
1270           } else {
1271             empty_contig = 0;
1272           }
1273           total_used += r->used();
1274           total_free += free;
1275           max_contig = MAX2(max_contig, empty_contig);
1276           last_idx = idx;
1277         }
1278       }
1279 
1280       size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
1281       size_t free = capacity() - used();
1282 
1283       // Since certain regions that belonged to the Mutator free partition at the time of the most recent rebuild may have
1284       // been retired, the sums of used and capacity over regions still in the Mutator free partition may not match the
1285       // internally tracked used() and capacity(); retirement counts residual free memory as used, so free() still matches.
1286       assert(free == total_free, "Free memory should match");
1287 
1288       ls.print("Free: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s regular, " SIZE_FORMAT "%s humongous, ",
1289                byte_size_in_proper_unit(free),          proper_unit_for_byte_size(free),
1290                byte_size_in_proper_unit(max),           proper_unit_for_byte_size(max),
1291                byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous)
1292       );
1293 
1294       ls.print("Frag: ");
1295       size_t frag_ext;
1296       if (total_free_ext > 0) {
1297         frag_ext = 100 - (100 * max_humongous / total_free_ext);
1298       } else {
1299         frag_ext = 0;
1300       }
1301       ls.print(SIZE_FORMAT "%% external, ", frag_ext);
1302 
1303       size_t frag_int;
1304       if (_partitions.count(ShenandoahFreeSetPartitionId::Mutator) > 0) {
1305         frag_int = (100 * (total_used / _partitions.count(ShenandoahFreeSetPartitionId::Mutator))
1306                     / ShenandoahHeapRegion::region_size_bytes());
1307       } else {
1308         frag_int = 0;
1309       }
1316     {
1317       size_t max = 0;
1318       size_t total_free = 0;
1319       size_t total_used = 0;
1320 
1321       for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector);
1322            idx <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector); idx++) {
1323         if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, idx)) {
1324           ShenandoahHeapRegion *r = _heap->get_region(idx);
1325           size_t free = alloc_capacity(r);
1326           max = MAX2(max, free);
1327           total_free += free;
1328           total_used += r->used();
1329         }
1330       }
1331       ls.print(" Collector Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s; Used: " SIZE_FORMAT "%s",
1332                byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
1333                byte_size_in_proper_unit(max),        proper_unit_for_byte_size(max),
1334                byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used));
1335     }
1336   }
1337 }
1338 
1339 HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
1340   shenandoah_assert_heaplocked();
1341   if (ShenandoahHeapRegion::requires_humongous(req.size())) {
1342     switch (req.type()) {
1343       case ShenandoahAllocRequest::_alloc_shared:
1344       case ShenandoahAllocRequest::_alloc_shared_gc:
1345         in_new_region = true;
1346         return allocate_contiguous(req);
1347       case ShenandoahAllocRequest::_alloc_gclab:
1348       case ShenandoahAllocRequest::_alloc_tlab:
1349         in_new_region = false;
1350         assert(false, "Trying to allocate TLAB larger than a heap region: " SIZE_FORMAT, req.size());
1351         return nullptr;
1352       default:
1353         ShouldNotReachHere();
1354         return nullptr;
1355     }
1356   } else {
1357     return allocate_single(req, in_new_region);
1358   }
1359 }
1360 
1361 void ShenandoahFreeSet::print_on(outputStream* out) const {
1362   out->print_cr("Mutator Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::Mutator));
1363   idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
1364   for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); index <= rightmost; ) {
1365     assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, index),
1366            "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
1367     _heap->get_region(index)->print_on(out);
1368     index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, index + 1);
1369   }
1370   out->print_cr("Collector Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::Collector));
1371   rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector);
1372   for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector); index <= rightmost; ) {
1373     assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, index),
1374            "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
1375     _heap->get_region(index)->print_on(out);
1376     index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Collector, index + 1);
1377   }
1378 }
1379 
1380 double ShenandoahFreeSet::internal_fragmentation() {
1381   double squared = 0;
1382   double linear = 0;
1383 
1384   idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
1385   for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); index <= rightmost; ) {
1386     assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, index),
1387            "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
1388     ShenandoahHeapRegion* r = _heap->get_region(index);
1389     size_t used = r->used();
1390     squared += used * used;
1391     linear += used;
1392     index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, index + 1);
1393   }
1394 
1395   if (linear > 0) {
1396     double s = squared / (ShenandoahHeapRegion::region_size_bytes() * linear);
1397     return 1 - s;
1398   } else {
1399     return 0;
1400   }
1401 }
1402 
1403 double ShenandoahFreeSet::external_fragmentation() {
1404   idx_t last_idx = 0;
1405   size_t max_contig = 0;
1406   size_t empty_contig = 0;
1407 
1408   size_t free = 0;
1409 
1410   idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
1411   for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); index <= rightmost; ) {
1412     assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, index),
1413            "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
1414     ShenandoahHeapRegion* r = _heap->get_region(index);
1415     if (r->is_empty()) {
1416       free += ShenandoahHeapRegion::region_size_bytes();
1417       if (last_idx + 1 == index) {
1418         empty_contig++;
1419       } else {
1420         empty_contig = 1;
1421       }
1422     } else {
1423       empty_contig = 0;
1424     }
1425     max_contig = MAX2(max_contig, empty_contig);
1426     last_idx = index;
1427     index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, index + 1);
1428   }
1429 
1430   if (free > 0) {
1431     return 1 - (1.0 * max_contig * ShenandoahHeapRegion::region_size_bytes() / free);
1432   } else {
1433     return 0;
1434   }
1435 }
1436 

   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "gc/shared/tlab_globals.hpp"
  28 #include "gc/shenandoah/shenandoahAffiliation.hpp"
  29 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  30 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  31 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  32 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  33 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  34 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  35 #include "gc/shenandoah/shenandoahSimpleBitMap.hpp"
  36 #include "gc/shenandoah/shenandoahSimpleBitMap.inline.hpp"
  37 #include "logging/logStream.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "runtime/orderAccess.hpp"
  40 
  41 static const char* partition_name(ShenandoahFreeSetPartitionId t) {
  42   switch (t) {
  43     case ShenandoahFreeSetPartitionId::NotFree: return "NotFree";
  44     case ShenandoahFreeSetPartitionId::Mutator: return "Mutator";
  45     case ShenandoahFreeSetPartitionId::Collector: return "Collector";
  46     case ShenandoahFreeSetPartitionId::OldCollector: return "OldCollector";
  47     default:
  48       ShouldNotReachHere();
  49       return "Unrecognized";
  50   }
  51 }
  52 
  53 class ShenandoahLeftRightIterator {
  54 private:
  55   idx_t _idx;
  56   idx_t _end;
  57   ShenandoahRegionPartitions* _partitions;
  58   ShenandoahFreeSetPartitionId _partition;
  59 public:
  60   explicit ShenandoahLeftRightIterator(ShenandoahRegionPartitions* partitions, ShenandoahFreeSetPartitionId partition, bool use_empty = false)
  61     : _idx(0), _end(0), _partitions(partitions), _partition(partition) {
  62     _idx = use_empty ? _partitions->leftmost_empty(_partition) : _partitions->leftmost(_partition);
  63     _end = use_empty ? _partitions->rightmost_empty(_partition) : _partitions->rightmost(_partition);
  64   }
  65 
  66   bool has_next() const {
  67     if (_idx <= _end) {
  68       assert(_partitions->in_free_set(_partition, _idx), "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, _idx);
  69       return true;
  70     }
  71     return false;
  72   }
  73 
  74   idx_t current() const {
  75     return _idx;
  76   }
  77 
  78   idx_t next() {
  79     _idx = _partitions->find_index_of_next_available_region(_partition, _idx + 1);
  80     return current();
  81   }
  82 };
  83 
  84 class ShenandoahRightLeftIterator {
  85 private:
  86   idx_t _idx;
  87   idx_t _end;
  88   ShenandoahRegionPartitions* _partitions;
  89   ShenandoahFreeSetPartitionId _partition;
  90 public:
  91   explicit ShenandoahRightLeftIterator(ShenandoahRegionPartitions* partitions, ShenandoahFreeSetPartitionId partition, bool use_empty = false)
  92     : _idx(0), _end(0), _partitions(partitions), _partition(partition) {
  93     _idx = use_empty ? _partitions->rightmost_empty(_partition) : _partitions->rightmost(_partition);
  94     _end = use_empty ? _partitions->leftmost_empty(_partition) : _partitions->leftmost(_partition);
  95   }
  96 
  97   bool has_next() const {
  98     if (_idx >= _end) {
  99       assert(_partitions->in_free_set(_partition, _idx), "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, _idx);
 100       return true;
 101     }
 102     return false;
 103   }
 104 
 105   idx_t current() const {
 106     return _idx;
 107   }
 108 
 109   idx_t next() {
 110     _idx = _partitions->find_index_of_previous_available_region(_partition, _idx - 1);
 111     return current();
 112   }
 113 };
 114 
 115 #ifndef PRODUCT
 116 void ShenandoahRegionPartitions::dump_bitmap() const {
 117   log_debug(gc)("Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "], Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT
 118                "], Old Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
 119                _leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)],
 120                _rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)],
 121                _leftmosts[int(ShenandoahFreeSetPartitionId::Collector)],
 122                _rightmosts[int(ShenandoahFreeSetPartitionId::Collector)],
 123                _leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)],
 124                _rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)]);
 125   log_debug(gc)("Empty Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT
 126                "], Empty Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT
 127                "], Empty Old Collecto range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
 128                _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
 129                _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
 130                _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
 131                _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
 132                _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
 133                _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)]);
 134 
 135   log_debug(gc)("%6s: %18s %18s %18s %18s", "index", "Mutator Bits", "Collector Bits", "Old Collector Bits", "NotFree Bits");
 136   dump_bitmap_range(0, _max-1);
 137 }
 138 
 139 void ShenandoahRegionPartitions::dump_bitmap_range(idx_t start_region_idx, idx_t end_region_idx) const {
 140   assert((start_region_idx >= 0) && (start_region_idx < (idx_t) _max), "precondition");
 141   assert((end_region_idx >= 0) && (end_region_idx < (idx_t) _max), "precondition");
 142   idx_t aligned_start = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(start_region_idx);
 143   idx_t aligned_end = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(end_region_idx);
 144   idx_t alignment = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].alignment();
 145   while (aligned_start <= aligned_end) {
 146     dump_bitmap_row(aligned_start);
 147     aligned_start += alignment;
 148   }
 149 }
 150 
 151 void ShenandoahRegionPartitions::dump_bitmap_row(idx_t region_idx) const {
 152   assert((region_idx >= 0) && (region_idx < (idx_t) _max), "precondition");
 153   idx_t aligned_idx = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(region_idx);
 154   uintx mutator_bits = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].bits_at(aligned_idx);
 155   uintx collector_bits = _membership[int(ShenandoahFreeSetPartitionId::Collector)].bits_at(aligned_idx);
 156   uintx old_collector_bits = _membership[int(ShenandoahFreeSetPartitionId::OldCollector)].bits_at(aligned_idx);
 157   uintx free_bits = mutator_bits | collector_bits | old_collector_bits;
 158   uintx notfree_bits = ~free_bits;
 159   log_debug(gc)(SSIZE_FORMAT_W(6) ": " SIZE_FORMAT_X_0 " " SIZE_FORMAT_X_0 " " SIZE_FORMAT_X_0 " " SIZE_FORMAT_X_0,
 160                aligned_idx, mutator_bits, collector_bits, old_collector_bits, notfree_bits);
 161 }
 162 #endif
 163 
 164 ShenandoahRegionPartitions::ShenandoahRegionPartitions(size_t max_regions, ShenandoahFreeSet* free_set) :
 165     _max(max_regions),
 166     _region_size_bytes(ShenandoahHeapRegion::region_size_bytes()),
 167     _free_set(free_set),
 168     _membership{ ShenandoahSimpleBitMap(max_regions), ShenandoahSimpleBitMap(max_regions), ShenandoahSimpleBitMap(max_regions) }
 169 {
 170   make_all_regions_unavailable();
 171 }
 172 
 173 inline bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) const {
 174   return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
 175 }
 176 
 177 inline bool ShenandoahFreeSet::can_allocate_from(size_t idx) const {
 178   ShenandoahHeapRegion* r = _heap->get_region(idx);
 179   return can_allocate_from(r);
 180 }
 181 
 182 inline size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) const {
 183   if (r->is_trash()) {
 184     // This would be recycled on allocation path
 185     return ShenandoahHeapRegion::region_size_bytes();
 186   } else {
 187     return r->free();
 188   }

 218   // which_partition is shrinking after the region that used to be leftmost is retired.
 219   return idx;
 220 }
 221 
 222 void ShenandoahRegionPartitions::make_all_regions_unavailable() {
 223   for (size_t partition_id = 0; partition_id < IntNumPartitions; partition_id++) {
 224     _membership[partition_id].clear_all();
 225     _leftmosts[partition_id] = _max;
 226     _rightmosts[partition_id] = -1;
 227     _leftmosts_empty[partition_id] = _max;
 228     _rightmosts_empty[partition_id] = -1;
 229     _capacity[partition_id] = 0;
 230     _used[partition_id] = 0;
 231   }
 232   _region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = _region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
 233 }
 234 
 235 void ShenandoahRegionPartitions::establish_mutator_intervals(idx_t mutator_leftmost, idx_t mutator_rightmost,
 236                                                              idx_t mutator_leftmost_empty, idx_t mutator_rightmost_empty,
 237                                                              size_t mutator_region_count, size_t mutator_used) {

 238   _leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_leftmost;
 239   _rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_rightmost;
 240   _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_leftmost_empty;
 241   _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_rightmost_empty;
 242 
 243   _region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_region_count;
 244   _used[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_used;
 245   _capacity[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_region_count * _region_size_bytes;
 246 
 247   _leftmosts[int(ShenandoahFreeSetPartitionId::Collector)] = _max;
 248   _rightmosts[int(ShenandoahFreeSetPartitionId::Collector)] = -1;
 249   _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)] = _max;
 250   _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)] = -1;
 251 
 252   _region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
 253   _used[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
 254   _capacity[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
 255 }
 256 
 257 void ShenandoahRegionPartitions::establish_old_collector_intervals(idx_t old_collector_leftmost, idx_t old_collector_rightmost,
 258                                                                    idx_t old_collector_leftmost_empty,
 259                                                                    idx_t old_collector_rightmost_empty,
 260                                                                    size_t old_collector_region_count, size_t old_collector_used) {
 261   _leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_leftmost;
 262   _rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_rightmost;
 263   _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_leftmost_empty;
 264   _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_rightmost_empty;
 265 
 266   _region_counts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_region_count;
 267   _used[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_used;
 268   _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_region_count * _region_size_bytes;
 269 }
 270 
 271 void ShenandoahRegionPartitions::increase_used(ShenandoahFreeSetPartitionId which_partition, size_t bytes) {
 272   assert (which_partition < NumPartitions, "Partition must be valid");
 273   _used[int(which_partition)] += bytes;
 274   assert (_used[int(which_partition)] <= _capacity[int(which_partition)],
 275           "Must not use (" SIZE_FORMAT ") more than capacity (" SIZE_FORMAT ") after increase by " SIZE_FORMAT,
 276           _used[int(which_partition)], _capacity[int(which_partition)], bytes);
 277 }
 278 
 279 inline void ShenandoahRegionPartitions::shrink_interval_if_range_modifies_either_boundary(
 280   ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx) {
 281   assert((low_idx <= high_idx) && (low_idx >= 0) && (high_idx < _max), "Range must span legal index values");
 282   if (low_idx == leftmost(partition)) {
 283     assert (!_membership[int(partition)].is_set(low_idx), "Do not shrink interval if region not removed");
 284     if (high_idx + 1 == _max) {
 285       _leftmosts[int(partition)] = _max;
 286     } else {
 287       _leftmosts[int(partition)] = find_index_of_next_available_region(partition, high_idx + 1);
 288     }
 289     if (_leftmosts_empty[int(partition)] < _leftmosts[int(partition)]) {
 290       // This gets us closer to where we need to be; we'll scan further when leftmosts_empty is requested.
 291       _leftmosts_empty[int(partition)] = _leftmosts[int(partition)];
 292     }
 293   }
 294   if (high_idx == _rightmosts[int(partition)]) {
 295     assert (!_membership[int(partition)].is_set(high_idx), "Do not shrink interval if region not removed");
 296     if (low_idx == 0) {
 297       _rightmosts[int(partition)] = -1;
 298     } else {
 299       _rightmosts[int(partition)] = find_index_of_previous_available_region(partition, low_idx - 1);
 300     }
 301     if (_rightmosts_empty[int(partition)] > _rightmosts[int(partition)]) {
 302       // This gets us closer to where we need to be; we'll scan further when rightmosts_empty is requested.
 303       _rightmosts_empty[int(partition)] = _rightmosts[int(partition)];
 304     }
 305   }
 306   if (_leftmosts[int(partition)] > _rightmosts[int(partition)]) {
 307     _leftmosts[int(partition)] = _max;
 308     _rightmosts[int(partition)] = -1;
 309     _leftmosts_empty[int(partition)] = _max;
 310     _rightmosts_empty[int(partition)] = -1;
 311   }

 358 
 359   if (used_bytes < _region_size_bytes) {
 360     // Count the alignment pad remnant of memory as used when we retire this region
 361     increase_used(partition, _region_size_bytes - used_bytes);
 362   }
 363   _membership[int(partition)].clear_bit(idx);
 364   shrink_interval_if_boundary_modified(partition, idx);
 365   _region_counts[int(partition)]--;
 366 }
 367 
 368 void ShenandoahRegionPartitions::make_free(idx_t idx, ShenandoahFreeSetPartitionId which_partition, size_t available) {
 369   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 370   assert (membership(idx) == ShenandoahFreeSetPartitionId::NotFree, "Cannot make free if already free");
 371   assert (which_partition < NumPartitions, "selected free partition must be valid");
 372   assert (available <= _region_size_bytes, "Available cannot exceed region size");
 373 
 374   _membership[int(which_partition)].set_bit(idx);
 375   _capacity[int(which_partition)] += _region_size_bytes;
 376   _used[int(which_partition)] += _region_size_bytes - available;
 377   expand_interval_if_boundary_modified(which_partition, idx, available);

 378   _region_counts[int(which_partition)]++;
 379 }
 380 
 381 bool ShenandoahRegionPartitions::is_mutator_partition(ShenandoahFreeSetPartitionId p) {
 382   return (p == ShenandoahFreeSetPartitionId::Mutator);
 383 }
 384 
 385 bool ShenandoahRegionPartitions::is_young_collector_partition(ShenandoahFreeSetPartitionId p) {
 386   return (p == ShenandoahFreeSetPartitionId::Collector);
 387 }
 388 
 389 bool ShenandoahRegionPartitions::is_old_collector_partition(ShenandoahFreeSetPartitionId p) {
 390   return (p == ShenandoahFreeSetPartitionId::OldCollector);
 391 }
 392 
 393 bool ShenandoahRegionPartitions::available_implies_empty(size_t available_in_region) {
 394   return (available_in_region == _region_size_bytes);
 395 }
 396 
 397 
 398 void ShenandoahRegionPartitions::move_from_partition_to_partition(idx_t idx, ShenandoahFreeSetPartitionId orig_partition,
 399                                                                   ShenandoahFreeSetPartitionId new_partition, size_t available) {
 400   ShenandoahHeapRegion* r = ShenandoahHeap::heap()->get_region(idx);
 401   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 402   assert (orig_partition < NumPartitions, "Original partition must be valid");
 403   assert (new_partition < NumPartitions, "New partition must be valid");
 404   assert (available <= _region_size_bytes, "Available cannot exceed region size");
 405   assert (_membership[int(orig_partition)].is_set(idx), "Cannot move from partition unless in partition");
 406   assert ((r != nullptr) && ((r->is_trash() && (available == _region_size_bytes)) ||
 407                              (r->used() + available == _region_size_bytes)),
 408           "Used: " SIZE_FORMAT " + available: " SIZE_FORMAT " should equal region size: " SIZE_FORMAT,
 409           ShenandoahHeap::heap()->get_region(idx)->used(), available, _region_size_bytes);
 410 
 411   // Expected transitions:
 412   //  During rebuild:         Mutator => Collector
 413   //                          Mutator empty => Collector
 414   //                          Mutator empty => OldCollector
 415   //  During flip_to_gc:      Mutator empty => Collector
 416   //                          Mutator empty => OldCollector
 417   // At start of update refs: Collector => Mutator
 418   //                          OldCollector Empty => Mutator
 419   assert ((is_mutator_partition(orig_partition) && is_young_collector_partition(new_partition)) ||
 420           (is_mutator_partition(orig_partition) &&
 421            available_implies_empty(available) && is_old_collector_partition(new_partition)) ||
 422           (is_young_collector_partition(orig_partition) && is_mutator_partition(new_partition)) ||
 423           (is_old_collector_partition(orig_partition)
 424            && available_implies_empty(available) && is_mutator_partition(new_partition)),
 425           "Unexpected movement between partitions, available: " SIZE_FORMAT ", _region_size_bytes: " SIZE_FORMAT
 426           ", orig_partition: %s, new_partition: %s",
 427           available, _region_size_bytes, partition_name(orig_partition), partition_name(new_partition));
 428 
 429   size_t used = _region_size_bytes - available;
 430   assert (_used[int(orig_partition)] >= used,
 431           "Orig partition used: " SIZE_FORMAT " must exceed moved used: " SIZE_FORMAT " within region " SSIZE_FORMAT,
 432           _used[int(orig_partition)], used, idx);
 433 
 434   _membership[int(orig_partition)].clear_bit(idx);
 435   _membership[int(new_partition)].set_bit(idx);
 436 
 437   _capacity[int(orig_partition)] -= _region_size_bytes;
 438   _used[int(orig_partition)] -= used;
 439   shrink_interval_if_boundary_modified(orig_partition, idx);
 440 
 441   _capacity[int(new_partition)] += _region_size_bytes;
 442   _used[int(new_partition)] += used;
 443   expand_interval_if_boundary_modified(new_partition, idx, available);
 444 
 445   _region_counts[int(orig_partition)]--;
 446   _region_counts[int(new_partition)]++;
 447 }
 448 
 449 const char* ShenandoahRegionPartitions::partition_membership_name(idx_t idx) const {
 450   return partition_name(membership(idx));
 451 }
 452 

 581   idx_t leftmosts[UIntNumPartitions];
 582   idx_t rightmosts[UIntNumPartitions];
 583   idx_t empty_leftmosts[UIntNumPartitions];
 584   idx_t empty_rightmosts[UIntNumPartitions];
 585 
 586   for (uint i = 0; i < UIntNumPartitions; i++) {
 587     leftmosts[i] = _max;
 588     empty_leftmosts[i] = _max;
 589     rightmosts[i] = -1;
 590     empty_rightmosts[i] = -1;
 591   }
 592 
 593   for (idx_t i = 0; i < _max; i++) {
 594     ShenandoahFreeSetPartitionId partition = membership(i);
 595     switch (partition) {
 596       case ShenandoahFreeSetPartitionId::NotFree:
 597         break;
 598 
 599       case ShenandoahFreeSetPartitionId::Mutator:
 600       case ShenandoahFreeSetPartitionId::Collector:
 601       case ShenandoahFreeSetPartitionId::OldCollector:
 602       {
 603         size_t capacity = _free_set->alloc_capacity(i);
 604         bool is_empty = (capacity == _region_size_bytes);
 605         assert(capacity > 0, "free regions must have allocation capacity");
 606         if (i < leftmosts[int(partition)]) {
 607           leftmosts[int(partition)] = i;
 608         }
 609         if (is_empty && (i < empty_leftmosts[int(partition)])) {
 610           empty_leftmosts[int(partition)] = i;
 611         }
 612         if (i > rightmosts[int(partition)]) {
 613           rightmosts[int(partition)] = i;
 614         }
 615         if (is_empty && (i > empty_rightmosts[int(partition)])) {
 616           empty_rightmosts[int(partition)] = i;
 617         }
 618         break;
 619       }
 620 
 621       default:

 671 
 672   // If Collector partition is empty, leftmosts will both equal max, rightmosts will both equal -1.
 673   // Likewise for empty region partitions.
 674   beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
 675   end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
 676   assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::Collector),
 677           "free regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 678           beg_off, leftmost(ShenandoahFreeSetPartitionId::Collector));
 679   assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::Collector),
 680           "free regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 681           end_off, rightmost(ShenandoahFreeSetPartitionId::Collector));
 682 
 683   beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
 684   end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
 685   assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
 686           "free empty regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 687           beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Collector));
 688   assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
 689           "free empty regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 690           end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Collector));
 691 
 692   // Performance invariants. Failing these would not break the free partition, but performance would suffer.
 693   assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) <= _max, "leftmost in bounds: "  SSIZE_FORMAT " < " SSIZE_FORMAT,
 694           leftmost(ShenandoahFreeSetPartitionId::OldCollector),  _max);
 695   assert (rightmost(ShenandoahFreeSetPartitionId::OldCollector) < _max, "rightmost in bounds: "  SSIZE_FORMAT " < " SSIZE_FORMAT,
 696           rightmost(ShenandoahFreeSetPartitionId::OldCollector),  _max);
 697 
 698   assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) == _max
 699           || partition_id_matches(leftmost(ShenandoahFreeSetPartitionId::OldCollector),
 700                                   ShenandoahFreeSetPartitionId::OldCollector),
 701           "leftmost region should be free: " SSIZE_FORMAT,  leftmost(ShenandoahFreeSetPartitionId::OldCollector));
 702   assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) == _max
 703           || partition_id_matches(rightmost(ShenandoahFreeSetPartitionId::OldCollector),
 704                                   ShenandoahFreeSetPartitionId::OldCollector),
 705           "rightmost region should be free: " SSIZE_FORMAT, rightmost(ShenandoahFreeSetPartitionId::OldCollector));
 706 
 707   // If OldCollector partition is empty, leftmosts will both equal max, rightmosts will both equal -1.
 708   // Likewise for empty region partitions.
 709   beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
 710   end_off = rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
 711   assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::OldCollector),
 712           "free regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 713           beg_off, leftmost(ShenandoahFreeSetPartitionId::OldCollector));
 714   assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::OldCollector),
 715           "free regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 716           end_off, rightmost(ShenandoahFreeSetPartitionId::OldCollector));
 717 
 718   beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
 719   end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
 720   assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
 721           "free empty regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 722           beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::OldCollector));
 723   assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
 724           "free empty regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 725           end_off, rightmost_empty(ShenandoahFreeSetPartitionId::OldCollector));
 726 }
 727 #endif
 728 
 729 ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
 730   _heap(heap),
 731   _partitions(max_regions, this),
 732   _trash_regions(NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, max_regions, mtGC)),

 733   _alloc_bias_weight(0)
 734 {
 735   clear_internal();
 736 }
 737 
 738 void ShenandoahFreeSet::add_promoted_in_place_region_to_old_collector(ShenandoahHeapRegion* region) {
 739   shenandoah_assert_heaplocked();
 740   size_t plab_min_size_in_bytes = ShenandoahGenerationalHeap::heap()->plab_min_size() * HeapWordSize;
 741   size_t idx = region->index();
 742   size_t capacity = alloc_capacity(region);
 743   assert(_partitions.membership(idx) == ShenandoahFreeSetPartitionId::NotFree,
 744          "Regions promoted in place should have been excluded from Mutator partition");
 745   if (capacity >= plab_min_size_in_bytes) {
 746     _partitions.make_free(idx, ShenandoahFreeSetPartitionId::OldCollector, capacity);
 747     _heap->old_generation()->augment_promoted_reserve(capacity);
 748   }
 749 }
 750 
 751 HeapWord* ShenandoahFreeSet::allocate_from_partition_with_affiliation(ShenandoahAffiliation affiliation,
 752                                                                       ShenandoahAllocRequest& req, bool& in_new_region) {
 753 
 754   shenandoah_assert_heaplocked();
 755   ShenandoahFreeSetPartitionId which_partition = req.is_old() ? ShenandoahFreeSetPartitionId::OldCollector : ShenandoahFreeSetPartitionId::Collector;
 756   if (_partitions.alloc_from_left_bias(which_partition)) {
 757     ShenandoahLeftRightIterator iterator(&_partitions, which_partition, affiliation == ShenandoahAffiliation::FREE);
 758     return allocate_with_affiliation(iterator, affiliation, req, in_new_region);
 759   } else {
 760     ShenandoahRightLeftIterator iterator(&_partitions, which_partition, affiliation == ShenandoahAffiliation::FREE);
 761     return allocate_with_affiliation(iterator, affiliation, req, in_new_region);
 762   }
 763 }
 764 
 765 template<typename Iter>
 766 HeapWord* ShenandoahFreeSet::allocate_with_affiliation(Iter& iterator, ShenandoahAffiliation affiliation, ShenandoahAllocRequest& req, bool& in_new_region) {
 767   for (idx_t idx = iterator.current(); iterator.has_next(); idx = iterator.next()) {
 768     ShenandoahHeapRegion* r = _heap->get_region(idx);
 769     if (r->affiliation() == affiliation) {
 770       HeapWord* result = try_allocate_in(r, req, in_new_region);
 771       if (result != nullptr) {
 772         return result;
 773       }
 774     }
 775   }
 776   log_debug(gc, free)("Could not allocate collector region with affiliation: %s for request " PTR_FORMAT,
 777                       shenandoah_affiliation_name(affiliation), p2i(&req));
 778   return nullptr;
 779 }
 780 
 781 HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
 782   shenandoah_assert_heaplocked();
 783 
 784   // Scan the bitmap looking for a first fit.
 785   //
 786   // Leftmost and rightmost bounds provide enough caching to walk bitmap efficiently. Normally,
 787   // we would find the region to allocate at right away.
 788   //
 789   // Allocations are biased: GC allocations are taken from the high end of the heap.  Regular (and TLAB)
 790   // mutator allocations are taken from the middle of heap, below the memory reserved for Collector.
 791   // Humongous mutator allocations are taken from the bottom of the heap.
 792   //
 793   // Free set maintains mutator and collector partitions.  Normally, each allocates only from its partition,
 794   // except in special cases when the collector steals regions from the mutator partition.
 795 
 796   // Overwrite with non-zero (non-NULL) values only if necessary for allocation bookkeeping.
 797 
 798   switch (req.type()) {
 799     case ShenandoahAllocRequest::_alloc_tlab:
 800     case ShenandoahAllocRequest::_alloc_shared:
 801       return allocate_for_mutator(req, in_new_region);

































































 802     case ShenandoahAllocRequest::_alloc_gclab:
 803     case ShenandoahAllocRequest::_alloc_plab:
 804     case ShenandoahAllocRequest::_alloc_shared_gc:
 805       return allocate_for_collector(req, in_new_region);
 806     default:
 807       ShouldNotReachHere();
 808   }
 809   return nullptr;
 810 }






 811 
 812 HeapWord* ShenandoahFreeSet::allocate_for_mutator(ShenandoahAllocRequest &req, bool &in_new_region) {
 813   update_allocation_bias();


 814 
 815   if (_partitions.is_empty(ShenandoahFreeSetPartitionId::Mutator)) {
 816     // There is no recovery. Mutator does not touch collector view at all.
 817     return nullptr;
 818   }
 819 
 820   // Try to allocate in the mutator view
 821   if (_partitions.alloc_from_left_bias(ShenandoahFreeSetPartitionId::Mutator)) {
 822     // Allocate from low to high memory.  This keeps the range of fully empty regions more tightly packed.
 823     // Note that the most recently allocated regions tend not to be evacuated in a given GC cycle.  So this
 824     // tends to accumulate "fragmented" uncollected regions in high memory.
 825     ShenandoahLeftRightIterator iterator(&_partitions, ShenandoahFreeSetPartitionId::Mutator);
 826     return allocate_from_regions(iterator, req, in_new_region);
 827   }
 828 
 829   // Allocate from high to low memory. This preserves low memory for humongous allocations.
 830   ShenandoahRightLeftIterator iterator(&_partitions, ShenandoahFreeSetPartitionId::Mutator);
 831   return allocate_from_regions(iterator, req, in_new_region);
 832 }
 833 
 834 void ShenandoahFreeSet::update_allocation_bias() {
 835   if (_alloc_bias_weight-- <= 0) {
 836     // We have observed that regions not collected in previous GC cycle tend to congregate at one end or the other
 837     // of the heap.  Typically, these are the more recently engaged regions and the objects in these regions have not
 838     // yet had a chance to die (and/or are treated as floating garbage).  If we use the same allocation bias on each
 839     // GC pass, these "most recently" engaged regions for GC pass N will also be the "most recently" engaged regions
 840     // for GC pass N+1, and the relatively large amount of live data and/or floating garbage introduced
 841     // during the most recent GC pass may once again prevent the region from being collected.  We have found that
 842     // alternating the allocation behavior between GC passes improves evacuation performance by 3-7% on certain
 843     // benchmarks.  In the best case, this has the effect of consuming these partially consumed regions before
 844     // the start of the next mark cycle so all of their garbage can be efficiently reclaimed.
 845     //
 846     // First, finish consuming regions that are already partially consumed so as to more tightly limit ranges of
 847     // available regions.  Other potential benefits:
 848     //  1. Eventual collection set has fewer regions because we have packed newly allocated objects into fewer regions
 849     //  2. We preserve the "empty" regions longer into the GC cycle, reducing likelihood of allocation failures
 850     //     late in the GC cycle.
 851     idx_t non_empty_on_left = (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator)
 852                                - _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator));
 853     idx_t non_empty_on_right = (_partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator)
 854                                 - _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator));
 855     _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::Mutator, (non_empty_on_right < non_empty_on_left));
 856     _alloc_bias_weight = INITIAL_ALLOC_BIAS_WEIGHT;
 857   }
 858 }
 859 
 860 template<typename Iter>
 861 HeapWord* ShenandoahFreeSet::allocate_from_regions(Iter& iterator, ShenandoahAllocRequest &req, bool &in_new_region) {
 862   for (idx_t idx = iterator.current(); iterator.has_next(); idx = iterator.next()) {
 863     ShenandoahHeapRegion* r = _heap->get_region(idx);
 864     size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab) ? req.min_size() : req.size();
 865     if (alloc_capacity(r) >= min_size) {
 866       HeapWord* result = try_allocate_in(r, req, in_new_region);
 867       if (result != nullptr) {
 868         return result;
 869       }
 870     }
 871   }
 872   return nullptr;
 873 }
 874 
 875 HeapWord* ShenandoahFreeSet::allocate_for_collector(ShenandoahAllocRequest &req, bool &in_new_region) {
 876   // Fast-path: try to allocate in the collector view first
 877   HeapWord* result;
 878   result = allocate_from_partition_with_affiliation(req.affiliation(), req, in_new_region);
 879   if (result != nullptr) {
 880     return result;
 881   }
 882 
 883   bool allow_new_region = can_allocate_in_new_region(req);
 884   if (allow_new_region) {
 885     // Try a free region that is dedicated to GC allocations.
 886     result = allocate_from_partition_with_affiliation(ShenandoahAffiliation::FREE, req, in_new_region);
 887     if (result != nullptr) {
 888       return result;
 889     }
 890   }
 891 
 892   // No dice. Can we borrow space from mutator view?
 893   if (!ShenandoahEvacReserveOverflow) {
 894     return nullptr;
 895   }
 896 
 897   if (!allow_new_region && req.is_old() && (_heap->young_generation()->free_unaffiliated_regions() > 0)) {
 898     // This allows us to flip a mutator region to old_collector
 899     allow_new_region = true;
 900   }
 901 
 902   // We should expand old-gen if this can prevent an old-gen evacuation failure.  We don't care so much about
 903   // promotion failures since they can be mitigated in a subsequent GC pass.  Would be nice to know if this
 904   // allocation request is for evacuation or promotion.  Individual threads limit their use of PLAB memory for
 905   // promotions, so we already have an assurance that any additional memory set aside for old-gen will be used
 906   // only for old-gen evacuations.
 907   if (allow_new_region) {
 908     // Try to steal an empty region from the mutator view.
 909     result = try_allocate_from_mutator(req, in_new_region);
 910   }
 911 
 912   // This is it. Do not try to mix mutator and GC allocations, because adjusting region UWM
 913   // due to GC allocations would expose unparsable mutator allocations.
 914   return result;
 915 }
 916 
 917 bool ShenandoahFreeSet::can_allocate_in_new_region(const ShenandoahAllocRequest& req) {
 918   if (!_heap->mode()->is_generational()) {
 919     return true;
 920   }
 921 
 922   assert(req.is_old() || req.is_young(), "Should request affiliation");
 923   return (req.is_old() && _heap->old_generation()->free_unaffiliated_regions() > 0)
 924          || (req.is_young() && _heap->young_generation()->free_unaffiliated_regions() > 0);
 925 }
 926 
 927 HeapWord* ShenandoahFreeSet::try_allocate_from_mutator(ShenandoahAllocRequest& req, bool& in_new_region) {
 928   // The collector prefers to keep longer lived regions toward the right side of the heap, so it always
 929   // searches for regions from right to left here.
 930   ShenandoahRightLeftIterator iterator(&_partitions, ShenandoahFreeSetPartitionId::Mutator, true);
 931   for (idx_t idx = iterator.current(); iterator.has_next(); idx = iterator.next()) {
 932     ShenandoahHeapRegion* r = _heap->get_region(idx);
 933     if (can_allocate_from(r)) {
 934       if (req.is_old()) {
 935         flip_to_old_gc(r);
 936       } else {
 937         flip_to_gc(r);
 938       }
 939       // Region r is entirely empty.  If try_allocate_in fails on region r, something else is really wrong.
 940       // Don't bother to retry with other regions.
 941       log_debug(gc, free)("Flipped region " SIZE_FORMAT " to gc for request: " PTR_FORMAT, idx, p2i(&req));
 942       return try_allocate_in(r, req, in_new_region);
 943     }


 944   }
 945 
 946   return nullptr;
 947 }
 948 
 949 // This work method takes an argument corresponding to the number of bytes
 950 // free in a region, and returns the largest amount in heapwords that can be allocated
 951 // such that both of the following conditions are satisfied:
 952 //
 953 // 1. it is a multiple of card size
 954 // 2. any remaining shard may be filled with a filler object
 955 //
 956 // The idea is that the allocation starts and ends at card boundaries. Because
 957 // the end of a region is card-aligned, the remainder shard that must be filled is
 958 // at the start of the free space.
 959 //
 960 // This is merely a helper method to use for the purpose of such a calculation.
 961 size_t ShenandoahFreeSet::get_usable_free_words(size_t free_bytes) const {
 962   // e.g. card_size is 512, card_shift is 9, min_fill_size() is 8
 963   //      free is 514
 964   //      usable_free is 512, which is decreased to 0
 965   size_t usable_free = (free_bytes / CardTable::card_size()) << CardTable::card_shift();
 966   assert(usable_free <= free_bytes, "Sanity check");
 967   if ((free_bytes != usable_free) && (free_bytes - usable_free < ShenandoahHeap::min_fill_size() * HeapWordSize)) {
 968     // After aligning to card multiples, the remainder would be smaller than
 969     // the minimum filler object, so we'll need to take away another card's
 970     // worth to construct a filler object.
 971     if (usable_free >= CardTable::card_size()) {
 972       usable_free -= CardTable::card_size();
 973     } else {
 974       assert(usable_free == 0, "usable_free is a multiple of card_size and card_size > min_fill_size");
 975     }
 976   }
 977 
 978   return usable_free / HeapWordSize;
 979 }
 980 
 981 // Given a size argument, which is a multiple of card size, a request struct
 982 // for a PLAB, and an old region, return a pointer to the allocated space for
 983 // a PLAB which is card-aligned and where any remaining shard in the region
 984 // has been suitably filled by a filler object.
 985 // It is assumed (and assertion-checked) that such an allocation is always possible.
 986 HeapWord* ShenandoahFreeSet::allocate_aligned_plab(size_t size, ShenandoahAllocRequest& req, ShenandoahHeapRegion* r) {
 987   assert(_heap->mode()->is_generational(), "PLABs are only for generational mode");
 988   assert(r->is_old(), "All PLABs reside in old-gen");
 989   assert(!req.is_mutator_alloc(), "PLABs should not be allocated by mutators.");
 990   assert(is_aligned(size, CardTable::card_size_in_words()), "Align by design");
 991 
 992   HeapWord* result = r->allocate_aligned(size, req, CardTable::card_size());
 993   assert(result != nullptr, "Allocation cannot fail");
 994   assert(r->top() <= r->end(), "Allocation cannot span end of region");
 995   assert(is_aligned(result, CardTable::card_size_in_words()), "Align by design");
 996   return result;
 997 }
 998 
 999 HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
1000   assert (has_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());
1001   if (_heap->is_concurrent_weak_root_in_progress() && r->is_trash()) {
1002     return nullptr;
1003   }

1004   HeapWord* result = nullptr;
1005   try_recycle_trashed(r);
1006   in_new_region = r->is_empty();
1007 
1008   if (in_new_region) {
1009     log_debug(gc)("Using new region (" SIZE_FORMAT ") for %s (" PTR_FORMAT ").",
1010                        r->index(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(&req));
1011     assert(!r->is_affiliated(), "New region " SIZE_FORMAT " should be unaffiliated", r->index());
1012     r->set_affiliation(req.affiliation());
1013     if (r->is_old()) {
1014       // Any OLD region allocated during concurrent coalesce-and-fill does not need to be coalesced and filled because
1015       // all objects allocated within this region are above TAMS (and thus are implicitly marked).  In case this is an
1016       // OLD region and concurrent preparation for mixed evacuations visits this region before the start of the next
1017       // old-gen concurrent mark (i.e. this region is allocated following the start of old-gen concurrent mark but before
1018       // concurrent preparations for mixed evacuations are completed), we mark this region as not requiring any
1019       // coalesce-and-fill processing.
1020       r->end_preemptible_coalesce_and_fill();
1021       _heap->old_generation()->clear_cards_for(r);
1022     }
1023     _heap->generation_for(r->affiliation())->increment_affiliated_region_count();
1024 
1025 #ifdef ASSERT
1026     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
1027     assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom");
1028     assert(ctx->is_bitmap_clear_range(ctx->top_bitmap(r), r->end()), "Bitmap above top_bitmap() must be clear");
1029 #endif
1030     log_debug(gc)("Using new region (" SIZE_FORMAT ") for %s (" PTR_FORMAT ").",
1031                        r->index(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(&req));
1032   } else {
1033     assert(r->is_affiliated(), "Region " SIZE_FORMAT " that is not new should be affiliated", r->index());
1034     if (r->affiliation() != req.affiliation()) {
1035       assert(_heap->mode()->is_generational(), "Request for %s from %s region should only happen in generational mode.",
1036              req.affiliation_name(), r->affiliation_name());
1037       return nullptr;
1038     }
1039   }
1040 
1041   // req.size() is in words, r->free() is in bytes.
1042   if (req.is_lab_alloc()) {

1043     size_t adjusted_size = req.size();
1044     size_t free = r->free();    // free represents bytes available within region r
1045     if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
1046       // This is a PLAB allocation
1047       assert(_heap->mode()->is_generational(), "PLABs are only for generational mode");
1048       assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, r->index()),
1049              "PLABS must be allocated in old_collector_free regions");
1050 
1051       // Need to ensure that PLABs are aligned on a multiple of the card size
1052       // Convert free from unaligned bytes to aligned number of words
1053       size_t usable_free = get_usable_free_words(free);
1054       if (adjusted_size > usable_free) {
1055         adjusted_size = usable_free;
1056       }
1057       adjusted_size = align_down(adjusted_size, CardTable::card_size_in_words());
1058       if (adjusted_size >= req.min_size()) {
1059         result = allocate_aligned_plab(adjusted_size, req, r);
1060         assert(result != nullptr, "allocate must succeed");
1061         req.set_actual_size(adjusted_size);
1062       } else {
1063         // Otherwise, leave result == nullptr because the adjusted size is smaller than min size.
1064         log_trace(gc, free)("Failed to shrink PLAB request (" SIZE_FORMAT ") in region " SIZE_FORMAT " to " SIZE_FORMAT
1065                             " because min_size() is " SIZE_FORMAT, req.size(), r->index(), adjusted_size, req.min_size());
1066       }
1067     } else {
1068       // This is a GCLAB or a TLAB allocation
1069       // Convert free from unaligned bytes to aligned number of words
1070       free = align_down(free >> LogHeapWordSize, MinObjAlignment);
1071       if (adjusted_size > free) {
1072         adjusted_size = free;
1073       }
1074       if (adjusted_size >= req.min_size()) {
1075         result = r->allocate(adjusted_size, req);
1076         assert (result != nullptr, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, adjusted_size);
1077         req.set_actual_size(adjusted_size);
1078       } else {
1079         log_trace(gc, free)("Failed to shrink TLAB or GCLAB request (" SIZE_FORMAT ") in region " SIZE_FORMAT " to " SIZE_FORMAT
1080                             " because min_size() is " SIZE_FORMAT, req.size(), r->index(), adjusted_size, req.min_size());
1081       }
1082     }
1083   } else {
1084     size_t size = req.size();
1085     result = r->allocate(size, req);
1086     if (result != nullptr) {
1087       // Record actual allocation size




1088       req.set_actual_size(size);
1089     }
1090   }
1091 
1092   if (result != nullptr) {
1093     // Allocation successful, bump stats:
1094     if (req.is_mutator_alloc()) {
1095       assert(req.is_young(), "Mutator allocations always come from young generation.");
1096       _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, req.actual_size() * HeapWordSize);
1097     } else {
1098       assert(req.is_gc_alloc(), "Should be gc_alloc since req wasn't mutator alloc");
1099 
1100       // For GC allocations, we advance update_watermark because the objects relocated into this memory during
1101       // evacuation are not updated during evacuation.  For both young and old regions r, it is essential that all
1102       // PLABs be made parsable at the end of evacuation.  This is enabled by retiring all plabs at end of evacuation.
1103       r->set_update_watermark(r->top());
1104       if (r->is_old()) {
1105         _partitions.increase_used(ShenandoahFreeSetPartitionId::OldCollector, req.actual_size() * HeapWordSize);
1106         assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "old-gen allocations use PLAB or shared allocation");
1107         // for plabs, we'll sort the difference between evac and promotion usage when we retire the plab
1108       } else {
1109         _partitions.increase_used(ShenandoahFreeSetPartitionId::Collector, req.actual_size() * HeapWordSize);
1110       }
1111     }
1112   }
1113 
1114   static const size_t min_capacity = (size_t) (ShenandoahHeapRegion::region_size_bytes() * (1.0 - 1.0 / ShenandoahEvacWaste));
1115   size_t ac = alloc_capacity(r);
1116 
1117   if (((result == nullptr) && (ac < min_capacity)) || (ac < PLAB::min_size() * HeapWordSize)) {
1118     // Regardless of whether this allocation succeeded, if the remaining memory is less than PLAB::min_size(), retire this region.
1119     // Note that retire_from_partition() increases used to account for waste.
1120 
1121     // Also, if this allocation request failed and the consumed within this region * ShenandoahEvacWaste > region size,
1122     // then retire the region so that subsequent searches can find available memory more quickly.
1123 
1124     size_t idx = r->index();
1125     ShenandoahFreeSetPartitionId orig_partition;
1126     if (req.is_mutator_alloc()) {
1127       orig_partition = ShenandoahFreeSetPartitionId::Mutator;
1128     } else if (req.type() == ShenandoahAllocRequest::_alloc_gclab) {
1129       orig_partition = ShenandoahFreeSetPartitionId::Collector;
1130     } else if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
1131       orig_partition = ShenandoahFreeSetPartitionId::OldCollector;
1132     } else {
1133       assert(req.type() == ShenandoahAllocRequest::_alloc_shared_gc, "Unexpected allocation type");
1134       if (req.is_old()) {
1135         orig_partition = ShenandoahFreeSetPartitionId::OldCollector;
1136       } else {
1137         orig_partition = ShenandoahFreeSetPartitionId::Collector;
1138       }
1139     }
1140     _partitions.retire_from_partition(orig_partition, idx, r->used());
1141     _partitions.assert_bounds();
1142   }
1143   return result;
1144 }
1145 
1146 HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
1147   assert(req.is_mutator_alloc(), "All humongous allocations are performed by mutator");
1148   shenandoah_assert_heaplocked();
1149 
1150   size_t words_size = req.size();
1151   idx_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
1152 
1153   assert(req.is_young(), "Humongous regions always allocated in YOUNG");
1154   ShenandoahGeneration* generation = _heap->generation_for(req.affiliation());
1155 
1156   // Check if there are enough regions left to satisfy allocation.
1157   if (num > (idx_t) _partitions.count(ShenandoahFreeSetPartitionId::Mutator)) {
1158     return nullptr;
1159   }
1160 
1161   idx_t start_range = _partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator);
1162   idx_t end_range = _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator) + 1;
1163   idx_t last_possible_start = end_range - num;
1164 
1165   // Find the continuous interval of $num regions, starting from $beg and ending in $end,
1166   // inclusive. Contiguous allocations are biased to the beginning.
1167   idx_t beg = _partitions.find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId::Mutator,
1168                                                                           start_range, num);
1169   if (beg > last_possible_start) {
1170     // Hit the end, goodbye
1171     return nullptr;
1172   }
1173   idx_t end = beg;
1174 
1175   while (true) {
1176     // We've confirmed num contiguous regions belonging to Mutator partition, so no need to confirm membership.
1177     // If region is not completely free, the current [beg; end] is useless, and we may fast-forward.  If we can extend
1178     // the existing range, we can exploit that certain regions are already known to be in the Mutator free set.
1179     while (!can_allocate_from(_heap->get_region(end))) {
1180       // region[end] is not empty, so we restart our search after region[end]
1181       idx_t slide_delta = end + 1 - beg;
1182       if (beg + slide_delta > last_possible_start) {
1183         // no room to slide
1184         return nullptr;
1185       }
1186       for (idx_t span_end = beg + num; slide_delta > 0; slide_delta--) {
1187         if (!_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, span_end)) {
1188           beg = _partitions.find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId::Mutator,

1215     ShenandoahHeapRegion* r = _heap->get_region(i);
1216     try_recycle_trashed(r);
1217 
1218     assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous");
1219     assert(r->is_empty(), "Should be empty");
1220 
1221     if (i == beg) {
1222       r->make_humongous_start();
1223     } else {
1224       r->make_humongous_cont();
1225     }
1226 
1227     // Trailing region may be non-full, record the remainder there
1228     size_t used_words;
1229     if ((i == end) && (remainder != 0)) {
1230       used_words = remainder;
1231     } else {
1232       used_words = ShenandoahHeapRegion::region_size_words();
1233     }
1234 
1235     r->set_affiliation(req.affiliation());
1236     r->set_update_watermark(r->bottom());
1237     r->set_top(r->bottom() + used_words);
1238   }
1239   generation->increase_affiliated_region_count(num);
1240   if (remainder != 0) {
1241     // Record this remainder as allocation waste
1242     _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);
1243   }
1244 
1245   // retire_range_from_partition() will adjust bounds on Mutator free set if appropriate
1246   _partitions.retire_range_from_partition(ShenandoahFreeSetPartitionId::Mutator, beg, end);
1247 
1248   size_t total_humongous_size = ShenandoahHeapRegion::region_size_bytes() * num;
1249   _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, total_humongous_size);
1250   _partitions.assert_bounds();
1251   req.set_actual_size(words_size);
1252   if (remainder != 0) {
1253     req.set_waste(ShenandoahHeapRegion::region_size_words() - remainder);
1254   }
1255   return _heap->get_region(beg)->bottom();
1256 }
1257 
1258 void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion* r) {
1259   if (r->is_trash()) {

1260     r->recycle();
1261   }
1262 }
1263 
1264 void ShenandoahFreeSet::recycle_trash() {
1265   // The lock is not reentrant; check that we do not already hold it.
1266   shenandoah_assert_not_heaplocked();
1267   size_t count = 0;
1268   for (size_t i = 0; i < _heap->num_regions(); i++) {
1269     ShenandoahHeapRegion* r = _heap->get_region(i);
1270     if (r->is_trash()) {
1271       _trash_regions[count++] = r;
1272     }
1273   }
1274 
1275   size_t total_batches = 0;
1276   jlong batch_start_time = 0;
1277   jlong recycle_trash_start_time = os::javaTimeNanos();    // This value will be treated as the initial batch_start_time
1278   jlong batch_end_time = recycle_trash_start_time;
1279   // Process as many batches as can be processed within 10 us.

1297       // to be processed between yields most of the time.
1298       //
1299       // Note that deadline is enforced since the end of previous batch.  In the case that yield() or acquisition of heap lock
1300       // takes a "long time", we will have less time to process regions, but we will always process at least one batch between
1301       // yields.  Yielding more frequently when there is heavy contention for the heap lock or for CPU cores is considered the
1302       // right thing to do.
1303       const size_t REGIONS_PER_BATCH = 32;
1304       size_t max_idx = MIN2(count, idx + REGIONS_PER_BATCH);
1305       while (idx < max_idx) {
1306         try_recycle_trashed(_trash_regions[idx++]);
1307       }
1308       total_batches++;
1309       batch_end_time = os::javaTimeNanos();
1310       // Estimate includes historic combination of yield times and heap lock acquisition times.
1311       batch_process_time_estimate = (batch_end_time - recycle_trash_start_time) / total_batches;
1312       predicted_next_batch_end_time = batch_end_time + batch_process_time_estimate;
1313     } while ((idx < count) && (predicted_next_batch_end_time < deadline));
1314   }
1315 }
1316 
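The batching loop above amortizes heap-lock and yield overhead: it processes fixed-size batches and uses the running average batch time to predict whether one more batch fits before the deadline. A self-contained sketch of that control structure (the 32-item batch size mirrors REGIONS_PER_BATCH above; the clock, budget, and work items are assumptions for illustration):

// Sketch only: deadline-driven batch processing with a running time estimate.
#include <algorithm>
#include <chrono>
#include <vector>

template <typename Work>
void process_in_batches(std::vector<Work*>& items, void (*process)(Work*),
                        std::chrono::nanoseconds budget) {
  using clock = std::chrono::steady_clock;
  const size_t BATCH = 32;                    // matches REGIONS_PER_BATCH above
  const auto start = clock::now();
  const auto deadline = start + budget;       // assumed; the VM derives its own deadline
  size_t idx = 0, batches = 0;
  auto predicted_end = start;
  do {
    size_t max_idx = std::min(items.size(), idx + BATCH);
    while (idx < max_idx) {
      process(items[idx++]);                  // one item per iteration, as above
    }
    batches++;
    auto now = clock::now();
    // Average cost per batch so far predicts whether one more batch fits.
    auto estimate = (now - start) / batches;
    predicted_end = now + estimate;
  } while (idx < items.size() && predicted_end < deadline);
}
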
1317 void ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) {
1318   size_t idx = r->index();
1319 
1320   assert(_partitions.partition_id_matches(idx, ShenandoahFreeSetPartitionId::Mutator), "Should be in mutator view");
1321   assert(can_allocate_from(r), "Should not be allocated");
1322 
1323   ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
1324   size_t region_capacity = alloc_capacity(r);
1325   _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
1326                                                ShenandoahFreeSetPartitionId::OldCollector, region_capacity);
1327   _partitions.assert_bounds();
1328   _heap->old_generation()->augment_evacuation_reserve(region_capacity);
1329   bool transferred = gen_heap->generation_sizer()->transfer_to_old(1);
1330   if (!transferred) {
1331     log_warning(gc, free)("Forcing transfer of region " SIZE_FORMAT " to old reserve.", idx);
1332     gen_heap->generation_sizer()->force_transfer_to_old(1);
1333   }
1334   // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
1335   // to recycle trash before attempting to allocate anything in the region.
1336 }
1337 
1338 void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
1339   size_t idx = r->index();
1340 
1341   assert(_partitions.partition_id_matches(idx, ShenandoahFreeSetPartitionId::Mutator), "Should be in mutator view");
1342   assert(can_allocate_from(r), "Should not be allocated");
1343 
1344   size_t ac = alloc_capacity(r);
1345   _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
1346                                                ShenandoahFreeSetPartitionId::Collector, ac);
1347   _partitions.assert_bounds();
1348 
1349   // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
1350   // to recycle trash before attempting to allocate anything in the region.
1351 }
1352 
1353 void ShenandoahFreeSet::clear() {
1354   shenandoah_assert_heaplocked();
1355   clear_internal();
1356 }
1357 
1358 void ShenandoahFreeSet::clear_internal() {
1359   _partitions.make_all_regions_unavailable();

1360 
1361   _alloc_bias_weight = 0;
1362   _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::Mutator, true);
1363   _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::Collector, false);
1364   _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::OldCollector, false);
1365 }
1366 
1367 void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regions, size_t &old_cset_regions,
1368                                                          size_t &first_old_region, size_t &last_old_region,
1369                                                          size_t &old_region_count) {
1370   clear_internal();
1371 
1372   first_old_region = _heap->num_regions();
1373   last_old_region = 0;
1374   old_region_count = 0;
1375   old_cset_regions = 0;
1376   young_cset_regions = 0;
1377 
1378   size_t region_size_bytes = _partitions.region_size_bytes();
1379   size_t max_regions = _partitions.max_regions();
1380 
1381   size_t mutator_leftmost = max_regions;
1382   size_t mutator_rightmost = 0;
1383   size_t mutator_leftmost_empty = max_regions;
1384   size_t mutator_rightmost_empty = 0;

1385   size_t mutator_regions = 0;
1386   size_t mutator_used = 0;
1387 
1388   size_t old_collector_leftmost = max_regions;
1389   size_t old_collector_rightmost = 0;
1390   size_t old_collector_leftmost_empty = max_regions;
1391   size_t old_collector_rightmost_empty = 0;
1392   size_t old_collector_regions = 0;
1393   size_t old_collector_used = 0;
1394 
1395   size_t num_regions = _heap->num_regions();
1396   for (size_t idx = 0; idx < num_regions; idx++) {
1397     ShenandoahHeapRegion* region = _heap->get_region(idx);
1398     if (region->is_trash()) {
1399       // Trashed regions represent regions that had been in the collection partition but have not yet been "cleaned up".
1400       // The cset regions are not "trashed" until we have finished update refs.
1401       if (region->is_old()) {
1402         old_cset_regions++;
1403       } else {
1404         assert(region->is_young(), "Trashed region should be old or young");
1405         young_cset_regions++;
1406       }
1407     } else if (region->is_old()) {
1408       // count both humongous and regular regions, but don't count trash (cset) regions.
1409       old_region_count++;
1410       if (first_old_region > idx) {
1411         first_old_region = idx;
1412       }
1413       last_old_region = idx;
1414     }
1415     if (region->is_alloc_allowed() || region->is_trash()) {
1416       assert(!region->is_cset(), "Shouldn't be adding cset regions to the free set");
1417 
1418       // Do not add regions that would almost surely fail allocation
1419       size_t ac = alloc_capacity(region);
1420       if (ac > PLAB::min_size() * HeapWordSize) {
1421         if (region->is_trash() || !region->is_old()) {
1422           // Both young and old collected regions (trashed) are placed into the Mutator set
1423           _partitions.raw_assign_membership(idx, ShenandoahFreeSetPartitionId::Mutator);
1424           if (idx < mutator_leftmost) {
1425             mutator_leftmost = idx;
1426           }
1427           if (idx > mutator_rightmost) {
1428             mutator_rightmost = idx;
1429           }
1430           if (ac == region_size_bytes) {
1431             if (idx < mutator_leftmost_empty) {
1432               mutator_leftmost_empty = idx;
1433             }
1434             if (idx > mutator_rightmost_empty) {
1435               mutator_rightmost_empty = idx;
1436             }
1437           }
1438           mutator_regions++;
1439           mutator_used += (region_size_bytes - ac);
1440         } else {
1441           // !region->is_trash() && region->is_old()
1442           _partitions.raw_assign_membership(idx, ShenandoahFreeSetPartitionId::OldCollector);
1443           if (idx < old_collector_leftmost) {
1444             old_collector_leftmost = idx;
1445           }
1446           if (idx > old_collector_rightmost) {
1447             old_collector_rightmost = idx;
1448           }
1449           if (ac == region_size_bytes) {
1450             if (idx < old_collector_leftmost_empty) {
1451               old_collector_leftmost_empty = idx;
1452             }
1453             if (idx > old_collector_rightmost_empty) {
1454               old_collector_rightmost_empty = idx;
1455             }
1456           }
1457           old_collector_regions++;
1458           old_collector_used += (region_size_bytes - ac);
1459         }







1460       }
1461     }
1462   }
1463   log_debug(gc)("  At end of prep_to_rebuild, mutator_leftmost: " SIZE_FORMAT
1464                 ", mutator_rightmost: " SIZE_FORMAT
1465                 ", mutator_leftmost_empty: " SIZE_FORMAT
1466                 ", mutator_rightmost_empty: " SIZE_FORMAT
1467                 ", mutator_regions: " SIZE_FORMAT
1468                 ", mutator_used: " SIZE_FORMAT,
1469                 mutator_leftmost, mutator_rightmost, mutator_leftmost_empty, mutator_rightmost_empty,
1470                 mutator_regions, mutator_used);
1471 
1472   log_debug(gc)("  old_collector_leftmost: " SIZE_FORMAT
1473                 ", old_collector_rightmost: " SIZE_FORMAT
1474                 ", old_collector_leftmost_empty: " SIZE_FORMAT
1475                 ", old_collector_rightmost_empty: " SIZE_FORMAT
1476                 ", old_collector_regions: " SIZE_FORMAT
1477                 ", old_collector_used: " SIZE_FORMAT,
1478                 old_collector_leftmost, old_collector_rightmost, old_collector_leftmost_empty, old_collector_rightmost_empty,
1479                 old_collector_regions, old_collector_used);
1480 
1481   idx_t rightmost_idx = (mutator_leftmost == max_regions)? -1: (idx_t) mutator_rightmost;
1482   idx_t rightmost_empty_idx = (mutator_leftmost_empty == max_regions)? -1: (idx_t) mutator_rightmost_empty;
1483   _partitions.establish_mutator_intervals(mutator_leftmost, rightmost_idx, mutator_leftmost_empty, rightmost_empty_idx,
1484                                           mutator_regions, mutator_used);
1485   rightmost_idx = (old_collector_leftmost == max_regions)? -1: (idx_t) old_collector_rightmost;
1486   rightmost_empty_idx = (old_collector_leftmost_empty == max_regions)? -1: (idx_t) old_collector_rightmost_empty;
1487   _partitions.establish_old_collector_intervals(old_collector_leftmost, rightmost_idx, old_collector_leftmost_empty,
1488                                                 rightmost_empty_idx, old_collector_regions, old_collector_used);
1489   log_debug(gc)("  After find_regions_with_alloc_capacity(), Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "],"
1490                 "  Old Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
1491                 _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
1492                 _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
1493                 _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector),
1494                 _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector));
1495 }
1496 
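find_regions_with_alloc_capacity() computes each partition's interval in a single pass, widening [leftmost, rightmost] as members are found and using max_regions as the "no members seen" sentinel, which establish_*_intervals() then encodes as -1. A minimal sketch of the pattern, independent of the free-set types:

// Sketch only: one-pass interval tracking with an "empty" sentinel.
#include <cstddef>
#include <cstdio>

int main() {
  const size_t n = 16;
  bool member[n] = {false};
  member[3] = member[7] = member[12] = true;   // assumed membership for illustration

  size_t leftmost = n;                         // sentinel: n means "no members seen"
  size_t rightmost = 0;
  for (size_t i = 0; i < n; i++) {
    if (member[i]) {
      if (i < leftmost)  { leftmost = i; }
      if (i > rightmost) { rightmost = i; }
    }
  }
  // Convert to the signed "empty" encoding used by establish_*_intervals() above.
  long right_or_empty = (leftmost == n) ? -1 : (long) rightmost;
  printf("leftmost=%zu rightmost=%ld\n", leftmost, right_or_empty);
  return 0;
}
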
1497 // Returns the number of regions transferred; adds the transferred bytes to the reference argument bytes_transferred
1498 size_t ShenandoahFreeSet::transfer_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId which_collector,
1499                                                                                    size_t max_xfer_regions,
1500                                                                                    size_t& bytes_transferred) {
1501   shenandoah_assert_heaplocked();
1502   const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1503   size_t transferred_regions = 0;
1504   ShenandoahLeftRightIterator iterator(&_partitions, which_collector, true);
1505   idx_t rightmost = _partitions.rightmost_empty(which_collector);
1506   for (idx_t idx = iterator.current(); transferred_regions < max_xfer_regions && iterator.has_next(); idx = iterator.next()) {
1507     // Note: can_allocate_from() denotes that region is entirely empty
1508     if (can_allocate_from(idx)) {
1509       _partitions.move_from_partition_to_partition(idx, which_collector, ShenandoahFreeSetPartitionId::Mutator, region_size_bytes);
1510       transferred_regions++;
1511       bytes_transferred += region_size_bytes;
1512     }
1513   }
1514   return transferred_regions;
1515 }
1516 
1517 // Returns the number of regions transferred; adds the transferred bytes to the reference argument bytes_transferred
1518 size_t ShenandoahFreeSet::transfer_non_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId which_collector,
1519                                                                                        size_t max_xfer_regions,
1520                                                                                        size_t& bytes_transferred) {
1521   shenandoah_assert_heaplocked();
1522   size_t transferred_regions = 0;
1523   ShenandoahLeftRightIterator iterator(&_partitions, which_collector, false);
1524   for (idx_t idx = iterator.current(); transferred_regions < max_xfer_regions && iterator.has_next(); idx = iterator.next()) {
1525     size_t ac = alloc_capacity(idx);
1526     if (ac > 0) {
1527       _partitions.move_from_partition_to_partition(idx, which_collector, ShenandoahFreeSetPartitionId::Mutator, ac);
1528       transferred_regions++;
1529       bytes_transferred += ac;
1530     }
1531   }
1532   return transferred_regions;
1533 }
1534 
1535 void ShenandoahFreeSet::move_regions_from_collector_to_mutator(size_t max_xfer_regions) {
1536   size_t collector_xfer = 0;
1537   size_t old_collector_xfer = 0;

1538 
1539   // Process empty regions within the Collector free partition
1540   if ((max_xfer_regions > 0) &&
1541       (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Collector)
1542        <= _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Collector))) {
1543     ShenandoahHeapLocker locker(_heap->lock());
1544     max_xfer_regions -=
1545       transfer_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId::Collector, max_xfer_regions,
1546                                                                collector_xfer);
1547   }
1548 
1549   // Process empty regions within the OldCollector free partition
1550   if ((max_xfer_regions > 0) &&
1551       (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::OldCollector)
1552        <= _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::OldCollector))) {
1553     ShenandoahHeapLocker locker(_heap->lock());
1554     size_t old_collector_regions =
1555       transfer_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId::OldCollector, max_xfer_regions,
1556                                                                old_collector_xfer);
1557     max_xfer_regions -= old_collector_regions;
1558     if (old_collector_regions > 0) {
1559       ShenandoahGenerationalHeap::cast(_heap)->generation_sizer()->transfer_to_young(old_collector_regions);
1560     }
1561   }
1562 
1563   // If there are any non-empty regions within the Collector partition, we can also move them to the Mutator free partition
1564   if ((max_xfer_regions > 0) && (_partitions.leftmost(ShenandoahFreeSetPartitionId::Collector)
1565                                  <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector))) {
1566     ShenandoahHeapLocker locker(_heap->lock());
1567     max_xfer_regions -=
1568       transfer_non_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId::Collector, max_xfer_regions,
1569                                                                    collector_xfer);











1570   }
1571 
1572   size_t total_xfer = collector_xfer + old_collector_xfer;
1573   log_info(gc, ergo)("At start of update refs, moving " SIZE_FORMAT "%s to Mutator free set from Collector Reserve ("
1574                      SIZE_FORMAT "%s) and from Old Collector Reserve (" SIZE_FORMAT "%s)",
1575                      byte_size_in_proper_unit(total_xfer), proper_unit_for_byte_size(total_xfer),
1576                      byte_size_in_proper_unit(collector_xfer), proper_unit_for_byte_size(collector_xfer),
1577                      byte_size_in_proper_unit(old_collector_xfer), proper_unit_for_byte_size(old_collector_xfer));
1578 }
1579 
1580 
1581 // Overwrite arguments to represent the amount of memory in each generation that is about to be recycled
1582 void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_cset_regions, size_t &old_cset_regions,
1583                                            size_t &first_old_region, size_t &last_old_region, size_t &old_region_count) {
1584   shenandoah_assert_heaplocked();
1585   // This resets all state information, removing all regions from all sets.
1586   clear();
1587   log_debug(gc, free)("Rebuilding FreeSet");
1588 
1589   // This places regions that have alloc_capacity into the old_collector set if they identify as is_old() or the
1590   // mutator set otherwise.  All trashed (cset) regions are affiliated young and placed in mutator set.
1591   find_regions_with_alloc_capacity(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
1592 }
1593 
1594 void ShenandoahFreeSet::establish_generation_sizes(size_t young_region_count, size_t old_region_count) {
1595   assert(young_region_count + old_region_count == ShenandoahHeap::heap()->num_regions(), "Sanity");
1596   if (ShenandoahHeap::heap()->mode()->is_generational()) {
1597     ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
1598     ShenandoahOldGeneration* old_gen = heap->old_generation();
1599     ShenandoahYoungGeneration* young_gen = heap->young_generation();
1600     size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1601 
1602     size_t original_old_capacity = old_gen->max_capacity();
1603     size_t new_old_capacity = old_region_count * region_size_bytes;
1604     size_t new_young_capacity = young_region_count * region_size_bytes;
1605     old_gen->set_capacity(new_old_capacity);
1606     young_gen->set_capacity(new_young_capacity);
1607 
1608     if (new_old_capacity > original_old_capacity) {
1609       size_t region_count = (new_old_capacity - original_old_capacity) / region_size_bytes;
1610       log_info(gc, ergo)("Transfer " SIZE_FORMAT " region(s) from %s to %s, yielding increased size: " PROPERFMT,
1611                          region_count, young_gen->name(), old_gen->name(), PROPERFMTARGS(new_old_capacity));
1612     } else if (new_old_capacity < original_old_capacity) {
1613       size_t region_count = (original_old_capacity - new_old_capacity) / region_size_bytes;
1614       log_info(gc, ergo)("Transfer " SIZE_FORMAT " region(s) from %s to %s, yielding increased size: " PROPERFMT,
1615                          region_count, old_gen->name(), young_gen->name(), PROPERFMTARGS(new_young_capacity));
1616     }
1617     // This balances generations, so clear any pending request to balance.
1618     old_gen->set_region_balance(0);
1619   }
1620 }
1621 
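The transfer count logged by establish_generation_sizes() is simply the capacity delta divided by the region size. A worked example under assumed sizes (2 MB regions, old capacity growing from 100 MB to 110 MB yields a 5-region transfer):

// Sketch only: region-count arithmetic behind the transfer log message above.
#include <cstddef>
#include <cstdio>

int main() {
  const size_t region_size_bytes = 2u * 1024 * 1024;    // assumed 2 MB regions
  const size_t original_old      = 100u * 1024 * 1024;  // assumed old capacity before
  const size_t new_old           = 110u * 1024 * 1024;  // assumed old capacity after
  size_t region_count = (new_old - original_old) / region_size_bytes;
  printf("transfer %zu region(s) young -> old\n", region_count);  // prints 5
  return 0;
}
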
1622 void ShenandoahFreeSet::finish_rebuild(size_t young_cset_regions, size_t old_cset_regions, size_t old_region_count,
1623                                        bool have_evacuation_reserves) {
1624   shenandoah_assert_heaplocked();
1625   size_t young_reserve(0), old_reserve(0);
1626 
1627   if (_heap->mode()->is_generational()) {
1628     compute_young_and_old_reserves(young_cset_regions, old_cset_regions, have_evacuation_reserves,
1629                                    young_reserve, old_reserve);






1630   } else {
1631     young_reserve = (_heap->max_capacity() / 100) * ShenandoahEvacReserve;
1632     old_reserve = 0;
1633   }
1634 
1635   // Move some of the mutator regions into the Collector and OldCollector partitions in order to satisfy
1636   // young_reserve and old_reserve.
1637   reserve_regions(young_reserve, old_reserve, old_region_count);
1638   size_t young_region_count = _heap->num_regions() - old_region_count;
1639   establish_generation_sizes(young_region_count, old_region_count);
1640   establish_old_collector_alloc_bias();
1641   _partitions.assert_bounds();
1642   log_status();
1643 }
1644 
1645 void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions, size_t old_cset_regions,
1646                                                        bool have_evacuation_reserves,
1647                                                        size_t& young_reserve_result, size_t& old_reserve_result) const {
1648   shenandoah_assert_generational();
1649   const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1650 
1651   ShenandoahOldGeneration* const old_generation = _heap->old_generation();
1652   size_t old_available = old_generation->available();
1653   size_t old_unaffiliated_regions = old_generation->free_unaffiliated_regions();
1654   ShenandoahYoungGeneration* const young_generation = _heap->young_generation();
1655   size_t young_capacity = young_generation->max_capacity();
1656   size_t young_unaffiliated_regions = young_generation->free_unaffiliated_regions();
1657 
1658   // Add in the regions we anticipate to be freed by evacuation of the collection set
1659   old_unaffiliated_regions += old_cset_regions;
1660   young_unaffiliated_regions += young_cset_regions;
1661 
1662   // Consult old-region balance to make adjustments to current generation capacities and availability.
1663   // The generation region transfers take place after we rebuild.
1664   const ssize_t old_region_balance = old_generation->get_region_balance();
1665   if (old_region_balance != 0) {
1666 #ifdef ASSERT
1667     if (old_region_balance > 0) {
1668       assert(old_region_balance <= checked_cast<ssize_t>(old_unaffiliated_regions), "Cannot transfer regions that are affiliated");
1669     } else {
1670       assert(0 - old_region_balance <= checked_cast<ssize_t>(young_unaffiliated_regions), "Cannot transfer regions that are affiliated");
1671     }
1672 #endif
1673 
1674     ssize_t xfer_bytes = old_region_balance * checked_cast<ssize_t>(region_size_bytes);
1675     old_available -= xfer_bytes;
1676     old_unaffiliated_regions -= old_region_balance;
1677     young_capacity += xfer_bytes;
1678     young_unaffiliated_regions += old_region_balance;
1679   }
1680 
1681   // All allocations taken from the old collector set are performed by GC, generally using PLABs for both
1682   // promotions and evacuations.  The partition between which old memory is reserved for evacuation and
1683   // which is reserved for promotion is enforced using thread-local variables that prescribe intentions for
1684   // each PLAB's available memory.
1685   if (have_evacuation_reserves) {
1686     // We are rebuilding at the end of final mark, having already established evacuation budgets for this GC pass.
1687     const size_t promoted_reserve = old_generation->get_promoted_reserve();
1688     const size_t old_evac_reserve = old_generation->get_evacuation_reserve();
1689     young_reserve_result = young_generation->get_evacuation_reserve();
1690     old_reserve_result = promoted_reserve + old_evac_reserve;
1691     assert(old_reserve_result <= old_available,
1692            "Cannot reserve (" SIZE_FORMAT " + " SIZE_FORMAT") more OLD than is available: " SIZE_FORMAT,
1693            promoted_reserve, old_evac_reserve, old_available);
1694   } else {
1695     // We are rebuilding at end of GC, so we set aside budgets specified on command line (or defaults)
1696     young_reserve_result = (young_capacity * ShenandoahEvacReserve) / 100;
1697     // The auto-sizer has already made old-gen large enough to hold all anticipated evacuations and promotions.
1698     // Affiliated old-gen regions are already in the OldCollector free set.  Add in the relevant number of
1699     // unaffiliated regions.
1700     old_reserve_result = old_available;
1701   }
1702 
1703   // Old available regions that have less than PLAB::min_size() of available memory are not placed into the OldCollector
1704   // free set.  Because of this, old_available may not have enough memory to represent the intended reserve.  Adjust
1705   // the reserve downward to account for this possibility. This loss is part of the reason why the original budget
1706   // was adjusted with ShenandoahOldEvacWaste and ShenandoahOldPromoWaste multipliers.
1707   if (old_reserve_result >
1708       _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes) {
1709     old_reserve_result =
1710       _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes;
1711   }
1712 
1713   if (young_reserve_result > young_unaffiliated_regions * region_size_bytes) {
1714     young_reserve_result = young_unaffiliated_regions * region_size_bytes;
1715   }
1716 }
1717 
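compute_young_and_old_reserves() ends by clamping each reserve to what its partition can actually represent. A compact sketch of the end-of-GC budget path and both clamps, with every input value assumed for illustration (evac_reserve_percent stands in for ShenandoahEvacReserve):

// Sketch only: reserve computation and clamping, on assumed inputs.
#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t region_size_bytes    = 2u * 1024 * 1024;   // assumed
  const size_t evac_reserve_percent = 5;                  // stands in for ShenandoahEvacReserve
  size_t young_capacity             = 400u * 1024 * 1024; // assumed
  size_t young_unaffiliated_regions = 8;                  // assumed
  size_t old_available              = 64u * 1024 * 1024;  // assumed
  size_t old_collector_capacity     = 32u * 1024 * 1024;  // assumed
  size_t old_unaffiliated_regions   = 4;                  // assumed

  // End-of-GC path: budget the young reserve from the command-line percentage.
  size_t young_reserve = (young_capacity * evac_reserve_percent) / 100;
  size_t old_reserve   = old_available;

  // Clamp each reserve to what its partition can actually hold.
  old_reserve   = std::min(old_reserve,
                           old_collector_capacity + old_unaffiliated_regions * region_size_bytes);
  young_reserve = std::min(young_reserve, young_unaffiliated_regions * region_size_bytes);
  printf("young=%zu old=%zu bytes\n", young_reserve, old_reserve);
  return 0;
}
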
1718 // Having placed all regions that have allocation capacity into the mutator set if they identify as is_young()
1719 // or into the old collector set if they identify as is_old(), move some of these regions from the mutator set
1720 // into the collector set or old collector set in order to ensure that the memory available for allocations within
1721 // the collector set is at least to_reserve and the memory available for allocations within the old collector set
1722 // is at least to_reserve_old.
1723 void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old, size_t &old_region_count) {
1724   for (size_t i = _heap->num_regions(); i > 0; i--) {
1725     size_t idx = i - 1;
1726     ShenandoahHeapRegion* r = _heap->get_region(idx);

1727     if (!_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx)) {
1728       continue;
1729     }
1730 
1731     size_t ac = alloc_capacity(r);
1732     assert (ac > 0, "Membership in free set implies has capacity");
1733     assert (!r->is_old() || r->is_trash(), "Except for trash, mutator_is_free regions should not be affiliated OLD");
1734 
1735     bool move_to_old_collector = _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector) < to_reserve_old;
1736     bool move_to_collector = _partitions.available_in(ShenandoahFreeSetPartitionId::Collector) < to_reserve;
1737 
1738     if (!move_to_collector && !move_to_old_collector) {
1739       // We've satisfied both to_reserve and to_reserve_old
1740       break;
1741     }
1742 
1743     if (move_to_old_collector) {
1744       // We give priority to OldCollector partition because we desire to pack OldCollector regions into higher
1745       // addresses than Collector regions.  Presumably, OldCollector regions are more "stable" and less likely to
1746       // be collected in the near future.
1747       if (r->is_trash() || !r->is_affiliated()) {
1748         // OLD regions that have available memory are already in the old_collector free set.
1749         _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
1750                                                      ShenandoahFreeSetPartitionId::OldCollector, ac);
1751         log_debug(gc)("  Shifting region " SIZE_FORMAT " from mutator_free to old_collector_free", idx);
1752         log_debug(gc)("  Shifted Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "],"
1753                       "  Old Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
1754                       _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
1755                       _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
1756                       _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector),
1757                       _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector));
1758         old_region_count++;
1759         continue;
1760       }
1761     }
1762 
1763     if (move_to_collector) {
1764       // Note: In a previous implementation, regions were only placed into the survivor space (collector_is_free) if
1765       // they were entirely empty.  This has the effect of causing new Mutator allocation to reside next to objects
1766       // that have already survived at least one GC, mixing ephemeral with longer-lived objects in the same region.
1767       // Any objects that have survived a GC are less likely to immediately become garbage, so a region that contains
1768       // survivor objects is less likely to be selected for the collection set.  This alternative implementation allows
1769       // survivor regions to continue accumulating other survivor objects, and makes it more likely that ephemeral objects
1770       // occupy regions comprised entirely of ephemeral objects.  These regions are highly likely to be included in the next
1771       // collection set, and they are easily evacuated because they have low density of live objects.
1772       _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
1773                                                    ShenandoahFreeSetPartitionId::Collector, ac);
1774       log_debug(gc)("  Shifting region " SIZE_FORMAT " from mutator_free to collector_free", idx);
1775       log_debug(gc)("  Shifted Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "],"
1776                     "  Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
1777                     _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
1778                     _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
1779                     _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector),
1780                     _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector));
1781     }
1782   }
1783 
1784   if (LogTarget(Info, gc, free)::is_enabled()) {
1785     size_t old_reserve = _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector);
1786     if (old_reserve < to_reserve_old) {
1787       log_info(gc, free)("Wanted " PROPERFMT " for old reserve, but only reserved: " PROPERFMT,
1788                          PROPERFMTARGS(to_reserve_old), PROPERFMTARGS(old_reserve));
1789     }
1790     size_t reserve = _partitions.available_in(ShenandoahFreeSetPartitionId::Collector);
1791     if (reserve < to_reserve) {
1792       log_info(gc, free)("Wanted " PROPERFMT " for young reserve, but only reserved: " PROPERFMT,
1793                          PROPERFMTARGS(to_reserve), PROPERFMTARGS(reserve));
1794     }
1795   }
1796 }
1797 
1798 void ShenandoahFreeSet::establish_old_collector_alloc_bias() {
1799   ShenandoahHeap* heap = ShenandoahHeap::heap();
1800   shenandoah_assert_heaplocked();
1801 
1802   idx_t left_idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector);
1803   idx_t right_idx = _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector);
1804   idx_t middle = (left_idx + right_idx) / 2;
1805   size_t available_in_first_half = 0;
1806   size_t available_in_second_half = 0;
1807 
1808   for (idx_t index = left_idx; index < middle; index++) {
1809     if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, index)) {
1810       ShenandoahHeapRegion* r = heap->get_region((size_t) index);
1811       available_in_first_half += r->free();
1812     }
1813   }
1814   for (idx_t index = middle; index <= right_idx; index++) {
1815     if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, index)) {
1816       ShenandoahHeapRegion* r = heap->get_region(index);
1817       available_in_second_half += r->free();
1818     }
1819   }
1820 
1821   // We desire to first consume the sparsely distributed regions in order that the remaining regions are densely packed.
1822   // Densely packing regions reduces the effort to search for a region that has sufficient memory to satisfy a new allocation
1823   // request.  Regions become sparsely distributed following a Full GC, which tends to slide all regions to the front of the
1824   // heap rather than allowing survivor regions to remain at the high end of the heap where we intend for them to congregate.
1825   _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::OldCollector,
1826                                           (available_in_second_half > available_in_first_half));
1827 }
1828 
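The bias heuristic above compares free memory in the two halves of the OldCollector interval and allocates toward the sparser half first, so that surviving regions stay densely packed. The same decision over an assumed array of per-region free bytes:

// Sketch only: pick an allocation direction by comparing free space per half.
#include <cstddef>
#include <cstdio>

int main() {
  const size_t free_bytes[] = {0, 8, 0, 0, 64, 96, 128, 64};  // assumed per-region free
  const size_t n = sizeof(free_bytes) / sizeof(free_bytes[0]);
  size_t middle = n / 2, first_half = 0, second_half = 0;
  for (size_t i = 0; i < middle; i++) { first_half  += free_bytes[i]; }
  for (size_t i = middle; i < n; i++) { second_half += free_bytes[i]; }
  // Consume the sparser half first so the denser half stays packed.
  bool left_to_right = (second_half > first_half);
  printf("allocate %s\n", left_to_right ? "left to right" : "right to left");
  return 0;
}
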
1829 void ShenandoahFreeSet::log_status_under_lock() {
1830   // Must not be called with the heap lock held; this acquires the heap lock only when logging is enabled
1831   shenandoah_assert_not_heaplocked();
1832   if (LogTarget(Info, gc, free)::is_enabled()
1833       DEBUG_ONLY(|| LogTarget(Debug, gc, free)::is_enabled())) {
1834     ShenandoahHeapLocker locker(_heap->lock());
1835     log_status();
1836   }
1837 }
1838 
1839 void ShenandoahFreeSet::log_status() {
1840   shenandoah_assert_heaplocked();
1841 
1842 #ifdef ASSERT
1843   // Dump of the FreeSet details is only enabled if assertions are enabled
1844   if (LogTarget(Debug, gc, free)::is_enabled()) {
1845 #define BUFFER_SIZE 80





1846 
1847     char buffer[BUFFER_SIZE];
1848     for (uint i = 0; i < BUFFER_SIZE; i++) {
1849       buffer[i] = '\0';
1850     }
1851 
1852     log_debug(gc)("FreeSet map legend:"
1853                        " M:mutator_free C:collector_free O:old_collector_free"
1854                        " H:humongous ~:retired old _:retired young");
1855     log_debug(gc)(" mutator free range [" SIZE_FORMAT ".." SIZE_FORMAT "] allocates from %s, "
1856                   " collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "], "
1857                   "old collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "] allocates from %s",
1858                   _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
1859                   _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
1860                   _partitions.alloc_from_left_bias(ShenandoahFreeSetPartitionId::Mutator)? "left to right": "right to left",
1861                   _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector),
1862                   _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector),
1863                   _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector),
1864                   _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector),
1865                   _partitions.alloc_from_left_bias(ShenandoahFreeSetPartitionId::OldCollector)? "left to right": "right to left");
1866 
1867     for (uint i = 0; i < _heap->num_regions(); i++) {
1868       ShenandoahHeapRegion *r = _heap->get_region(i);
1869       uint idx = i % 64;
1870       if ((i != 0) && (idx == 0)) {
1871         log_debug(gc)(" %6u: %s", i-64, buffer);
1872       }
1873       if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, i)) {
1874         size_t capacity = alloc_capacity(r);
1875         assert(!r->is_old() || r->is_trash(), "Old regions except trash regions should not be in mutator_free set");
1876         buffer[idx] = (capacity == ShenandoahHeapRegion::region_size_bytes()) ? 'M' : 'm';

1877       } else if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, i)) {
1878         size_t capacity = alloc_capacity(r);
1879         assert(!r->is_old() || r->is_trash(), "Old regions except trash regions should not be in collector_free set");
1880         buffer[idx] = (capacity == ShenandoahHeapRegion::region_size_bytes()) ? 'C' : 'c';
1881       } else if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, i)) {
1882         size_t capacity = alloc_capacity(r);
1883         buffer[idx] = (capacity == ShenandoahHeapRegion::region_size_bytes()) ? 'O' : 'o';
1884       } else if (r->is_humongous()) {
1885         if (r->is_old()) {
1886           buffer[idx] = 'H';
1887         } else {
1888           buffer[idx] = 'h';
1889         }
1890       } else {
1891         if (r->is_old()) {
1892           buffer[idx] = '~';
1893         } else {
1894           buffer[idx] = '_';
1895         }
1896       }
1897     }
1898     uint remnant = _heap->num_regions() % 64;
1899     if (remnant > 0) {
1900       buffer[remnant] = '\0';
1901     } else {
1902       remnant = 64;
1903     }
1904     log_debug(gc)(" %6u: %s", (uint) (_heap->num_regions() - remnant), buffer);
1905   }
1906 #endif
1907 
1908   LogTarget(Info, gc, free) lt;
1909   if (lt.is_enabled()) {
1910     ResourceMark rm;
1911     LogStream ls(lt);
1912 
1913     {
1914       idx_t last_idx = 0;
1915       size_t max = 0;

1933             } else {
1934               empty_contig = 1;
1935             }
1936           } else {
1937             empty_contig = 0;
1938           }
1939           total_used += r->used();
1940           total_free += free;
1941           max_contig = MAX2(max_contig, empty_contig);
1942           last_idx = idx;
1943         }
1944       }
1945 
1946       size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
1947       size_t free = capacity() - used();
1948 
1949       // Since certain regions that belonged to the Mutator free partition at the time of the most recent rebuild may
1950       // have since been retired, the sum of used within regions still in the Mutator free partition may not match the
1951       // internally tracked value of used().  The free() tally, however, is kept in sync, so these totals must agree.
1952       assert(free == total_free, "Free memory should match");

1953       ls.print("Free: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s regular, " SIZE_FORMAT "%s humongous, ",
1954                byte_size_in_proper_unit(total_free),    proper_unit_for_byte_size(total_free),
1955                byte_size_in_proper_unit(max),           proper_unit_for_byte_size(max),
1956                byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous)
1957       );
1958 
1959       ls.print("Frag: ");
1960       size_t frag_ext;
1961       if (total_free_ext > 0) {
1962         frag_ext = 100 - (100 * max_humongous / total_free_ext);
1963       } else {
1964         frag_ext = 0;
1965       }
1966       ls.print(SIZE_FORMAT "%% external, ", frag_ext);
1967 
1968       size_t frag_int;
1969       if (_partitions.count(ShenandoahFreeSetPartitionId::Mutator) > 0) {
1970         frag_int = (100 * (total_used / _partitions.count(ShenandoahFreeSetPartitionId::Mutator))
1971                     / ShenandoahHeapRegion::region_size_bytes());
1972       } else {
1973         frag_int = 0;
1974       }

1981     {
1982       size_t max = 0;
1983       size_t total_free = 0;
1984       size_t total_used = 0;
1985 
1986       for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector);
1987            idx <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector); idx++) {
1988         if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, idx)) {
1989           ShenandoahHeapRegion *r = _heap->get_region(idx);
1990           size_t free = alloc_capacity(r);
1991           max = MAX2(max, free);
1992           total_free += free;
1993           total_used += r->used();
1994         }
1995       }
1996       ls.print(" Collector Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s; Used: " SIZE_FORMAT "%s",
1997                byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
1998                byte_size_in_proper_unit(max),        proper_unit_for_byte_size(max),
1999                byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used));
2000     }
2001 
2002     if (_heap->mode()->is_generational()) {
2003       size_t max = 0;
2004       size_t total_free = 0;
2005       size_t total_used = 0;
2006 
2007       for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector);
2008            idx <= _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector); idx++) {
2009         if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, idx)) {
2010           ShenandoahHeapRegion *r = _heap->get_region(idx);
2011           size_t free = alloc_capacity(r);
2012           max = MAX2(max, free);
2013           total_free += free;
2014           total_used += r->used();
2015         }
2016       }
2017       ls.print_cr(" Old Collector Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s; Used: " SIZE_FORMAT "%s",
2018                   byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
2019                   byte_size_in_proper_unit(max),        proper_unit_for_byte_size(max),
2020                   byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used));
2021     }
2022   }
2023 }
2024 
2025 HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
2026   shenandoah_assert_heaplocked();
2027   if (ShenandoahHeapRegion::requires_humongous(req.size())) {
2028     switch (req.type()) {
2029       case ShenandoahAllocRequest::_alloc_shared:
2030       case ShenandoahAllocRequest::_alloc_shared_gc:
2031         in_new_region = true;
2032         return allocate_contiguous(req);
2033       case ShenandoahAllocRequest::_alloc_plab:
2034       case ShenandoahAllocRequest::_alloc_gclab:
2035       case ShenandoahAllocRequest::_alloc_tlab:
2036         in_new_region = false;
2037         assert(false, "Trying to allocate TLAB in humongous region: " SIZE_FORMAT, req.size());
2038         return nullptr;
2039       default:
2040         ShouldNotReachHere();
2041         return nullptr;
2042     }
2043   } else {
2044     return allocate_single(req, in_new_region);
2045   }
2046 }
2047 
2048 void ShenandoahFreeSet::print_on(outputStream* out) const {
2049   out->print_cr("Mutator Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::Mutator));
2050   ShenandoahLeftRightIterator mutator(const_cast<ShenandoahRegionPartitions*>(&_partitions), ShenandoahFreeSetPartitionId::Mutator);
2051   for (idx_t index = mutator.current(); mutator.has_next(); index = mutator.next()) {


2052     _heap->get_region(index)->print_on(out);

2053   }
2054 
2055   out->print_cr("Collector Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::Collector));
2056   ShenandoahLeftRightIterator collector(const_cast<ShenandoahRegionPartitions*>(&_partitions), ShenandoahFreeSetPartitionId::Collector);
2057   for (idx_t index = collector.current(); collector.has_next(); index = collector.next()) {


2058     _heap->get_region(index)->print_on(out);
2059   }
2060 
2061   if (_heap->mode()->is_generational()) {
2062     out->print_cr("Old Collector Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::OldCollector));
2063     for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector);
2064          index <= _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector); index++) {
2065       if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, index)) {
2066         _heap->get_region(index)->print_on(out);
2067       }
2068     }
2069   }
2070 }
2071 
2072 double ShenandoahFreeSet::internal_fragmentation() {
2073   double squared = 0;
2074   double linear = 0;
2075 
2076   ShenandoahLeftRightIterator iterator(&_partitions, ShenandoahFreeSetPartitionId::Mutator);
2077   for (idx_t index = iterator.current(); iterator.has_next(); index = iterator.next()) {


2078     ShenandoahHeapRegion* r = _heap->get_region(index);
2079     size_t used = r->used();
2080     squared += used * used;
2081     linear += used;

2082   }
2083 
2084   if (linear > 0) {
2085     double s = squared / (ShenandoahHeapRegion::region_size_bytes() * linear);
2086     return 1 - s;
2087   } else {
2088     return 0;
2089   }
2090 }
2091 
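internal_fragmentation() evaluates 1 - sum(used_i^2) / (R * sum(used_i)) over Mutator regions, where R is the region size: 0 when every occupied region is completely full, approaching 1 as usage spreads thinly across many regions. A small numeric check with assumed values:

// Sketch only: the internal-fragmentation metric on assumed per-region usage.
#include <cstdio>

int main() {
  const double R = 1024.0;                  // assumed region size in bytes
  const double used[] = {1024, 512, 256};   // assumed per-region used bytes
  double squared = 0, linear = 0;
  for (double u : used) { squared += u * u; linear += u; }
  // 1 - (1024^2 + 512^2 + 256^2) / (1024 * 1792) = 1 - 1376256/1835008 = 0.25
  printf("internal fragmentation = %.2f\n", 1.0 - squared / (R * linear));
  return 0;
}
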
2092 double ShenandoahFreeSet::external_fragmentation() {
2093   idx_t last_idx = 0;
2094   size_t max_contig = 0;
2095   size_t empty_contig = 0;

2096   size_t free = 0;
2097 
2098   ShenandoahLeftRightIterator iterator(&_partitions, ShenandoahFreeSetPartitionId::Mutator);
2099   for (idx_t index = iterator.current(); iterator.has_next(); index = iterator.next()) {


2100     ShenandoahHeapRegion* r = _heap->get_region(index);
2101     if (r->is_empty()) {
2102       free += ShenandoahHeapRegion::region_size_bytes();
2103       if (last_idx + 1 == index) {
2104         empty_contig++;
2105       } else {
2106         empty_contig = 1;
2107       }
2108     } else {
2109       empty_contig = 0;
2110     }
2111     max_contig = MAX2(max_contig, empty_contig);
2112     last_idx = index;

2113   }
2114 
2115   if (free > 0) {
2116     return 1 - (1.0 * max_contig * ShenandoahHeapRegion::region_size_bytes() / free);
2117   } else {
2118     return 0;
2119   }
2120 }
2121 
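external_fragmentation() evaluates 1 - (max_contig * R) / free: 0 when all empty regions form one contiguous run, growing toward 1 as the empty regions scatter. A numeric check with an assumed layout:

// Sketch only: the external-fragmentation metric on an assumed empty/used layout.
#include <algorithm>
#include <cstdio>

int main() {
  const bool empty[] = {true, true, false, true, false, true, true, true};  // assumed
  const double R = 1024.0;                  // assumed region size in bytes
  size_t max_contig = 0, contig = 0, free_regions = 0;
  for (bool e : empty) {
    contig = e ? contig + 1 : 0;
    max_contig = std::max(max_contig, contig);
    free_regions += e ? 1 : 0;
  }
  // Largest run is 3 of 6 empty regions: 1 - (3 * R) / (6 * R) = 0.5.
  printf("external fragmentation = %.2f\n",
         1.0 - (double) max_contig * R / ((double) free_regions * R));
  return 0;
}
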
< prev index next >