src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp

   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "gc/shared/tlab_globals.hpp"


  28 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  29 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  30 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  31 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"



  32 #include "gc/shenandoah/shenandoahSimpleBitMap.hpp"
  33 #include "gc/shenandoah/shenandoahSimpleBitMap.inline.hpp"
  34 #include "logging/logStream.hpp"
  35 #include "memory/resourceArea.hpp"
  36 #include "runtime/orderAccess.hpp"
  37 
  38 static const char* partition_name(ShenandoahFreeSetPartitionId t) {
  39   switch (t) {
  40     case ShenandoahFreeSetPartitionId::NotFree: return "NotFree";
  41     case ShenandoahFreeSetPartitionId::Mutator: return "Mutator";
  42     case ShenandoahFreeSetPartitionId::Collector: return "Collector";

  43     default:
  44       ShouldNotReachHere();
  45       return "Unrecognized";
  46   }
  47 }
  48 
  49 #ifndef PRODUCT
  50 void ShenandoahRegionPartitions::dump_bitmap() const {
  51   log_info(gc)("Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "], Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",

  52                _leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)],
  53                _rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)],
  54                _leftmosts[int(ShenandoahFreeSetPartitionId::Collector)],
  55                _rightmosts[int(ShenandoahFreeSetPartitionId::Collector)]);


  56   log_info(gc)("Empty Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT
  57                "], Empty Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",

  58                _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
  59                _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
  60                _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
  61                _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)]);


  62 
  63   log_info(gc)("%6s: %18s %18s %18s", "index", "Mutator Bits", "Collector Bits", "NotFree Bits");
  64   dump_bitmap_range(0, _max-1);
  65 }
  66 
  67 void ShenandoahRegionPartitions::dump_bitmap_range(idx_t start_region_idx, idx_t end_region_idx) const {
  68   assert((start_region_idx >= 0) && (start_region_idx < (idx_t) _max), "precondition");
  69   assert((end_region_idx >= 0) && (end_region_idx < (idx_t) _max), "precondition");
  70   idx_t aligned_start = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(start_region_idx);
  71   idx_t aligned_end = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(end_region_idx);
  72   idx_t alignment = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].alignment();
  73   while (aligned_start <= aligned_end) {
  74     dump_bitmap_row(aligned_start);
  75     aligned_start += alignment;
  76   }
  77 }
  78 
  79 void ShenandoahRegionPartitions::dump_bitmap_row(idx_t region_idx) const {
  80   assert((region_idx >= 0) && (region_idx < (idx_t) _max), "precondition");
  81   idx_t aligned_idx = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(region_idx);
  82   uintx mutator_bits = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].bits_at(aligned_idx);
  83   uintx collector_bits = _membership[int(ShenandoahFreeSetPartitionId::Collector)].bits_at(aligned_idx);
  84   uintx free_bits = mutator_bits | collector_bits;

  85   uintx notfree_bits =  ~free_bits;
  86   log_info(gc)(SSIZE_FORMAT_W(6) ": " SIZE_FORMAT_X_0 " 0x" SIZE_FORMAT_X_0 " 0x" SIZE_FORMAT_X_0,
  87                aligned_idx, mutator_bits, collector_bits, notfree_bits);
  88 }
  89 #endif
  90 
  91 ShenandoahRegionPartitions::ShenandoahRegionPartitions(size_t max_regions, ShenandoahFreeSet* free_set) :
  92     _max(max_regions),
  93     _region_size_bytes(ShenandoahHeapRegion::region_size_bytes()),
  94     _free_set(free_set),
  95     _membership{ ShenandoahSimpleBitMap(max_regions), ShenandoahSimpleBitMap(max_regions) }
  96 {
  97   make_all_regions_unavailable();
  98 }
  99 
 100 inline bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) const {
 101   return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
 102 }
 103 
 104 inline bool ShenandoahFreeSet::can_allocate_from(size_t idx) const {
 105   ShenandoahHeapRegion* r = _heap->get_region(idx);
 106   return can_allocate_from(r);
 107 }
 108 
 109 inline size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) const {
 110   if (r->is_trash()) {
 111     // This would be recycled on allocation path
 112     return ShenandoahHeapRegion::region_size_bytes();
 113   } else {
 114     return r->free();
 115   }

 145   // which_partition is shrinking after the region that used to be leftmost is retired.
 146   return idx;
 147 }
 148 
 149 void ShenandoahRegionPartitions::make_all_regions_unavailable() {
 150   for (size_t partition_id = 0; partition_id < IntNumPartitions; partition_id++) {
 151     _membership[partition_id].clear_all();
 152     _leftmosts[partition_id] = _max;
 153     _rightmosts[partition_id] = -1;
 154     _leftmosts_empty[partition_id] = _max;
  155     _rightmosts_empty[partition_id] = -1;
 156     _capacity[partition_id] = 0;
 157     _used[partition_id] = 0;
 158   }
 159   _region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = _region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
 160 }
 161 
 162 void ShenandoahRegionPartitions::establish_mutator_intervals(idx_t mutator_leftmost, idx_t mutator_rightmost,
 163                                                              idx_t mutator_leftmost_empty, idx_t mutator_rightmost_empty,
 164                                                              size_t mutator_region_count, size_t mutator_used) {
 165   _region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_region_count;
 166   _leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_leftmost;
 167   _rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_rightmost;
 168   _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_leftmost_empty;
 169   _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_rightmost_empty;
 170 
 171   _region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_region_count;
 172   _used[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_used;
 173   _capacity[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_region_count * _region_size_bytes;
 174 
 175   _leftmosts[int(ShenandoahFreeSetPartitionId::Collector)] = _max;
 176   _rightmosts[int(ShenandoahFreeSetPartitionId::Collector)] = -1;
 177   _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)] = _max;
 178   _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)] = -1;
 179 
 180   _region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
 181   _used[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
 182   _capacity[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
 183 }
 184 
 185 void ShenandoahRegionPartitions::increase_used(ShenandoahFreeSetPartitionId which_partition, size_t bytes) {
 186   assert (which_partition < NumPartitions, "Partition must be valid");
 187   _used[int(which_partition)] += bytes;
 188   assert (_used[int(which_partition)] <= _capacity[int(which_partition)],
 189           "Must not use (" SIZE_FORMAT ") more than capacity (" SIZE_FORMAT ") after increase by " SIZE_FORMAT,
 190           _used[int(which_partition)], _capacity[int(which_partition)], bytes);
 191 }
 192 
 193 inline void ShenandoahRegionPartitions::shrink_interval_if_range_modifies_either_boundary(
 194   ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx) {
 195   assert((low_idx <= high_idx) && (low_idx >= 0) && (high_idx < _max), "Range must span legal index values");
 196   if (low_idx == leftmost(partition)) {
 197     assert (!_membership[int(partition)].is_set(low_idx), "Do not shrink interval if region not removed");
 198     if (high_idx + 1 == _max) {
 199       _leftmosts[int(partition)] = _max;
 200     } else {
 201       _leftmosts[int(partition)] = find_index_of_next_available_region(partition, high_idx + 1);
 202     }
 203     if (_leftmosts_empty[int(partition)] < _leftmosts[int(partition)]) {
 204       // This gets us closer to where we need to be; we'll scan further when leftmosts_empty is requested.
 205       _leftmosts_empty[int(partition)] = leftmost(partition);
 206     }
 207   }
 208   if (high_idx == _rightmosts[int(partition)]) {
 209     assert (!_membership[int(partition)].is_set(high_idx), "Do not shrink interval if region not removed");
 210     if (low_idx == 0) {
 211       _rightmosts[int(partition)] = -1;
 212     } else {
 213       _rightmosts[int(partition)] = find_index_of_previous_available_region(partition, low_idx - 1);
 214     }
 215     if (_rightmosts_empty[int(partition)] > _rightmosts[int(partition)]) {
 216       // This gets us closer to where we need to be; we'll scan further when rightmosts_empty is requested.
 217       _rightmosts_empty[int(partition)] = _rightmosts[int(partition)];
 218     }
 219   }
 220   if (_leftmosts[int(partition)] > _rightmosts[int(partition)]) {
 221     _leftmosts[int(partition)] = _max;
 222     _rightmosts[int(partition)] = -1;
 223     _leftmosts_empty[int(partition)] = _max;
 224     _rightmosts_empty[int(partition)] = -1;
 225   }

 272 
 273   if (used_bytes < _region_size_bytes) {
 274     // Count the alignment pad remnant of memory as used when we retire this region
 275     increase_used(partition, _region_size_bytes - used_bytes);
 276   }
 277   _membership[int(partition)].clear_bit(idx);
 278   shrink_interval_if_boundary_modified(partition, idx);
 279   _region_counts[int(partition)]--;
 280 }
 281 
 282 void ShenandoahRegionPartitions::make_free(idx_t idx, ShenandoahFreeSetPartitionId which_partition, size_t available) {
 283   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 284   assert (membership(idx) == ShenandoahFreeSetPartitionId::NotFree, "Cannot make free if already free");
 285   assert (which_partition < NumPartitions, "selected free partition must be valid");
 286   assert (available <= _region_size_bytes, "Available cannot exceed region size");
 287 
 288   _membership[int(which_partition)].set_bit(idx);
 289   _capacity[int(which_partition)] += _region_size_bytes;
 290   _used[int(which_partition)] += _region_size_bytes - available;
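         // Accounting example (illustrative numbers, not part of this change): making free a 4 MB region that
         // still holds 512 KB of allocated data adds 4 MB to the partition's capacity (above) and 512 KB,
         // i.e. _region_size_bytes - available, to its used total.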
 291   expand_interval_if_boundary_modified(which_partition, idx, available);
 292 
 293   _region_counts[int(which_partition)]++;
 294 }
 295 
 296 void ShenandoahRegionPartitions::move_from_partition_to_partition(idx_t idx, ShenandoahFreeSetPartitionId orig_partition,
 297                                                                   ShenandoahFreeSetPartitionId new_partition, size_t available) {

 298   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 299   assert (orig_partition < NumPartitions, "Original partition must be valid");
 300   assert (new_partition < NumPartitions, "New partition must be valid");
 301   assert (available <= _region_size_bytes, "Available cannot exceed region size");





 302 
 303   // Expected transitions:
 304   //  During rebuild:         Mutator => Collector


 305   //  During flip_to_gc:      Mutator empty => Collector

 306   // At start of update refs: Collector => Mutator
 307   assert (((available <= _region_size_bytes) &&
 308            (((orig_partition == ShenandoahFreeSetPartitionId::Mutator)
 309              && (new_partition == ShenandoahFreeSetPartitionId::Collector)) ||
 310             ((orig_partition == ShenandoahFreeSetPartitionId::Collector)
 311              && (new_partition == ShenandoahFreeSetPartitionId::Mutator)))) ||
 312           ((available == _region_size_bytes) &&
 313            ((orig_partition == ShenandoahFreeSetPartitionId::Mutator)
 314             && (new_partition == ShenandoahFreeSetPartitionId::Collector))), "Unexpected movement between partitions");


 315 
 316   size_t used = _region_size_bytes - available;



 317 
 318   _membership[int(orig_partition)].clear_bit(idx);
 319   _membership[int(new_partition)].set_bit(idx);
 320 
 321   _capacity[int(orig_partition)] -= _region_size_bytes;
 322   _used[int(orig_partition)] -= used;
 323   shrink_interval_if_boundary_modified(orig_partition, idx);
 324 
  325   _capacity[int(new_partition)] += _region_size_bytes;
 326   _used[int(new_partition)] += used;
 327   expand_interval_if_boundary_modified(new_partition, idx, available);
 328 
 329   _region_counts[int(orig_partition)]--;
 330   _region_counts[int(new_partition)]++;
 331 }
 332 
 333 const char* ShenandoahRegionPartitions::partition_membership_name(idx_t idx) const {
 334   return partition_name(membership(idx));
 335 }
 336 

 465   idx_t leftmosts[UIntNumPartitions];
 466   idx_t rightmosts[UIntNumPartitions];
 467   idx_t empty_leftmosts[UIntNumPartitions];
 468   idx_t empty_rightmosts[UIntNumPartitions];
 469 
 470   for (uint i = 0; i < UIntNumPartitions; i++) {
 471     leftmosts[i] = _max;
 472     empty_leftmosts[i] = _max;
 473     rightmosts[i] = -1;
 474     empty_rightmosts[i] = -1;
 475   }
 476 
 477   for (idx_t i = 0; i < _max; i++) {
 478     ShenandoahFreeSetPartitionId partition = membership(i);
 479     switch (partition) {
 480       case ShenandoahFreeSetPartitionId::NotFree:
 481         break;
 482 
 483       case ShenandoahFreeSetPartitionId::Mutator:
 484       case ShenandoahFreeSetPartitionId::Collector:

 485       {
 486         size_t capacity = _free_set->alloc_capacity(i);
 487         bool is_empty = (capacity == _region_size_bytes);
 488         assert(capacity > 0, "free regions must have allocation capacity");
 489         if (i < leftmosts[int(partition)]) {
 490           leftmosts[int(partition)] = i;
 491         }
 492         if (is_empty && (i < empty_leftmosts[int(partition)])) {
 493           empty_leftmosts[int(partition)] = i;
 494         }
 495         if (i > rightmosts[int(partition)]) {
 496           rightmosts[int(partition)] = i;
 497         }
 498         if (is_empty && (i > empty_rightmosts[int(partition)])) {
 499           empty_rightmosts[int(partition)] = i;
 500         }
 501         break;
 502       }
 503 
 504       default:

 554 
  555   // If the Collector partition is empty, leftmosts will both equal _max and rightmosts will both equal -1.
 556   // Likewise for empty region partitions.
 557   beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
 558   end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
 559   assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::Collector),
 560           "free regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 561           beg_off, leftmost(ShenandoahFreeSetPartitionId::Collector));
 562   assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::Collector),
 563           "free regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 564           end_off, rightmost(ShenandoahFreeSetPartitionId::Collector));
 565 
 566   beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
 567   end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
 568   assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
 569           "free empty regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 570           beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Collector));
 571   assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
 572           "free empty regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 573           end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Collector));
 574 }
 575 #endif
 576 
 577 ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
 578   _heap(heap),
 579   _partitions(max_regions, this),
 580   _right_to_left_bias(false),
 581   _alloc_bias_weight(0)
 582 {
 583   clear_internal();
 584 }
 585 
 586 HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
 587   shenandoah_assert_heaplocked();
 588 
 589   // Scan the bitmap looking for a first fit.
 590   //
 591   // Leftmost and rightmost bounds provide enough caching to quickly find a region from which to allocate.

 592   //
 593   // Allocations are biased: GC allocations are taken from the high end of the heap.  Regular (and TLAB)
 594   // mutator allocations are taken from the middle of heap, below the memory reserved for Collector.
 595   // Humongous mutator allocations are taken from the bottom of the heap.
 596   //
 597   // Free set maintains mutator and collector partitions.  Mutator can only allocate from the
 598   // Mutator partition.  Collector prefers to allocate from the Collector partition, but may steal
 599   // regions from the Mutator partition if the Collector partition has been depleted.
 600 
 601   switch (req.type()) {
 602     case ShenandoahAllocRequest::_alloc_tlab:
 603     case ShenandoahAllocRequest::_alloc_shared: {
 604       // Try to allocate in the mutator view
 605       if (_alloc_bias_weight-- <= 0) {
 606         // We have observed that regions not collected in previous GC cycle tend to congregate at one end or the other
 607         // of the heap.  Typically, these are the more recently engaged regions and the objects in these regions have not
 608         // yet had a chance to die (and/or are treated as floating garbage).  If we use the same allocation bias on each
 609         // GC pass, these "most recently" engaged regions for GC pass N will also be the "most recently" engaged regions
 610         // for GC pass N+1, and the relatively large amount of live data and/or floating garbage introduced
 611         // during the most recent GC pass may once again prevent the region from being collected.  We have found that
 612         // alternating the allocation behavior between GC passes improves evacuation performance by 3-7% on certain
 613         // benchmarks.  In the best case, this has the effect of consuming these partially consumed regions before
 614         // the start of the next mark cycle so all of their garbage can be efficiently reclaimed.
 615         //
 616         // First, finish consuming regions that are already partially consumed so as to more tightly limit ranges of
 617         // available regions.  Other potential benefits:
 618         //  1. Eventual collection set has fewer regions because we have packed newly allocated objects into fewer regions
 619         //  2. We preserve the "empty" regions longer into the GC cycle, reducing likelihood of allocation failures
 620         //     late in the GC cycle.
 621         idx_t non_empty_on_left = (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator)
 622                                      - _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator));
 623         idx_t non_empty_on_right = (_partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator)
 624                                       - _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator));
 625         _right_to_left_bias = (non_empty_on_right > non_empty_on_left);
 626         _alloc_bias_weight = _InitialAllocBiasWeight;
 627       }
 628       if (_right_to_left_bias) {
 629         // Allocate within mutator free from high memory to low so as to preserve low memory for humongous allocations
 630         if (!_partitions.is_empty(ShenandoahFreeSetPartitionId::Mutator)) {
 631           // Use signed idx.  Otherwise, loop will never terminate.
 632           idx_t leftmost = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator);
 633           for (idx_t idx = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator); idx >= leftmost; ) {
 634             assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx),
 635                    "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, idx);
 636             ShenandoahHeapRegion* r = _heap->get_region(idx);
 637             // try_allocate_in() increases used if the allocation is successful.
 638             HeapWord* result;
 639             size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab)? req.min_size(): req.size();
 640             if ((alloc_capacity(r) >= min_size) && ((result = try_allocate_in(r, req, in_new_region)) != nullptr)) {
 641               return result;
 642             }
 643             idx = _partitions.find_index_of_previous_available_region(ShenandoahFreeSetPartitionId::Mutator, idx - 1);
 644           }
 645         }
 646       } else {
 647         // Allocate from low to high memory.  This keeps the range of fully empty regions more tightly packed.
 648         // Note that the most recently allocated regions tend not to be evacuated in a given GC cycle.  So this

 653           for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); idx <= rightmost; ) {
 654             assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx),
 655                    "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, idx);
 656             ShenandoahHeapRegion* r = _heap->get_region(idx);
 657             // try_allocate_in() increases used if the allocation is successful.
 658             HeapWord* result;
 659             size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab)? req.min_size(): req.size();
 660             if ((alloc_capacity(r) >= min_size) && ((result = try_allocate_in(r, req, in_new_region)) != nullptr)) {
 661               return result;
 662             }
 663             idx = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, idx + 1);
 664           }
 665         }
 666       }
 667       // There is no recovery. Mutator does not touch collector view at all.
 668       break;
 669     }
 670     case ShenandoahAllocRequest::_alloc_gclab:
 671       // GCLABs are for evacuation so we must be in evacuation phase.
 672 
 673     case ShenandoahAllocRequest::_alloc_shared_gc: {
 674       // Fast-path: try to allocate in the collector view first
 675       idx_t leftmost_collector = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector);
 676       for (idx_t idx = _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector); idx >= leftmost_collector; ) {
 677         assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, idx),
 678                "Boundaries or find_prev_last_bit failed: " SSIZE_FORMAT, idx);
 679         HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
 680         if (result != nullptr) {
 681           return result;
 682         }
 683         idx = _partitions.find_index_of_previous_available_region(ShenandoahFreeSetPartitionId::Collector, idx - 1);
 684       }
 685 
 686       // No dice. Can we borrow space from mutator view?
 687       if (!ShenandoahEvacReserveOverflow) {
 688         return nullptr;
 689       }




 690 
 691       // Try to steal an empty region from the mutator view.
 692       idx_t leftmost_mutator_empty = _partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator);
 693       for (idx_t idx = _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator); idx >= leftmost_mutator_empty; ) {
 694         assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx),
 695                "Boundaries or find_prev_last_bit failed: " SSIZE_FORMAT, idx);
 696         ShenandoahHeapRegion* r = _heap->get_region(idx);
 697         if (can_allocate_from(r)) {
 698           flip_to_gc(r);
 699           HeapWord *result = try_allocate_in(r, req, in_new_region);
 700           if (result != nullptr) {
 701             log_debug(gc)("Flipped region " SIZE_FORMAT " to gc for request: " PTR_FORMAT, idx, p2i(&req));
 702             return result;
 703           }

 704         }
 705         idx = _partitions.find_index_of_previous_available_region(ShenandoahFreeSetPartitionId::Mutator, idx - 1);
 706       }
 707 
 708       // No dice. Do not try to mix mutator and GC allocations, because adjusting region UWM
 709       // due to GC allocations would expose unparsable mutator allocations.
 710       break;
 711     }

 712     default:
 713       ShouldNotReachHere();
 714   }
 715   return nullptr;
 716 }
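
As a reading aid (not part of this file), the alternating-bias decision above boils down to the following
standalone sketch; the function name and the bare signed indices are illustrative assumptions only:

    // Decide whether the next batch of mutator allocations should walk the free set from high
    // addresses to low.  Partially used regions cluster outside the fully-empty interval:
    // [leftmost, leftmost_empty) on the left and (rightmost_empty, rightmost] on the right.
    static bool prefer_right_to_left(long leftmost, long leftmost_empty,
                                     long rightmost_empty, long rightmost) {
      long non_empty_on_left  = leftmost_empty - leftmost;
      long non_empty_on_right = rightmost - rightmost_empty;
      // Consume the side holding more partially used regions first, so those regions are finished
      // (and their garbage reclaimed) before the next mark cycle begins.
      return non_empty_on_right > non_empty_on_left;
    }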
 717 
 718 HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
 719   assert (has_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());
 720   if (_heap->is_concurrent_weak_root_in_progress() && r->is_trash()) {
 721     return nullptr;
 722   }
 723 
 724   HeapWord* result = nullptr;
 725   try_recycle_trashed(r);
 726   in_new_region = r->is_empty();
 727 
 728   if (in_new_region) {
 729     log_debug(gc)("Using new region (" SIZE_FORMAT ") for %s (" PTR_FORMAT ").",
 730                        r->index(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(&req));
 731   }
 732 
 733   // req.size() is in words, r->free() is in bytes.
 734   if (req.is_lab_alloc()) {
 735     // This is a GCLAB or a TLAB allocation
 736     size_t adjusted_size = req.size();
 737     size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
 738     if (adjusted_size > free) {
 739       adjusted_size = free;
 740     }
 741     if (adjusted_size >= req.min_size()) {
 742       result = r->allocate(adjusted_size, req.type());
 743       log_debug(gc)("Allocated " SIZE_FORMAT " words (adjusted from " SIZE_FORMAT ") for %s @" PTR_FORMAT
 744                           " from %s region " SIZE_FORMAT ", free bytes remaining: " SIZE_FORMAT,
 745                           adjusted_size, req.size(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(result),
 746                           _partitions.partition_membership_name(r->index()), r->index(), r->free());
 747       assert (result != nullptr, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, adjusted_size);
 748       req.set_actual_size(adjusted_size);
 749     } else {
 750       log_trace(gc, free)("Failed to shrink TLAB or GCLAB request (" SIZE_FORMAT ") in region " SIZE_FORMAT " to " SIZE_FORMAT
 751                           " because min_size() is " SIZE_FORMAT, req.size(), r->index(), adjusted_size, req.min_size());
 752     }
 753   } else {
 754     size_t size = req.size();
 755     result = r->allocate(size, req.type());
 756     if (result != nullptr) {
 757       // Record actual allocation size
 758       log_debug(gc)("Allocated " SIZE_FORMAT " words for %s @" PTR_FORMAT
 759                           " from %s region " SIZE_FORMAT ", free bytes remaining: " SIZE_FORMAT,
 760                           size, ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(result),
 761                           _partitions.partition_membership_name(r->index()),  r->index(), r->free());
 762       req.set_actual_size(size);
 763     }
 764   }
 765 
 766   if (result != nullptr) {
 767     // Allocation successful, bump stats:
 768     if (req.is_mutator_alloc()) {

 769       _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, req.actual_size() * HeapWordSize);
 770     } else {
 771       assert(req.is_gc_alloc(), "Should be gc_alloc since req wasn't mutator alloc");
 772 
 773       // For GC allocations, we advance update_watermark because the objects relocated into this memory during
 774       // evacuation are not updated during evacuation.
 775       r->set_update_watermark(r->top());
 776     }
 777   }
 778 
 779   static const size_t min_capacity = (size_t) (ShenandoahHeapRegion::region_size_bytes() * (1.0 - 1.0 / ShenandoahEvacWaste));
 780   size_t ac = alloc_capacity(r);
 781 
 782   if (((result == nullptr) && (ac < min_capacity)) || (alloc_capacity(r) < PLAB::min_size() * HeapWordSize)) {
  783     // Regardless of whether this allocation succeeded, if the remaining memory is less than PLAB::min_size(), retire this region.
 784     // Note that retire_from_partition() increases used to account for waste.
 785 
 786     // Also, if this allocation request failed and the consumed within this region * ShenandoahEvacWaste > region size,
 787     // then retire the region so that subsequent searches can find available memory more quickly.
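  788     // Illustrative arithmetic (values chosen for the example only, not from this change): with
  789     // ShenandoahEvacWaste at 1.25 and a 4 MB region, min_capacity = 4 MB * (1.0 - 1.0/1.25) = 0.8 MB,
  790     // so a failed allocation that leaves less than 0.8 MB free retires the region instead of leaving it
  791     // to be rescanned by later searches.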
 788 
 789     size_t idx = r->index();
 790     _partitions.retire_from_partition(req.is_mutator_alloc()?
 791                                       ShenandoahFreeSetPartitionId::Mutator: ShenandoahFreeSetPartitionId::Collector,
 792                                       idx, r->used());
 793     _partitions.assert_bounds();
 794   }
 795   return result;
 796 }
 797 
 798 HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
 799   assert(req.is_mutator_alloc(), "All humongous allocations are performed by mutator");
 800   shenandoah_assert_heaplocked();
 801 
 802   size_t words_size = req.size();
 803   idx_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
 804 



 805   // Check if there are enough regions left to satisfy allocation.
 806   if (num > (idx_t) _partitions.count(ShenandoahFreeSetPartitionId::Mutator)) {
 807     return nullptr;
 808   }
 809 
 810   idx_t start_range = _partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator);
 811   idx_t end_range = _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator) + 1;
 812   idx_t last_possible_start = end_range - num;
 813 
 814   // Find the continuous interval of $num regions, starting from $beg and ending in $end,
 815   // inclusive. Contiguous allocations are biased to the beginning.
 816   idx_t beg = _partitions.find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId::Mutator,
 817                                                                             start_range, num);
 818   if (beg > last_possible_start) {
 819     // Hit the end, goodbye
 820     return nullptr;
 821   }
 822   idx_t end = beg;
 823 
 824   while (true) {
 825     // We've confirmed num contiguous regions belonging to Mutator partition, so no need to confirm membership.
 826     // If region is not completely free, the current [beg; end] is useless, and we may fast-forward.  If we can extend
 827     // the existing range, we can exploit that certain regions are already known to be in the Mutator free set.
 828     while (!can_allocate_from(_heap->get_region(end))) {
 829       // region[end] is not empty, so we restart our search after region[end]
 830       idx_t slide_delta = end + 1 - beg;
 831       if (beg + slide_delta > last_possible_start) {
 832         // no room to slide
 833         return nullptr;
 834       }
 835       for (idx_t span_end = beg + num; slide_delta > 0; slide_delta--) {
 836         if (!_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, span_end)) {
 837           beg = _partitions.find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId::Mutator,

 842           span_end++;
 843         }
 844       }
 845       // Here, either beg identifies a range of num regions all of which are in the Mutator free set, or beg > last_possible_start
 846       if (beg > last_possible_start) {
 847         // Hit the end, goodbye
 848         return nullptr;
 849       }
 850       end = beg;
 851     }
 852 
 853     if ((end - beg + 1) == num) {
 854       // found the match
 855       break;
 856     }
 857 
 858     end++;
 859   }
 860 
 861   size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();

 862   // Initialize regions:
 863   for (idx_t i = beg; i <= end; i++) {
 864     ShenandoahHeapRegion* r = _heap->get_region(i);
 865     try_recycle_trashed(r);
 866 
 867     assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous");
 868     assert(r->is_empty(), "Should be empty");
 869 
 870     if (i == beg) {
 871       r->make_humongous_start();
 872     } else {
 873       r->make_humongous_cont();
 874     }
 875 
 876     // Trailing region may be non-full, record the remainder there
 877     size_t used_words;
 878     if ((i == end) && (remainder != 0)) {
 879       used_words = remainder;
 880     } else {
 881       used_words = ShenandoahHeapRegion::region_size_words();
 882     }
 883 


 884     r->set_top(r->bottom() + used_words);
 885   }
 886 
 887   if (remainder != 0) {
 888     // Record this remainder as allocation waste
 889     _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);
 890   }
 891 
 892   // retire_range_from_partition() will adjust bounds on Mutator free set if appropriate
 893   _partitions.retire_range_from_partition(ShenandoahFreeSetPartitionId::Mutator, beg, end);
 894 
 895   size_t total_humongous_size = ShenandoahHeapRegion::region_size_bytes() * num;
 896   _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, total_humongous_size);
 897   _partitions.assert_bounds();
 898   req.set_actual_size(words_size);



 899   return _heap->get_region(beg)->bottom();
 900 }
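
Stripped of the partition bookkeeping, the contiguous search above is a run-length scan. A minimal
standalone sketch, assuming a plain std::vector<bool> stands in for the Mutator membership bitmap and
that num >= 1 (both assumptions for illustration only):

    #include <vector>

    // Return the lowest start index of `num` consecutive fully-empty free regions, or -1 if no such run exists.
    static long find_contiguous_empty_run(const std::vector<bool>& is_free_and_empty, size_t num) {
      size_t run = 0;
      for (size_t i = 0; i < is_free_and_empty.size(); i++) {
        run = is_free_and_empty[i] ? run + 1 : 0;   // extend the current run, or restart after a hole
        if (run == num) {
          return (long)(i + 1 - num);               // biased to the beginning, as in allocate_contiguous()
        }
      }
      return -1;                                    // heap cannot hold a humongous object of this size
    }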
 901 
 902 void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
 903   if (r->is_trash()) {
 904     _heap->decrease_used(r->used());
 905     r->recycle();
 906   }
 907 }
 908 
 909 void ShenandoahFreeSet::recycle_trash() {
  910   // The heap lock is not reentrant; check that we do not already hold it
 911   shenandoah_assert_not_heaplocked();
 912 
 913   for (size_t i = 0; i < _heap->num_regions(); i++) {
 914     ShenandoahHeapRegion* r = _heap->get_region(i);
 915     if (r->is_trash()) {
 916       ShenandoahHeapLocker locker(_heap->lock());
 917       try_recycle_trashed(r);
 918     }
 919     SpinPause(); // allow allocators to take the lock
 920   }
 921 }
 922 
 923 void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
 924   size_t idx = r->index();
 925 
 926   assert(_partitions.partition_id_matches(idx, ShenandoahFreeSetPartitionId::Mutator), "Should be in mutator view");
 927   assert(can_allocate_from(r), "Should not be allocated");
 928 
 929   size_t ac = alloc_capacity(r);
 930   _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
 931                                                ShenandoahFreeSetPartitionId::Collector, ac);
 932   _partitions.assert_bounds();
 933 
 934   // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
 935   // to recycle trash before attempting to allocate anything in the region.
 936 }
 937 
 938 void ShenandoahFreeSet::clear() {
 939   shenandoah_assert_heaplocked();
 940   clear_internal();
 941 }
 942 
 943 void ShenandoahFreeSet::clear_internal() {
 944   _partitions.make_all_regions_unavailable();
 945 }
 946 
 947 void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &cset_regions) {
 948 
 949   cset_regions = 0;


 950   clear_internal();
 951   size_t region_size_bytes = _partitions.region_size_bytes();
 952   size_t max_regions = _partitions.max_regions();
 953 
 954   size_t mutator_leftmost = max_regions;
 955   size_t mutator_rightmost = 0;
 956   size_t mutator_leftmost_empty = max_regions;
 957   size_t mutator_rightmost_empty = 0;
 958 
 959   size_t mutator_regions = 0;
 960   size_t mutator_used = 0;
 961 
 962   for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
 963     ShenandoahHeapRegion* region = _heap->get_region(idx);
 964     if (region->is_trash()) {
 965       // Trashed regions represent regions that had been in the collection partition but have not yet been "cleaned up".
 966       // The cset regions are not "trashed" until we have finished update refs.
 967       cset_regions++;
 968     }
 969     if (region->is_alloc_allowed() || region->is_trash()) {

 970 
 971       // Do not add regions that would almost surely fail allocation
 972       size_t ac = alloc_capacity(region);
 973       if (ac > PLAB::min_size() * HeapWordSize) {
 974         _partitions.raw_assign_membership(idx, ShenandoahFreeSetPartitionId::Mutator);
 975 
 976         if (idx < mutator_leftmost) {
 977           mutator_leftmost = idx;
 978         }
 979         if (idx > mutator_rightmost) {
 980           mutator_rightmost = idx;
 981         }
 982         if (ac == region_size_bytes) {
 983           if (idx < mutator_leftmost_empty) {
 984             mutator_leftmost_empty = idx;
 985           }
 986           if (idx > mutator_rightmost_empty) {
 987             mutator_rightmost_empty = idx;
 988           }
 989         }
 990         mutator_regions++;
 991         mutator_used += (region_size_bytes - ac);
 992 
 993         log_debug(gc)(
 994           "  Adding Region " SIZE_FORMAT " (Free: " SIZE_FORMAT "%s, Used: " SIZE_FORMAT "%s) to mutator partition",
 995           idx, byte_size_in_proper_unit(region->free()), proper_unit_for_byte_size(region->free()),
 996           byte_size_in_proper_unit(region->used()), proper_unit_for_byte_size(region->used()));
 997       }
 998     }
 999   }
1000   _partitions.establish_mutator_intervals(mutator_leftmost, mutator_rightmost, mutator_leftmost_empty, mutator_rightmost_empty,
1001                                           mutator_regions, mutator_used);
1002 }
1003 
1004 void ShenandoahFreeSet::move_regions_from_collector_to_mutator(size_t max_xfer_regions) {
1005   size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1006   size_t collector_empty_xfer = 0;
1007   size_t collector_not_empty_xfer = 0;
1008 
1009   // Process empty regions within the Collector free partition
1010   if ((max_xfer_regions > 0) &&
1011       (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Collector)
1012        <= _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Collector))) {
1013     ShenandoahHeapLocker locker(_heap->lock());
1014     idx_t rightmost = _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Collector);
1015     for (idx_t idx = _partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Collector);
1016          (max_xfer_regions > 0) && (idx <= rightmost); ) {
1017       assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, idx),
1018              "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, idx);
1019       // Note: can_allocate_from() denotes that region is entirely empty
1020       if (can_allocate_from(idx)) {
1021         _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Collector,
1022                                                      ShenandoahFreeSetPartitionId::Mutator, region_size_bytes);
1023         max_xfer_regions--;
1024         collector_empty_xfer += region_size_bytes;
1025       }
1026       idx = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Collector, idx + 1);



1027     }
1028   }
1029 
1030   // If there are any non-empty regions within Collector partition, we can also move them to the Mutator free partition
1031   if ((max_xfer_regions > 0) && (_partitions.leftmost(ShenandoahFreeSetPartitionId::Collector)
1032                                  <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector))) {
1033     ShenandoahHeapLocker locker(_heap->lock());
1034     idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector);
1035     for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector);
1036          (max_xfer_regions > 0) && (idx <= rightmost); ) {
1037       assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, idx),
1038              "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, idx);
1039       size_t ac = alloc_capacity(idx);
1040       if (ac > 0) {
1041         _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Collector,
1042                                                      ShenandoahFreeSetPartitionId::Mutator, ac);
1043         max_xfer_regions--;
1044         collector_not_empty_xfer += ac;
1045       }
1046       idx = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Collector, idx + 1);
1047     }
1048   }
1049 
1050   size_t collector_xfer = collector_empty_xfer + collector_not_empty_xfer;
1051   log_info(gc)("At start of update refs, moving " SIZE_FORMAT "%s to Mutator free partition from Collector Reserve",
1052                byte_size_in_proper_unit(collector_xfer), proper_unit_for_byte_size(collector_xfer));



1053 }
1054 
1055 void ShenandoahFreeSet::prepare_to_rebuild(size_t &cset_regions) {



1056   shenandoah_assert_heaplocked();



1057 
1058   log_debug(gc)("Rebuilding FreeSet");



1059 
1060   // This places regions that have alloc_capacity into the mutator partition.
1061   find_regions_with_alloc_capacity(cset_regions);
1062 }
1063 
1064 void ShenandoahFreeSet::finish_rebuild(size_t cset_regions) {

1065   shenandoah_assert_heaplocked();

1066 
1067   // Our desire is to reserve this much memory for future evacuation.  We may end up reserving less, if
1068   // memory is in short supply.
1069 
1070   size_t reserve = _heap->max_capacity() * ShenandoahEvacReserve / 100;
1071   size_t available_in_collector_partition = (_partitions.capacity_of(ShenandoahFreeSetPartitionId::Collector)
1072                                              - _partitions.used_by(ShenandoahFreeSetPartitionId::Collector));
1073   size_t additional_reserve;
1074   if (available_in_collector_partition < reserve) {
1075     additional_reserve = reserve - available_in_collector_partition;
1076   } else {
1077     additional_reserve = 0;

1078   }
1079 
1080   reserve_regions(reserve);
1081   _partitions.assert_bounds();
1082   log_status();
1083 }
1084 
1085 void ShenandoahFreeSet::rebuild() {
1086   size_t cset_regions;
1087   prepare_to_rebuild(cset_regions);
1088   finish_rebuild(cset_regions);
1089 }
1090 
1091 void ShenandoahFreeSet::reserve_regions(size_t to_reserve) {
1092   for (size_t i = _heap->num_regions(); i > 0; i--) {
1093     size_t idx = i - 1;
1094     ShenandoahHeapRegion* r = _heap->get_region(idx);
1095 
1096     if (!_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx)) {
1097       continue;
1098     }
1099 
1100     size_t ac = alloc_capacity(r);
1101     assert (ac > 0, "Membership in free partition implies has capacity");

1102 

1103     bool move_to_collector = _partitions.available_in(ShenandoahFreeSetPartitionId::Collector) < to_reserve;
1104     if (!move_to_collector) {
1105       // We've satisfied to_reserve

1106       break;
1107     }
1108 
1109     if (move_to_collector) {
1110       // Note: In a previous implementation, regions were only placed into the survivor space (collector_is_free) if
1111       // they were entirely empty.  This has the effect of causing new Mutator allocation to reside next to objects
1112       // that have already survived at least one GC, mixing ephemeral with longer-lived objects in the same region.
1113       // Any objects that have survived a GC are less likely to immediately become garbage, so a region that contains
1114       // survivor objects is less likely to be selected for the collection set.  This alternative implementation allows
1115       // survivor regions to continue accumulating other survivor objects, and makes it more likely that ephemeral objects
1116       // occupy regions comprised entirely of ephemeral objects.  These regions are highly likely to be included in the next
1117       // collection set, and they are easily evacuated because they have low density of live objects.
1118       _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
1119                                                    ShenandoahFreeSetPartitionId::Collector, ac);
1120       log_debug(gc)("  Shifting region " SIZE_FORMAT " from mutator_free to collector_free", idx);
1121     }
1122   }
1123 
1124   if (LogTarget(Info, gc, free)::is_enabled()) {
1125     size_t reserve = _partitions.capacity_of(ShenandoahFreeSetPartitionId::Collector);
1126     if (reserve < to_reserve) {
1127       log_debug(gc)("Wanted " PROPERFMT " for young reserve, but only reserved: " PROPERFMT,
1128                     PROPERFMTARGS(to_reserve), PROPERFMTARGS(reserve));
1129     }
1130   }
1131 }
1132 
1133 void ShenandoahFreeSet::log_status_under_lock() {
 1134   // Must not be called while holding the heap lock; this acquires the heap lock only when logging is enabled
1135   shenandoah_assert_not_heaplocked();
1136   if (LogTarget(Info, gc, free)::is_enabled()
1137       DEBUG_ONLY(|| LogTarget(Debug, gc, free)::is_enabled())) {
1138     ShenandoahHeapLocker locker(_heap->lock());
1139     log_status();
1140   }
1141 }
1142 
1143 void ShenandoahFreeSet::log_status() {
1144   shenandoah_assert_heaplocked();
1145 
1146 #ifdef ASSERT
1147   // Dump of the FreeSet details is only enabled if assertions are enabled
1148   if (LogTarget(Debug, gc, free)::is_enabled()) {
1149 #define BUFFER_SIZE 80
1150     size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();


1151     size_t consumed_collector = 0;
1152     size_t available_collector = 0;
1153     size_t consumed_mutator = 0;


1154     size_t available_mutator = 0;


1155 
1156     char buffer[BUFFER_SIZE];
1157     for (uint i = 0; i < BUFFER_SIZE; i++) {
1158       buffer[i] = '\0';
1159     }
1160     log_debug(gc)("FreeSet map legend: M:mutator_free C:collector_free H:humongous _:retired");
1161     log_debug(gc)(" mutator free range [" SIZE_FORMAT ".." SIZE_FORMAT "],"
1162                   " collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "]",
1163                   _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
1164                   _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),

1165                   _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector),
1166                   _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector));



1167 
1168     for (uint i = 0; i < _heap->num_regions(); i++) {
1169       ShenandoahHeapRegion *r = _heap->get_region(i);
1170       uint idx = i % 64;
1171       if ((i != 0) && (idx == 0)) {
1172         log_debug(gc)(" %6u: %s", i-64, buffer);
1173       }
1174       if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, i)) {
1175         size_t capacity = alloc_capacity(r);

1176         available_mutator += capacity;
1177         consumed_mutator += region_size_bytes - capacity;
1178         buffer[idx] = (capacity == region_size_bytes)? 'M': 'm';
1179       } else if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, i)) {
1180         size_t capacity = alloc_capacity(r);

1181         available_collector += capacity;
1182         consumed_collector += region_size_bytes - capacity;
1183         buffer[idx] = (capacity == region_size_bytes)? 'C': 'c';
1184       } else if (r->is_humongous()) {
1185         buffer[idx] = 'h';
1186       } else {
1187         buffer[idx] = '_';
1188       }
1189     }
1190     uint remnant = _heap->num_regions() % 64;
1191     if (remnant > 0) {
1192       buffer[remnant] = '\0';
1193     } else {
1194       remnant = 64;
1195     }
1196     log_debug(gc)(" %6u: %s", (uint) (_heap->num_regions() - remnant), buffer);
1197   }
1198 #endif
1199 
1200   LogTarget(Info, gc, free) lt;
1201   if (lt.is_enabled()) {
1202     ResourceMark rm;
1203     LogStream ls(lt);
1204 
1205     {
1206       idx_t last_idx = 0;
1207       size_t max = 0;

1225             } else {
1226               empty_contig = 1;
1227             }
1228           } else {
1229             empty_contig = 0;
1230           }
1231           total_used += r->used();
1232           total_free += free;
1233           max_contig = MAX2(max_contig, empty_contig);
1234           last_idx = idx;
1235         }
1236       }
1237 
1238       size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
1239       size_t free = capacity() - used();
1240 
1241       // Since certain regions that belonged to the Mutator free partition at the time of most recent rebuild may have been
1242       // retired, the sum of used and capacities within regions that are still in the Mutator free partition may not match
1243       // my internally tracked values of used() and free().
1244       assert(free == total_free, "Free memory should match");
1245 
1246       ls.print("Free: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s regular, " SIZE_FORMAT "%s humongous, ",
1247                byte_size_in_proper_unit(free),          proper_unit_for_byte_size(free),
1248                byte_size_in_proper_unit(max),           proper_unit_for_byte_size(max),
1249                byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous)
1250       );
1251 
1252       ls.print("Frag: ");
1253       size_t frag_ext;
1254       if (total_free_ext > 0) {
1255         frag_ext = 100 - (100 * max_humongous / total_free_ext);
1256       } else {
1257         frag_ext = 0;
1258       }
1259       ls.print(SIZE_FORMAT "%% external, ", frag_ext);
1260 
1261       size_t frag_int;
1262       if (_partitions.count(ShenandoahFreeSetPartitionId::Mutator) > 0) {
1263         frag_int = (100 * (total_used / _partitions.count(ShenandoahFreeSetPartitionId::Mutator))
1264                     / ShenandoahHeapRegion::region_size_bytes());
1265       } else {
1266         frag_int = 0;
1267       }
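      // Worked example with made-up numbers: if the largest run of empty regions could hold a 48 MB humongous
      // object while 64 MB is free overall, external fragmentation is 100 - (100 * 48 / 64) = 25%.  If ten 4 MB
      // Mutator regions carry 8 MB of used memory between them, internal fragmentation is
      // (100 * (8 MB / 10)) / 4 MB = 20%.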

1274     {
1275       size_t max = 0;
1276       size_t total_free = 0;
1277       size_t total_used = 0;
1278 
1279       for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector);
1280            idx <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector); idx++) {
1281         if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, idx)) {
1282           ShenandoahHeapRegion *r = _heap->get_region(idx);
1283           size_t free = alloc_capacity(r);
1284           max = MAX2(max, free);
1285           total_free += free;
1286           total_used += r->used();
1287         }
1288       }
1289       ls.print(" Collector Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s; Used: " SIZE_FORMAT "%s",
1290                byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
1291                byte_size_in_proper_unit(max),        proper_unit_for_byte_size(max),
1292                byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used));
1293     }
1294   }
1295 }
1296 
1297 HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
1298   shenandoah_assert_heaplocked();
1299   if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
1300     switch (req.type()) {
1301       case ShenandoahAllocRequest::_alloc_shared:
1302       case ShenandoahAllocRequest::_alloc_shared_gc:
1303         in_new_region = true;
1304         return allocate_contiguous(req);

1305       case ShenandoahAllocRequest::_alloc_gclab:
1306       case ShenandoahAllocRequest::_alloc_tlab:
1307         in_new_region = false;
1308         assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
1309                req.size(), ShenandoahHeapRegion::humongous_threshold_words());
1310         return nullptr;
1311       default:
1312         ShouldNotReachHere();
1313         return nullptr;
1314     }
1315   } else {
1316     return allocate_single(req, in_new_region);
1317   }
1318 }
1319 
1320 void ShenandoahFreeSet::print_on(outputStream* out) const {
1321   out->print_cr("Mutator Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::Mutator));
1322   idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
1323   for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); index <= rightmost; ) {
1324     assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, index),
1325            "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
1326     _heap->get_region(index)->print_on(out);
1327     index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, index + 1);
1328   }
1329   out->print_cr("Collector Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::Collector));
1330   rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector);
1331   for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector); index <= rightmost; ) {
1332     assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, index),
1333            "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
1334     _heap->get_region(index)->print_on(out);
1335     index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Collector, index + 1);
1336   }
1337 }
1338 
1339 double ShenandoahFreeSet::internal_fragmentation() {
1340   double squared = 0;
1341   double linear = 0;
1342   int count = 0;
1343 
1344   idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
1345   for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); index <= rightmost; ) {
1346     assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, index),
1347            "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
1348     ShenandoahHeapRegion* r = _heap->get_region(index);
1349     size_t used = r->used();
1350     squared += used * used;
1351     linear += used;
1352     count++;
1353     index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, index + 1);
1354   }
1355 
1356   if (count > 0) {

src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp

   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "gc/shared/tlab_globals.hpp"
  28 #include "gc/shenandoah/shenandoahAffiliation.hpp"
  29 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  30 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  31 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  32 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  33 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  34 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  35 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
  36 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  37 #include "gc/shenandoah/shenandoahSimpleBitMap.hpp"
  38 #include "gc/shenandoah/shenandoahSimpleBitMap.inline.hpp"
  39 #include "logging/logStream.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "runtime/orderAccess.hpp"
  42 
  43 static const char* partition_name(ShenandoahFreeSetPartitionId t) {
  44   switch (t) {
  45     case ShenandoahFreeSetPartitionId::NotFree: return "NotFree";
  46     case ShenandoahFreeSetPartitionId::Mutator: return "Mutator";
  47     case ShenandoahFreeSetPartitionId::Collector: return "Collector";
  48     case ShenandoahFreeSetPartitionId::OldCollector: return "OldCollector";
  49     default:
  50       ShouldNotReachHere();
  51       return "Unrecognized";
  52   }
  53 }
  54 
  55 #ifndef PRODUCT
  56 void ShenandoahRegionPartitions::dump_bitmap() const {
  57   log_info(gc)("Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "], Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT
  58                "], Old Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
  59                _leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)],
  60                _rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)],
  61                _leftmosts[int(ShenandoahFreeSetPartitionId::Collector)],
  62                _rightmosts[int(ShenandoahFreeSetPartitionId::Collector)],
  63                _leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)],
  64                _rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)]);
  65   log_info(gc)("Empty Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT
  66                "], Empty Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT
  67                "], Empty Old Collecto range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
  68                _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
  69                _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
  70                _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
  71                _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
  72                _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
  73                _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)]);
  74 
  75   log_info(gc)("%6s: %18s %18s %18s %18s", "index", "Mutator Bits", "Collector Bits", "Old Collector Bits", "NotFree Bits");
  76   dump_bitmap_range(0, _max-1);
  77 }
  78 
  79 void ShenandoahRegionPartitions::dump_bitmap_range(idx_t start_region_idx, idx_t end_region_idx) const {
  80   assert((start_region_idx >= 0) && (start_region_idx < (idx_t) _max), "precondition");
  81   assert((end_region_idx >= 0) && (end_region_idx < (idx_t) _max), "precondition");
  82   idx_t aligned_start = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(start_region_idx);
  83   idx_t aligned_end = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(end_region_idx);
  84   idx_t alignment = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].alignment();
  85   while (aligned_start <= aligned_end) {
  86     dump_bitmap_row(aligned_start);
  87     aligned_start += alignment;
  88   }
  89 }
  90 
  91 void ShenandoahRegionPartitions::dump_bitmap_row(idx_t region_idx) const {
  92   assert((region_idx >= 0) && (region_idx < (idx_t) _max), "precondition");
  93   idx_t aligned_idx = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(region_idx);
  94   uintx mutator_bits = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].bits_at(aligned_idx);
  95   uintx collector_bits = _membership[int(ShenandoahFreeSetPartitionId::Collector)].bits_at(aligned_idx);
  96   uintx old_collector_bits = _membership[int(ShenandoahFreeSetPartitionId::OldCollector)].bits_at(aligned_idx);
  97   uintx free_bits = mutator_bits | collector_bits | old_collector_bits;
  98   uintx notfree_bits =  ~free_bits;
  99   log_info(gc)(SSIZE_FORMAT_W(6) ": " SIZE_FORMAT_X_0 " 0x" SIZE_FORMAT_X_0 " 0x" SIZE_FORMAT_X_0 " 0x" SIZE_FORMAT_X_0,
 100                aligned_idx, mutator_bits, collector_bits, old_collector_bits, notfree_bits);
 101 }
 102 #endif
 103 
 104 ShenandoahRegionPartitions::ShenandoahRegionPartitions(size_t max_regions, ShenandoahFreeSet* free_set) :
 105     _max(max_regions),
 106     _region_size_bytes(ShenandoahHeapRegion::region_size_bytes()),
 107     _free_set(free_set),
 108     _membership{ ShenandoahSimpleBitMap(max_regions), ShenandoahSimpleBitMap(max_regions) , ShenandoahSimpleBitMap(max_regions) }
 109 {
 110   make_all_regions_unavailable();
 111 }
 112 
 113 inline bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) const {
 114   return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
 115 }
 116 
 117 inline bool ShenandoahFreeSet::can_allocate_from(size_t idx) const {
 118   ShenandoahHeapRegion* r = _heap->get_region(idx);
 119   return can_allocate_from(r);
 120 }
 121 
 122 inline size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) const {
 123   if (r->is_trash()) {
 124     // This would be recycled on allocation path
 125     return ShenandoahHeapRegion::region_size_bytes();
 126   } else {
 127     return r->free();
 128   }

 158   // which_partition is shrinking after the region that used to be leftmost is retired.
 159   return idx;
 160 }
 161 
 162 void ShenandoahRegionPartitions::make_all_regions_unavailable() {
 163   for (size_t partition_id = 0; partition_id < IntNumPartitions; partition_id++) {
 164     _membership[partition_id].clear_all();
 165     _leftmosts[partition_id] = _max;
 166     _rightmosts[partition_id] = -1;
 167     _leftmosts_empty[partition_id] = _max;
 168     _rightmosts_empty[partition_id] = -1;
 169     _capacity[partition_id] = 0;
 170     _used[partition_id] = 0;
 171   }
 172   _region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = _region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
 173 }
 174 
 175 void ShenandoahRegionPartitions::establish_mutator_intervals(idx_t mutator_leftmost, idx_t mutator_rightmost,
 176                                                              idx_t mutator_leftmost_empty, idx_t mutator_rightmost_empty,
 177                                                              size_t mutator_region_count, size_t mutator_used) {

 178   _leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_leftmost;
 179   _rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_rightmost;
 180   _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_leftmost_empty;
 181   _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_rightmost_empty;
 182 
 183   _region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_region_count;
 184   _used[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_used;
 185   _capacity[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_region_count * _region_size_bytes;
 186 
 187   _leftmosts[int(ShenandoahFreeSetPartitionId::Collector)] = _max;
 188   _rightmosts[int(ShenandoahFreeSetPartitionId::Collector)] = -1;
 189   _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)] = _max;
 190   _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)] = -1;
 191 
 192   _region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
 193   _used[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
 194   _capacity[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
 195 }
 196 
 197 void ShenandoahRegionPartitions::establish_old_collector_intervals(idx_t old_collector_leftmost, idx_t old_collector_rightmost,
 198                                                                    idx_t old_collector_leftmost_empty,
 199                                                                    idx_t old_collector_rightmost_empty,
 200                                                                    size_t old_collector_region_count, size_t old_collector_used) {
 201   _leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_leftmost;
 202   _rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_rightmost;
 203   _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_leftmost_empty;
 204   _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_rightmost_empty;
 205 
 206   _region_counts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_region_count;
 207   _used[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_used;
 208   _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_region_count * _region_size_bytes;
 209 }
 210 
 211 void ShenandoahRegionPartitions::increase_used(ShenandoahFreeSetPartitionId which_partition, size_t bytes) {
 212   assert (which_partition < NumPartitions, "Partition must be valid");
 213   _used[int(which_partition)] += bytes;
 214   assert (_used[int(which_partition)] <= _capacity[int(which_partition)],
 215           "Must not use (" SIZE_FORMAT ") more than capacity (" SIZE_FORMAT ") after increase by " SIZE_FORMAT,
 216           _used[int(which_partition)], _capacity[int(which_partition)], bytes);
 217 }
 218 
 219 inline void ShenandoahRegionPartitions::shrink_interval_if_range_modifies_either_boundary(
 220   ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx) {
 221   assert((low_idx <= high_idx) && (low_idx >= 0) && (high_idx < _max), "Range must span legal index values");
 222   if (low_idx == leftmost(partition)) {
 223     assert (!_membership[int(partition)].is_set(low_idx), "Do not shrink interval if region not removed");
 224     if (high_idx + 1 == _max) {
 225       _leftmosts[int(partition)] = _max;
 226     } else {
 227       _leftmosts[int(partition)] = find_index_of_next_available_region(partition, high_idx + 1);
 228     }
 229     if (_leftmosts_empty[int(partition)] < _leftmosts[int(partition)]) {
 230       // This gets us closer to where we need to be; we'll scan further when leftmosts_empty is requested.
 231       _leftmosts_empty[int(partition)] = _leftmosts[int(partition)];
 232     }
 233   }
 234   if (high_idx == _rightmosts[int(partition)]) {
 235     assert (!_membership[int(partition)].is_set(high_idx), "Do not shrink interval if region not removed");
 236     if (low_idx == 0) {
 237       _rightmosts[int(partition)] = -1;
 238     } else {
 239       _rightmosts[int(partition)] = find_index_of_previous_available_region(partition, low_idx - 1);
 240     }
 241     if (_rightmosts_empty[int(partition)] > _rightmosts[int(partition)]) {
 242       // This gets us closer to where we need to be; we'll scan further when rightmosts_empty is requested.
 243       _rightmosts_empty[int(partition)] = _rightmosts[int(partition)];
 244     }
 245   }
 246   if (_leftmosts[int(partition)] > _rightmosts[int(partition)]) {
 247     _leftmosts[int(partition)] = _max;
 248     _rightmosts[int(partition)] = -1;
 249     _leftmosts_empty[int(partition)] = _max;
 250     _rightmosts_empty[int(partition)] = -1;
 251   }
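  // Illustrative example with assumed indices (example only): if leftmost(partition) == 5 and the
  // retired range is [5, 6], the new leftmost becomes the next member found at or above index 7
  // (or _max if index 7 is already past the end).  If the shrink leaves leftmost > rightmost, the
  // block above resets the interval to its canonical empty form [_max, -1].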

 298 
 299   if (used_bytes < _region_size_bytes) {
 300     // Count the alignment pad remnant of memory as used when we retire this region
 301     increase_used(partition, _region_size_bytes - used_bytes);
 302   }
 303   _membership[int(partition)].clear_bit(idx);
 304   shrink_interval_if_boundary_modified(partition, idx);
 305   _region_counts[int(partition)]--;
 306 }
 307 
 308 void ShenandoahRegionPartitions::make_free(idx_t idx, ShenandoahFreeSetPartitionId which_partition, size_t available) {
 309   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 310   assert (membership(idx) == ShenandoahFreeSetPartitionId::NotFree, "Cannot make free if already free");
 311   assert (which_partition < NumPartitions, "selected free partition must be valid");
 312   assert (available <= _region_size_bytes, "Available cannot exceed region size");
 313 
 314   _membership[int(which_partition)].set_bit(idx);
 315   _capacity[int(which_partition)] += _region_size_bytes;
 316   _used[int(which_partition)] += _region_size_bytes - available;
 317   expand_interval_if_boundary_modified(which_partition, idx, available);

 318   _region_counts[int(which_partition)]++;
 319 }
 320 
 321 bool ShenandoahRegionPartitions::is_mutator_partition(ShenandoahFreeSetPartitionId p) {
 322   return (p == ShenandoahFreeSetPartitionId::Mutator);
 323 }
 324 
 325 bool ShenandoahRegionPartitions::is_young_collector_partition(ShenandoahFreeSetPartitionId p) {
 326   return (p == ShenandoahFreeSetPartitionId::Collector);
 327 }
 328 
 329 bool ShenandoahRegionPartitions::is_old_collector_partition(ShenandoahFreeSetPartitionId p) {
 330   return (p == ShenandoahFreeSetPartitionId::OldCollector);
 331 }
 332 
 333 bool ShenandoahRegionPartitions::available_implies_empty(size_t available_in_region) {
 334   return (available_in_region == _region_size_bytes);
 335 }
 336 
 337 
 338 void ShenandoahRegionPartitions::move_from_partition_to_partition(idx_t idx, ShenandoahFreeSetPartitionId orig_partition,
 339                                                                   ShenandoahFreeSetPartitionId new_partition, size_t available) {
 340   ShenandoahHeapRegion* r = ShenandoahHeap::heap()->get_region(idx);
 341   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 342   assert (orig_partition < NumPartitions, "Original partition must be valid");
 343   assert (new_partition < NumPartitions, "New partition must be valid");
 344   assert (available <= _region_size_bytes, "Available cannot exceed region size");
 345   assert (_membership[int(orig_partition)].is_set(idx), "Cannot move from partition unless in partition");
 346   assert ((r != nullptr) && ((r->is_trash() && (available == _region_size_bytes)) ||
 347                              (r->used() + available == _region_size_bytes)),
 348           "Used: " SIZE_FORMAT " + available: " SIZE_FORMAT " should equal region size: " SIZE_FORMAT,
 349           ShenandoahHeap::heap()->get_region(idx)->used(), available, _region_size_bytes);
 350 
 351   // Expected transitions:
 352   //  During rebuild:         Mutator => Collector
 353   //                          Mutator empty => Collector
 354   //                          Mutator empty => OldCollector
 355   //  During flip_to_gc:      Mutator empty => Collector
 356   //                          Mutator empty => OldCollector
 357   // At start of update refs: Collector => Mutator
 358   //                          OldCollector Empty => Mutator
 359   assert ((is_mutator_partition(orig_partition) && is_young_collector_partition(new_partition)) ||
 360           (is_mutator_partition(orig_partition) &&
 361            available_implies_empty(available) && is_old_collector_partition(new_partition)) ||
 362           (is_young_collector_partition(orig_partition) && is_mutator_partition(new_partition)) ||
 363           (is_old_collector_partition(orig_partition)
 364            && available_implies_empty(available) && is_mutator_partition(new_partition)),
 365           "Unexpected movement between partitions, available: " SIZE_FORMAT ", _region_size_bytes: " SIZE_FORMAT
 366           ", orig_partition: %s, new_partition: %s",
 367           available, _region_size_bytes, partition_name(orig_partition), partition_name(new_partition));
 368 
 369   size_t used = _region_size_bytes - available;
 370   assert (_used[int(orig_partition)] >= used,
 371           "Orig partition used: " SIZE_FORMAT " must exceed moved used: " SIZE_FORMAT " within region " SSIZE_FORMAT,
 372           _used[int(orig_partition)], used, idx);
 373 
 374   _membership[int(orig_partition)].clear_bit(idx);
 375   _membership[int(new_partition)].set_bit(idx);
 376 
 377   _capacity[int(orig_partition)] -= _region_size_bytes;
 378   _used[int(orig_partition)] -= used;
 379   shrink_interval_if_boundary_modified(orig_partition, idx);
 380 
 381   _capacity[int(new_partition)] += _region_size_bytes;
 382   _used[int(new_partition)] += used;
 383   expand_interval_if_boundary_modified(new_partition, idx, available);
 384 
 385   _region_counts[int(orig_partition)]--;
 386   _region_counts[int(new_partition)]++;
 387 }
 388 
 389 const char* ShenandoahRegionPartitions::partition_membership_name(idx_t idx) const {
 390   return partition_name(membership(idx));
 391 }
 392 

 521   idx_t leftmosts[UIntNumPartitions];
 522   idx_t rightmosts[UIntNumPartitions];
 523   idx_t empty_leftmosts[UIntNumPartitions];
 524   idx_t empty_rightmosts[UIntNumPartitions];
 525 
 526   for (uint i = 0; i < UIntNumPartitions; i++) {
 527     leftmosts[i] = _max;
 528     empty_leftmosts[i] = _max;
 529     rightmosts[i] = -1;
 530     empty_rightmosts[i] = -1;
 531   }
 532 
 533   for (idx_t i = 0; i < _max; i++) {
 534     ShenandoahFreeSetPartitionId partition = membership(i);
 535     switch (partition) {
 536       case ShenandoahFreeSetPartitionId::NotFree:
 537         break;
 538 
 539       case ShenandoahFreeSetPartitionId::Mutator:
 540       case ShenandoahFreeSetPartitionId::Collector:
 541       case ShenandoahFreeSetPartitionId::OldCollector:
 542       {
 543         size_t capacity = _free_set->alloc_capacity(i);
 544         bool is_empty = (capacity == _region_size_bytes);
 545         assert(capacity > 0, "free regions must have allocation capacity");
 546         if (i < leftmosts[int(partition)]) {
 547           leftmosts[int(partition)] = i;
 548         }
 549         if (is_empty && (i < empty_leftmosts[int(partition)])) {
 550           empty_leftmosts[int(partition)] = i;
 551         }
 552         if (i > rightmosts[int(partition)]) {
 553           rightmosts[int(partition)] = i;
 554         }
 555         if (is_empty && (i > empty_rightmosts[int(partition)])) {
 556           empty_rightmosts[int(partition)] = i;
 557         }
 558         break;
 559       }
 560 
 561       default:

 611 
 612   // If the Collector partition is empty, both leftmosts will equal _max and both rightmosts will equal -1.
 613   // Likewise for empty region partitions.
 614   beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
 615   end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
 616   assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::Collector),
 617           "free regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 618           beg_off, leftmost(ShenandoahFreeSetPartitionId::Collector));
 619   assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::Collector),
 620           "free regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 621           end_off, rightmost(ShenandoahFreeSetPartitionId::Collector));
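  // Reading of these invariants with assumed indices (example only): if the scan above found
  // Collector members only at indices 17..42, then beg_off == 17 and end_off == 42, and the cached
  // bounds must satisfy leftmost(Collector) <= 17 and rightmost(Collector) >= 42.  The cached
  // interval may be wider than the true span of members, but it must never be narrower.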
 622 
 623   beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
 624   end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
 625   assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
 626           "free empty regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 627           beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Collector));
 628   assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
 629           "free empty regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 630           end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Collector));
 631 
 632   // Performance invariants. Failing these would not break the free partition, but performance would suffer.
 633   assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) <= _max, "leftmost in bounds: "  SSIZE_FORMAT " < " SSIZE_FORMAT,
 634           leftmost(ShenandoahFreeSetPartitionId::OldCollector),  _max);
 635   assert (rightmost(ShenandoahFreeSetPartitionId::OldCollector) < _max, "rightmost in bounds: "  SSIZE_FORMAT " < " SSIZE_FORMAT,
 636           rightmost(ShenandoahFreeSetPartitionId::OldCollector),  _max);
 637 
 638   assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) == _max
 639           || partition_id_matches(leftmost(ShenandoahFreeSetPartitionId::OldCollector),
 640                                   ShenandoahFreeSetPartitionId::OldCollector),
 641           "leftmost region should be free: " SSIZE_FORMAT,  leftmost(ShenandoahFreeSetPartitionId::OldCollector));
 642   assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) == _max
 643           || partition_id_matches(rightmost(ShenandoahFreeSetPartitionId::OldCollector),
 644                                   ShenandoahFreeSetPartitionId::OldCollector),
 645           "rightmost region should be free: " SSIZE_FORMAT, rightmost(ShenandoahFreeSetPartitionId::OldCollector));
 646 
 647   // If the OldCollector partition is empty, both leftmosts will equal _max and both rightmosts will equal -1.
 648   // Likewise for empty region partitions.
 649   beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
 650   end_off = rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
 651   assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::OldCollector),
 652           "free regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 653           beg_off, leftmost(ShenandoahFreeSetPartitionId::OldCollector));
 654   assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::OldCollector),
 655           "free regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 656           end_off, rightmost(ShenandoahFreeSetPartitionId::OldCollector));
 657 
 658   beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
 659   end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
 660   assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
 661           "free empty regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 662           beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::OldCollector));
 663   assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
 664           "free empty regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 665           end_off, rightmost_empty(ShenandoahFreeSetPartitionId::OldCollector));
 666 }
 667 #endif
 668 
 669 ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
 670   _heap(heap),
 671   _partitions(max_regions, this),

 672   _alloc_bias_weight(0)
 673 {
 674   clear_internal();
 675 }
 676 
 677 void ShenandoahFreeSet::add_promoted_in_place_region_to_old_collector(ShenandoahHeapRegion* region) {
 678   shenandoah_assert_heaplocked();
 679   size_t plab_min_size_in_bytes = ShenandoahGenerationalHeap::heap()->plab_min_size() * HeapWordSize;
 680   size_t idx = region->index();
 681   size_t capacity = alloc_capacity(region);
 682   assert(_partitions.membership(idx) == ShenandoahFreeSetPartitionId::NotFree,
 683          "Regions promoted in place should have been excluded from Mutator partition");
 684   if (capacity >= plab_min_size_in_bytes) {
 685     _partitions.make_free(idx, ShenandoahFreeSetPartitionId::OldCollector, capacity);
 686     _heap->old_generation()->augment_promoted_reserve(capacity);
 687   }
 688 }
 689 
 690 HeapWord* ShenandoahFreeSet::allocate_from_partition_with_affiliation(ShenandoahFreeSetPartitionId which_partition,
 691                                                                       ShenandoahAffiliation affiliation,
 692                                                                       ShenandoahAllocRequest& req, bool& in_new_region) {
 693   shenandoah_assert_heaplocked();
 694   idx_t rightmost_collector = ((affiliation == ShenandoahAffiliation::FREE)?
 695                                _partitions.rightmost_empty(which_partition): _partitions.rightmost(which_partition));
 696   idx_t leftmost_collector = ((affiliation == ShenandoahAffiliation::FREE)?
 697                               _partitions.leftmost_empty(which_partition): _partitions.leftmost(which_partition));
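  // Note on the bounds chosen above: a request with FREE affiliation can only be satisfied by a
  // completely empty region, so the scan is confined to the cached empty-region interval; an
  // already-affiliated request walks the full partition interval instead.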
 698   if (_partitions.alloc_from_left_bias(which_partition)) {
 699     for (idx_t idx = leftmost_collector; idx <= rightmost_collector; ) {
 700       assert(_partitions.in_free_set(which_partition, idx), "Boundaries or find_prev_last_bit failed: " SSIZE_FORMAT, idx);
 701       ShenandoahHeapRegion* r = _heap->get_region(idx);
 702       if (r->affiliation() == affiliation) {
 703         HeapWord* result = try_allocate_in(r, req, in_new_region);
 704         if (result != nullptr) {
 705           return result;
 706         }
 707       }
 708       idx = _partitions.find_index_of_next_available_region(which_partition, idx + 1);
 709     }
 710   } else {
 711     for (idx_t idx = rightmost_collector; idx >= leftmost_collector; ) {
 712       assert(_partitions.in_free_set(which_partition, idx),
 713              "Boundaries or find_prev_last_bit failed: " SSIZE_FORMAT, idx);
 714       ShenandoahHeapRegion* r = _heap->get_region(idx);
 715       if (r->affiliation() == affiliation) {
 716         HeapWord* result = try_allocate_in(r, req, in_new_region);
 717         if (result != nullptr) {
 718           return result;
 719         }
 720       }
 721       idx = _partitions.find_index_of_previous_available_region(which_partition, idx - 1);
 722     }
 723   }
 724   log_debug(gc, free)("Could not allocate collector region with affiliation: %s for request " PTR_FORMAT,
 725                       shenandoah_affiliation_name(affiliation), p2i(&req));
 726   return nullptr;
 727 }
 728 
 729 HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
 730   shenandoah_assert_heaplocked();
 731 
 732   // Scan the bitmap looking for a first fit.
 733   //
 734   // Leftmost and rightmost bounds provide enough caching to walk bitmap efficiently. Normally,
 735   // we would find the region to allocate at right away.
 736   //
 737   // Allocations are biased: GC allocations are taken from the high end of the heap.  Regular (and TLAB)
 738   // mutator allocations are taken from the middle of heap, below the memory reserved for Collector.
 739   // Humongous mutator allocations are taken from the bottom of the heap.
 740   //
 741   // Free set maintains mutator and collector partitions.  Normally, each allocates only from its partition,
 742   // except in special cases when the collector steals regions from the mutator partition.
 743 
 744   // Overwrite with non-zero (non-NULL) values only if necessary for allocation bookkeeping.
 745   bool allow_new_region = true;
 746   if (_heap->mode()->is_generational()) {
 747     switch (req.affiliation()) {
 748       case ShenandoahAffiliation::OLD_GENERATION:
 749         // Note: unsigned result from free_unaffiliated_regions() will never be less than zero, but it may equal zero.
 750         if (_heap->old_generation()->free_unaffiliated_regions() <= 0) {
 751           allow_new_region = false;
 752         }
 753         break;
 754 
 755       case ShenandoahAffiliation::YOUNG_GENERATION:
 756         // Note: unsigned result from free_unaffiliated_regions() will never be less than zero, but it may equal zero.
 757         if (_heap->young_generation()->free_unaffiliated_regions() <= 0) {
 758           allow_new_region = false;
 759         }
 760         break;
 761 
 762       case ShenandoahAffiliation::FREE:
 763         fatal("Should request affiliation");
 764 
 765       default:
 766         ShouldNotReachHere();
 767         break;
 768     }
 769   }
 770   switch (req.type()) {
 771     case ShenandoahAllocRequest::_alloc_tlab:
 772     case ShenandoahAllocRequest::_alloc_shared: {
 773       // Try to allocate in the mutator view
 774       if (_alloc_bias_weight-- <= 0) {
 775         // We have observed that regions not collected in previous GC cycle tend to congregate at one end or the other
 776         // of the heap.  Typically, these are the more recently engaged regions and the objects in these regions have not
 777         // yet had a chance to die (and/or are treated as floating garbage).  If we use the same allocation bias on each
 778         // GC pass, these "most recently" engaged regions for GC pass N will also be the "most recently" engaged regions
 779         // for GC pass N+1, and the relatively large amount of live data and/or floating garbage introduced
 780         // during the most recent GC pass may once again prevent the region from being collected.  We have found that
 781         // alternating the allocation behavior between GC passes improves evacuation performance by 3-7% on certain
 782         // benchmarks.  In the best case, this has the effect of consuming these partially consumed regions before
 783         // the start of the next mark cycle so all of their garbage can be efficiently reclaimed.
 784         //
 785         // First, finish consuming regions that are already partially consumed so as to more tightly limit ranges of
 786         // available regions.  Other potential benefits:
 787         //  1. Eventual collection set has fewer regions because we have packed newly allocated objects into fewer regions
 788         //  2. We preserve the "empty" regions longer into the GC cycle, reducing likelihood of allocation failures
 789         //     late in the GC cycle.
 790         idx_t non_empty_on_left = (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator)
 791                                      - _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator));
 792         idx_t non_empty_on_right = (_partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator)
 793                                       - _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator));
 794         _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::Mutator, (non_empty_on_right < non_empty_on_left));
 795         _alloc_bias_weight = _InitialAllocBiasWeight;
 796       }
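        // Illustrative decision with assumed indices (example only): leftmost == 10 and
        // leftmost_empty == 40 give non_empty_on_left == 30; rightmost == 200 and
        // rightmost_empty == 190 give non_empty_on_right == 10.  Since 10 < 30, the bias is set
        // left-to-right, so allocation first consumes the larger pool of partially used regions
        // at the low end of the Mutator interval, as described in the comment above.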
 797       if (!_partitions.alloc_from_left_bias(ShenandoahFreeSetPartitionId::Mutator)) {
 798         // Allocate within mutator free from high memory to low so as to preserve low memory for humongous allocations
 799         if (!_partitions.is_empty(ShenandoahFreeSetPartitionId::Mutator)) {
 800           // Use signed idx.  Otherwise, loop will never terminate.
 801           idx_t leftmost = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator);
 802           for (idx_t idx = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator); idx >= leftmost; ) {
 803             assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx),
 804                    "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, idx);
 805             ShenandoahHeapRegion* r = _heap->get_region(idx);
 806             // try_allocate_in() increases used if the allocation is successful.
 807             HeapWord* result;
 808             size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab)? req.min_size(): req.size();
 809             if ((alloc_capacity(r) >= min_size) && ((result = try_allocate_in(r, req, in_new_region)) != nullptr)) {
 810               return result;
 811             }
 812             idx = _partitions.find_index_of_previous_available_region(ShenandoahFreeSetPartitionId::Mutator, idx - 1);
 813           }
 814         }
 815       } else {
 816         // Allocate from low to high memory.  This keeps the range of fully empty regions more tightly packed.
 817         // Note that the most recently allocated regions tend not to be evacuated in a given GC cycle.  So this

 822           for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); idx <= rightmost; ) {
 823             assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx),
 824                    "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, idx);
 825             ShenandoahHeapRegion* r = _heap->get_region(idx);
 826             // try_allocate_in() increases used if the allocation is successful.
 827             HeapWord* result;
 828             size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab)? req.min_size(): req.size();
 829             if ((alloc_capacity(r) >= min_size) && ((result = try_allocate_in(r, req, in_new_region)) != nullptr)) {
 830               return result;
 831             }
 832             idx = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, idx + 1);
 833           }
 834         }
 835       }
 836       // There is no recovery. Mutator does not touch collector view at all.
 837       break;
 838     }
 839     case ShenandoahAllocRequest::_alloc_gclab:
 840       // GCLABs are for evacuation so we must be in evacuation phase.
 841 
 842     case ShenandoahAllocRequest::_alloc_plab: {
 843       // PLABs always reside in old-gen and are only allocated during
 844       // evacuation phase.
 845 
 846     case ShenandoahAllocRequest::_alloc_shared_gc: {
 847       // Fast-path: try to allocate in the collector view first
 848       HeapWord* result;
 849       result = allocate_from_partition_with_affiliation(req.is_old()? ShenandoahFreeSetPartitionId::OldCollector:
 850                                                         ShenandoahFreeSetPartitionId::Collector,
 851                                                         req.affiliation(), req, in_new_region);
 852       if (result != nullptr) {
 853         return result;
 854       } else if (allow_new_region) {
 855         // Try a free region that is dedicated to GC allocations.
 856         result = allocate_from_partition_with_affiliation(req.is_old()? ShenandoahFreeSetPartitionId::OldCollector:
 857                                                           ShenandoahFreeSetPartitionId::Collector,
 858                                                           ShenandoahAffiliation::FREE, req, in_new_region);
 859         if (result != nullptr) {
 860           return result;
 861         }

 862       }
 863 
 864       // No dice. Can we borrow space from mutator view?
 865       if (!ShenandoahEvacReserveOverflow) {
 866         return nullptr;
 867       }
 868       if (!allow_new_region && req.is_old() && (_heap->young_generation()->free_unaffiliated_regions() > 0)) {
 869         // This allows us to flip a mutator region to old_collector
 870         allow_new_region = true;
 871       }
 872 
 873       // We should expand old-gen if this can prevent an old-gen evacuation failure.  We don't care so much about
 874       // promotion failures since they can be mitigated in a subsequent GC pass.  Would be nice to know if this
 875       // allocation request is for evacuation or promotion.  Individual threads limit their use of PLAB memory for
 876       // promotions, so we already have an assurance that any additional memory set aside for old-gen will be used
 877       // only for old-gen evacuations.
 878 
 879       // TODO:
 880       // if (GC is idle (out of cycle) and mutator allocation fails and there is memory reserved in Collector
 881       // or OldCollector sets, transfer a region of memory so that we can satisfy the allocation request, and
 882       // immediately trigger the start of GC.  It is better to satisfy the allocation than to trigger an out-of-cycle
 883       // allocation failure (even if this means we have a little less memory to handle evacuations during the
 884       // subsequent GC pass).
 885 
 886       if (allow_new_region) {
 887         // Try to steal an empty region from the mutator view.
 888         idx_t rightmost_mutator = _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator);
 889         idx_t leftmost_mutator =  _partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator);
 890         for (idx_t idx = rightmost_mutator; idx >= leftmost_mutator; ) {
 891           assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx),
 892                  "Boundaries or find_prev_last_bit failed: " SSIZE_FORMAT, idx);
 893           ShenandoahHeapRegion* r = _heap->get_region(idx);
 894           if (can_allocate_from(r)) {
 895             if (req.is_old()) {
 896               flip_to_old_gc(r);
 897             } else {
 898               flip_to_gc(r);
 899             }
 900           // Region r is entirely empty.  If try_allocate_in() fails on region r, something else is really wrong.
 901             // Don't bother to retry with other regions.
 902             log_debug(gc, free)("Flipped region " SIZE_FORMAT " to gc for request: " PTR_FORMAT, idx, p2i(&req));
 903             return try_allocate_in(r, req, in_new_region);
 904           }
 905           idx = _partitions.find_index_of_previous_available_region(ShenandoahFreeSetPartitionId::Mutator, idx - 1);
 906         }

 907       }

 908       // No dice. Do not try to mix mutator and GC allocations, because adjusting region UWM
 909       // due to GC allocations would expose unparsable mutator allocations.
 910       break;
 911     }
 912     }
 913     default:
 914       ShouldNotReachHere();
 915   }
 916   return nullptr;
 917 }
 918 
 919 // This work method takes an argument corresponding to the number of bytes
 920 // free in a region, and returns the largest amount in heapwords that can be allocated
 921 // such that both of the following conditions are satisfied:
 922 //
 923 // 1. it is a multiple of card size
 924 // 2. any remaining shard may be filled with a filler object
 925 //
 926 // The idea is that the allocation starts and ends at card boundaries. Because
 927 // the end of a region is card-aligned, the remainder shard that must be filled is
 928 // at the start of the free space.
 929 //
 930 // This is merely a helper method to use for the purpose of such a calculation.
 931 size_t ShenandoahFreeSet::get_usable_free_words(size_t free_bytes) const {
 932   // e.g. card_size is 512, card_shift is 9, min_fill_size() is 8
 933   //      free is 514
 934   //      usable_free is 512, which is decreased to 0
 935   size_t usable_free = (free_bytes / CardTable::card_size()) << CardTable::card_shift();
 936   assert(usable_free <= free_bytes, "Sanity check");
 937   if ((free_bytes != usable_free) && (free_bytes - usable_free < ShenandoahHeap::min_fill_size() * HeapWordSize)) {
 938     // After aligning to card multiples, the remainder would be smaller than
 939     // the minimum filler object, so we'll need to take away another card's
 940     // worth to construct a filler object.
 941     if (usable_free >= CardTable::card_size()) {
 942       usable_free -= CardTable::card_size();
 943     } else {
 944       assert(usable_free == 0, "usable_free is a multiple of card_size and card_size > min_fill_size");
 945     }
 946   }
 947 
 948   return usable_free / HeapWordSize;
 949 }
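// A second worked example, using the same assumed values as the comment above (512-byte cards,
// 8-byte heap words, min_fill_size() of 8 words): free_bytes == 1000 gives usable_free == 512;
// the 488-byte remainder is large enough for a filler object, so no card is given back and the
// function returns 512 / 8 == 64 words.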
 950 
 951 // Given a size argument, which is a multiple of card size, a request struct
 952 // for a PLAB, and an old region, return a pointer to the allocated space for
 953 // a PLAB which is card-aligned and where any remaining shard in the region
 954 // has been suitably filled by a filler object.
 955 // It is assumed (and assertion-checked) that such an allocation is always possible.
 956 HeapWord* ShenandoahFreeSet::allocate_aligned_plab(size_t size, ShenandoahAllocRequest& req, ShenandoahHeapRegion* r) {
 957   assert(_heap->mode()->is_generational(), "PLABs are only for generational mode");
 958   assert(r->is_old(), "All PLABs reside in old-gen");
 959   assert(!req.is_mutator_alloc(), "PLABs should not be allocated by mutators.");
 960   assert(is_aligned(size, CardTable::card_size_in_words()), "Align by design");
 961 
 962   HeapWord* result = r->allocate_aligned(size, req, CardTable::card_size());
 963   assert(result != nullptr, "Allocation cannot fail");
 964   assert(r->top() <= r->end(), "Allocation cannot span end of region");
 965   assert(is_aligned(result, CardTable::card_size_in_words()), "Align by design");
 966   return result;
 967 }
 968 
 969 HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
 970   assert (has_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());
 971   if (_heap->is_concurrent_weak_root_in_progress() && r->is_trash()) {
 972     return nullptr;
 973   }

 974   HeapWord* result = nullptr;
 975   try_recycle_trashed(r);
 976   in_new_region = r->is_empty();
 977 
 978   if (in_new_region) {
 979     log_debug(gc)("Using new region (" SIZE_FORMAT ") for %s (" PTR_FORMAT ").",
 980                        r->index(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(&req));
 981     assert(!r->is_affiliated(), "New region " SIZE_FORMAT " should be unaffiliated", r->index());
 982     r->set_affiliation(req.affiliation());
 983     if (r->is_old()) {
 984       // Any OLD region allocated during concurrent coalesce-and-fill does not need to be coalesced and filled because
 985       // all objects allocated within this region are above TAMS (and thus are implicitly marked).  In case this is an
 986       // OLD region and concurrent preparation for mixed evacuations visits this region before the start of the next
 987       // old-gen concurrent mark (i.e. this region is allocated following the start of old-gen concurrent mark but before
 988       // concurrent preparations for mixed evacuations are completed), we mark this region as not requiring any
 989       // coalesce-and-fill processing.
 990       r->end_preemptible_coalesce_and_fill();
 991       _heap->old_generation()->clear_cards_for(r);
 992     }
 993     _heap->generation_for(r->affiliation())->increment_affiliated_region_count();
 994 
 995 #ifdef ASSERT
 996     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
 997     assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom");
 998     assert(ctx->is_bitmap_clear_range(ctx->top_bitmap(r), r->end()), "Bitmap above top_bitmap() must be clear");
 999 #endif
1000     log_debug(gc)("Using new region (" SIZE_FORMAT ") for %s (" PTR_FORMAT ").",
1001                        r->index(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(&req));
1002   } else {
1003     assert(r->is_affiliated(), "Region " SIZE_FORMAT " that is not new should be affiliated", r->index());
1004     if (r->affiliation() != req.affiliation()) {
1005       assert(_heap->mode()->is_generational(), "Request for %s from %s region should only happen in generational mode.",
1006              req.affiliation_name(), r->affiliation_name());
1007       return nullptr;
1008     }
1009   }
1010 
1011   // req.size() is in words, r->free() is in bytes.
1012   if (req.is_lab_alloc()) {

1013     size_t adjusted_size = req.size();
1014     size_t free = r->free();    // free represents bytes available within region r
1015     if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
1016       // This is a PLAB allocation
1017       assert(_heap->mode()->is_generational(), "PLABs are only for generational mode");
1018       assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, r->index()),
1019              "PLABS must be allocated in old_collector_free regions");
1020 
1021       // Need to ensure that PLABs are aligned on a multiple of the card size
1022       // Convert free from unaligned bytes to aligned number of words
1023       size_t usable_free = get_usable_free_words(free);
1024       if (adjusted_size > usable_free) {
1025         adjusted_size = usable_free;
1026       }
1027       adjusted_size = align_down(adjusted_size, CardTable::card_size_in_words());
1028       if (adjusted_size >= req.min_size()) {
1029         result = allocate_aligned_plab(adjusted_size, req, r);
1030         assert(result != nullptr, "allocate must succeed");
1031         req.set_actual_size(adjusted_size);
1032       } else {
1033         // Otherwise, leave result == nullptr because the adjusted size is smaller than min size.
1034         log_trace(gc, free)("Failed to shrink PLAB request (" SIZE_FORMAT ") in region " SIZE_FORMAT " to " SIZE_FORMAT
1035                             " because min_size() is " SIZE_FORMAT, req.size(), r->index(), adjusted_size, req.min_size());
1036       }
1037     } else {
1038       // This is a GCLAB or a TLAB allocation
1039       // Convert free from unaligned bytes to aligned number of words
1040       free = align_down(free >> LogHeapWordSize, MinObjAlignment);
1041       if (adjusted_size > free) {
1042         adjusted_size = free;
1043       }
1044       if (adjusted_size >= req.min_size()) {
1045         result = r->allocate(adjusted_size, req);
1046         assert (result != nullptr, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, adjusted_size);
1047         req.set_actual_size(adjusted_size);
1048       } else {
1049         log_trace(gc, free)("Failed to shrink TLAB or GCLAB request (" SIZE_FORMAT ") in region " SIZE_FORMAT " to " SIZE_FORMAT
1050                             " because min_size() is " SIZE_FORMAT, req.size(), r->index(), adjusted_size, req.min_size());
1051       }
1052     }
1053   } else {
1054     size_t size = req.size();
1055     result = r->allocate(size, req);
1056     if (result != nullptr) {
1057       // Record actual allocation size




1058       req.set_actual_size(size);
1059     }
1060   }
1061 
1062   if (result != nullptr) {
1063     // Allocation successful, bump stats:
1064     if (req.is_mutator_alloc()) {
1065       assert(req.is_young(), "Mutator allocations always come from young generation.");
1066       _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, req.actual_size() * HeapWordSize);
1067     } else {
1068       assert(req.is_gc_alloc(), "Should be gc_alloc since req wasn't mutator alloc");
1069 
1070       // For GC allocations, we advance update_watermark because the objects relocated into this memory during
1071       // evacuation are not updated during evacuation.  For both young and old regions r, it is essential that all
1072       // PLABs be made parsable at the end of evacuation.  This is enabled by retiring all plabs at end of evacuation.
1073       // TODO: Making a PLAB parsable involves placing a filler object in its remnant memory but does not require
1074       // that the PLAB be disabled for all future purposes.  We may want to introduce a new service to make the
1075       // PLABs parsable while still allowing the PLAB to serve future allocation requests that arise during the
1076       // next evacuation pass.
1077       r->set_update_watermark(r->top());
1078       if (r->is_old()) {
1079         _partitions.increase_used(ShenandoahFreeSetPartitionId::OldCollector, req.actual_size() * HeapWordSize);
1080         assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "old-gen allocations use PLAB or shared allocation");
1081         // for plabs, we'll sort the difference between evac and promotion usage when we retire the plab
1082       } else {
1083         _partitions.increase_used(ShenandoahFreeSetPartitionId::Collector, req.actual_size() * HeapWordSize);
1084       }
1085     }
1086   }
1087 
1088   static const size_t min_capacity = (size_t) (ShenandoahHeapRegion::region_size_bytes() * (1.0 - 1.0 / ShenandoahEvacWaste));
1089   size_t ac = alloc_capacity(r);
1090 
1091   if (((result == nullptr) && (ac < min_capacity)) || (alloc_capacity(r) < PLAB::min_size() * HeapWordSize)) {
1092     // Regardless of whether this allocation succeeded, if the remaining memory is less than PLAB::min_size(), retire this region.
1093     // Note that retire_from_partition() increases used to account for waste.
1094 
1095     // Also, if this allocation request failed and the consumed within this region * ShenandoahEvacWaste > region size,
1096     // then retire the region so that subsequent searches can find available memory more quickly.
1097 
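    // Illustrative arithmetic with assumed values (example only; the actual region size and
    // ShenandoahEvacWaste setting may differ): with 2 MB regions and ShenandoahEvacWaste == 1.25,
    // min_capacity == 2 MB * (1 - 1/1.25) == 0.4 MB.  A failed allocation in a region with less
    // than 0.4 MB remaining retires the region, and any region whose remaining capacity drops
    // below PLAB::min_size() * HeapWordSize is retired even after a successful allocation.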
1098     size_t idx = r->index();
1099     ShenandoahFreeSetPartitionId orig_partition;
1100     if (req.is_mutator_alloc()) {
1101       orig_partition = ShenandoahFreeSetPartitionId::Mutator;
1102     } else if (req.type() == ShenandoahAllocRequest::_alloc_gclab) {
1103       orig_partition = ShenandoahFreeSetPartitionId::Collector;
1104     } else if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
1105       orig_partition = ShenandoahFreeSetPartitionId::OldCollector;
1106     } else {
1107       assert(req.type() == ShenandoahAllocRequest::_alloc_shared_gc, "Unexpected allocation type");
1108       if (req.is_old()) {
1109         orig_partition = ShenandoahFreeSetPartitionId::OldCollector;
1110       } else {
1111         orig_partition = ShenandoahFreeSetPartitionId::Collector;
1112       }
1113     }
1114     _partitions.retire_from_partition(orig_partition, idx, r->used());
1115     _partitions.assert_bounds();
1116   }
1117   return result;
1118 }
1119 
1120 HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
1121   assert(req.is_mutator_alloc(), "All humongous allocations are performed by mutator");
1122   shenandoah_assert_heaplocked();
1123 
1124   size_t words_size = req.size();
1125   idx_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
1126 
1127   assert(req.is_young(), "Humongous regions always allocated in YOUNG");
1128   ShenandoahGeneration* generation = _heap->generation_for(req.affiliation());
1129 
1130   // Check if there are enough regions left to satisfy allocation.
1131   if (num > (idx_t) _partitions.count(ShenandoahFreeSetPartitionId::Mutator)) {
1132     return nullptr;
1133   }
1134 
1135   idx_t start_range = _partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator);
1136   idx_t end_range = _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator) + 1;
1137   idx_t last_possible_start = end_range - num;
1138 
1139   // Find the continuous interval of $num regions, starting from $beg and ending in $end,
1140   // inclusive. Contiguous allocations are biased to the beginning.
1141   idx_t beg = _partitions.find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId::Mutator,
1142                                                                           start_range, num);
1143   if (beg > last_possible_start) {
1144     // Hit the end, goodbye
1145     return nullptr;
1146   }
1147   idx_t end = beg;
1148 
1149   while (true) {
1150     // We've confirmed num contiguous regions belonging to Mutator partition, so no need to confirm membership.
1151     // If region is not completely free, the current [beg; end] is useless, and we may fast-forward.  If we can extend
1152     // the existing range, we can exploit that certain regions are already known to be in the Mutator free set.
1153     while (!can_allocate_from(_heap->get_region(end))) {
1154       // region[end] is not empty, so we restart our search after region[end]
1155       idx_t slide_delta = end + 1 - beg;
1156       if (beg + slide_delta > last_possible_start) {
1157         // no room to slide
1158         return nullptr;
1159       }
1160       for (idx_t span_end = beg + num; slide_delta > 0; slide_delta--) {
1161         if (!_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, span_end)) {
1162           beg = _partitions.find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId::Mutator,

1167           span_end++;
1168         }
1169       }
1170       // Here, either beg identifies a range of num regions all of which are in the Mutator free set, or beg > last_possible_start
1171       if (beg > last_possible_start) {
1172         // Hit the end, goodbye
1173         return nullptr;
1174       }
1175       end = beg;
1176     }
1177 
1178     if ((end - beg + 1) == num) {
1179       // found the match
1180       break;
1181     }
1182 
1183     end++;
1184   }
1185 
1186   size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
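  // Illustrative sizing with assumed values (example only): with 2 MB regions and 8-byte heap
  // words, region_size_words() == 262144.  A humongous request of words_size == 600000 needs
  // num == 3 regions; remainder == 600000 - 2 * 262144 == 75712 words, so the trailing region's
  // top is set 75712 words above bottom and the unused 186432 words are recorded as waste below.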
1187   bool is_generational = _heap->mode()->is_generational();
1188   // Initialize regions:
1189   for (idx_t i = beg; i <= end; i++) {
1190     ShenandoahHeapRegion* r = _heap->get_region(i);
1191     try_recycle_trashed(r);
1192 
1193     assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous");
1194     assert(r->is_empty(), "Should be empty");
1195 
1196     if (i == beg) {
1197       r->make_humongous_start();
1198     } else {
1199       r->make_humongous_cont();
1200     }
1201 
1202     // Trailing region may be non-full, record the remainder there
1203     size_t used_words;
1204     if ((i == end) && (remainder != 0)) {
1205       used_words = remainder;
1206     } else {
1207       used_words = ShenandoahHeapRegion::region_size_words();
1208     }
1209 
1210     r->set_affiliation(req.affiliation());
1211     r->set_update_watermark(r->bottom());
1212     r->set_top(r->bottom() + used_words);
1213   }
1214   generation->increase_affiliated_region_count(num);
1215   if (remainder != 0) {
1216     // Record this remainder as allocation waste
1217     _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);
1218   }
1219 
1220   // retire_range_from_partition() will adjust bounds on Mutator free set if appropriate
1221   _partitions.retire_range_from_partition(ShenandoahFreeSetPartitionId::Mutator, beg, end);
1222 
1223   size_t total_humongous_size = ShenandoahHeapRegion::region_size_bytes() * num;
1224   _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, total_humongous_size);
1225   _partitions.assert_bounds();
1226   req.set_actual_size(words_size);
1227   if (remainder != 0) {
1228     req.set_waste(ShenandoahHeapRegion::region_size_words() - remainder);
1229   }
1230   return _heap->get_region(beg)->bottom();
1231 }
1232 
1233 void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
1234   if (r->is_trash()) {

1235     r->recycle();
1236   }
1237 }
1238 
1239 void ShenandoahFreeSet::recycle_trash() {
1240   // lock is not reentrant, check we don't already hold it
1241   shenandoah_assert_not_heaplocked();
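  // Design note on the loop below: the heap lock is taken and released once per trashed region,
  // with a SpinPause() between iterations, so recycling never holds the lock across the whole
  // heap and allocators waiting for the lock can make progress between regions.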

1242   for (size_t i = 0; i < _heap->num_regions(); i++) {
1243     ShenandoahHeapRegion* r = _heap->get_region(i);
1244     if (r->is_trash()) {
1245       ShenandoahHeapLocker locker(_heap->lock());
1246       try_recycle_trashed(r);
1247     }
1248     SpinPause(); // allow allocators to take the lock
1249   }
1250 }
1251 
1252 void ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) {
1253   size_t idx = r->index();
1254 
1255   assert(_partitions.partition_id_matches(idx, ShenandoahFreeSetPartitionId::Mutator), "Should be in mutator view");
1256   assert(can_allocate_from(r), "Should not be allocated");
1257 
1258   ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
1259   size_t region_capacity = alloc_capacity(r);
1260   _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
1261                                                ShenandoahFreeSetPartitionId::OldCollector, region_capacity);
1262   _partitions.assert_bounds();
1263   _heap->old_generation()->augment_evacuation_reserve(region_capacity);
1264   bool transferred = gen_heap->generation_sizer()->transfer_to_old(1);
1265   if (!transferred) {
1266     log_warning(gc, free)("Forcing transfer of " SIZE_FORMAT " to old reserve.", idx);
1267     gen_heap->generation_sizer()->force_transfer_to_old(1);
1268   }
1269   // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
1270   // to recycle trash before attempting to allocate anything in the region.
1271 }
1272 
1273 void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
1274   size_t idx = r->index();
1275 
1276   assert(_partitions.partition_id_matches(idx, ShenandoahFreeSetPartitionId::Mutator), "Should be in mutator view");
1277   assert(can_allocate_from(r), "Should not be allocated");
1278 
1279   size_t ac = alloc_capacity(r);
1280   _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
1281                                                ShenandoahFreeSetPartitionId::Collector, ac);
1282   _partitions.assert_bounds();
1283 
1284   // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
1285   // to recycle trash before attempting to allocate anything in the region.
1286 }
1287 
1288 void ShenandoahFreeSet::clear() {
1289   shenandoah_assert_heaplocked();
1290   clear_internal();
1291 }
1292 
1293 void ShenandoahFreeSet::clear_internal() {
1294   _partitions.make_all_regions_unavailable();

1295 
1296   _alloc_bias_weight = 0;
1297   _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::Mutator, true);
1298   _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::Collector, false);
1299   _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::OldCollector, false);
1300 }
1301 
1302 void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regions, size_t &old_cset_regions,
1303                                                          size_t &first_old_region, size_t &last_old_region,
1304                                                          size_t &old_region_count) {
1305   clear_internal();
1306 
1307   first_old_region = _heap->num_regions();
1308   last_old_region = 0;
1309   old_region_count = 0;
1310   old_cset_regions = 0;
1311   young_cset_regions = 0;
1312 
1313   size_t region_size_bytes = _partitions.region_size_bytes();
1314   size_t max_regions = _partitions.max_regions();
1315 
1316   size_t mutator_leftmost = max_regions;
1317   size_t mutator_rightmost = 0;
1318   size_t mutator_leftmost_empty = max_regions;
1319   size_t mutator_rightmost_empty = 0;

1320   size_t mutator_regions = 0;
1321   size_t mutator_used = 0;
1322 
1323   size_t old_collector_leftmost = max_regions;
1324   size_t old_collector_rightmost = 0;
1325   size_t old_collector_leftmost_empty = max_regions;
1326   size_t old_collector_rightmost_empty = 0;
1327   size_t old_collector_regions = 0;
1328   size_t old_collector_used = 0;
1329 
1330   for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
1331     ShenandoahHeapRegion* region = _heap->get_region(idx);
1332     if (region->is_trash()) {
1333       // Trashed regions represent regions that had been in the collection partition but have not yet been "cleaned up".
1334       // The cset regions are not "trashed" until we have finished update refs.
1335       if (region->is_old()) {
1336         old_cset_regions++;
1337       } else {
1338         assert(region->is_young(), "Trashed region should be old or young");
1339         young_cset_regions++;
1340       }
1341     } else if (region->is_old()) {
1342       // count both humongous and regular regions, but don't count trash (cset) regions.
1343       old_region_count++;
1344       if (first_old_region > idx) {
1345         first_old_region = idx;
1346       }
1347       last_old_region = idx;
1348     }
1349     if (region->is_alloc_allowed() || region->is_trash()) {
1350       assert(!region->is_cset(), "Shouldn't be adding cset regions to the free set");
1351 
1352       // Do not add regions that would almost surely fail allocation
1353       size_t ac = alloc_capacity(region);
1354       if (ac > PLAB::min_size() * HeapWordSize) {
1355         if (region->is_trash() || !region->is_old()) {
1356           // Both young and old collected regions (trashed) are placed into the Mutator set
1357           _partitions.raw_assign_membership(idx, ShenandoahFreeSetPartitionId::Mutator);
1358           if (idx < mutator_leftmost) {
1359             mutator_leftmost = idx;






1360           }
1361           if (idx > mutator_rightmost) {
1362             mutator_rightmost = idx;
1363           }
1364           if (ac == region_size_bytes) {
1365             if (idx < mutator_leftmost_empty) {
1366               mutator_leftmost_empty = idx;
1367             }
1368             if (idx > mutator_rightmost_empty) {
1369               mutator_rightmost_empty = idx;
1370             }
1371           }
1372           mutator_regions++;
1373           mutator_used += (region_size_bytes - ac);
1374         } else {
1375           // !region->is_trash() && region->is_old()
1376           _partitions.raw_assign_membership(idx, ShenandoahFreeSetPartitionId::OldCollector);
1377           if (idx < old_collector_leftmost) {
1378             old_collector_leftmost = idx;
1379           }
1380           if (idx > old_collector_rightmost) {
1381             old_collector_rightmost = idx;
1382           }
1383           if (ac == region_size_bytes) {
1384             if (idx < old_collector_leftmost_empty) {
1385               old_collector_leftmost_empty = idx;
1386             }
1387             if (idx > old_collector_rightmost_empty) {
1388               old_collector_rightmost_empty = idx;
1389             }
1390           }
1391           old_collector_regions++;
1392           old_collector_used += (region_size_bytes - ac);
1393         }







1394       }
1395     }
1396   }
1397   log_debug(gc)("  At end of prep_to_rebuild, mutator_leftmost: " SIZE_FORMAT
1398                 ", mutator_rightmost: " SIZE_FORMAT
1399                 ", mutator_leftmost_empty: " SIZE_FORMAT
1400                 ", mutator_rightmost_empty: " SIZE_FORMAT
1401                 ", mutator_regions: " SIZE_FORMAT
1402                 ", mutator_used: " SIZE_FORMAT,
1403                 mutator_leftmost, mutator_rightmost, mutator_leftmost_empty, mutator_rightmost_empty,
1404                 mutator_regions, mutator_used);
1405 
1406   log_debug(gc)("  old_collector_leftmost: " SIZE_FORMAT
1407                 ", old_collector_rightmost: " SIZE_FORMAT
1408                 ", old_collector_leftmost_empty: " SIZE_FORMAT
1409                 ", old_collector_rightmost_empty: " SIZE_FORMAT
1410                 ", old_collector_regions: " SIZE_FORMAT
1411                 ", old_collector_used: " SIZE_FORMAT,
1412                 old_collector_leftmost, old_collector_rightmost, old_collector_leftmost_empty, old_collector_rightmost_empty,
1413                 old_collector_regions, old_collector_used);
1414 
1415   _partitions.establish_mutator_intervals(mutator_leftmost, mutator_rightmost, mutator_leftmost_empty, mutator_rightmost_empty,
1416                                           mutator_regions, mutator_used);
1417   _partitions.establish_old_collector_intervals(old_collector_leftmost, old_collector_rightmost, old_collector_leftmost_empty,
1418                                                 old_collector_rightmost_empty, old_collector_regions, old_collector_used);
1419   log_debug(gc)("  After find_regions_with_alloc_capacity(), Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "],"
1420                 "  Old Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
1421                 _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
1422                 _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
1423                 _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector),
1424                 _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector));
1425 }
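
As a rough model of the bookkeeping in the loop above, the standalone sketch below (plain C++, invented capacities) tracks one partition's leftmost/rightmost bounds, its empty-region bounds, its member count, and its used bytes in the same way: bounds widen as members are found, and used is the region size minus the allocatable capacity.

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
      const size_t region_size = 2 * 1024 * 1024;          // illustrative region size
      // Hypothetical per-region allocatable capacity; 0 means the region is skipped.
      std::vector<size_t> alloc_capacity = {0, region_size, 512 * 1024, 0, region_size};

      size_t leftmost = alloc_capacity.size(), rightmost = 0;
      size_t leftmost_empty = alloc_capacity.size(), rightmost_empty = 0;
      size_t members = 0, used = 0;

      for (size_t idx = 0; idx < alloc_capacity.size(); idx++) {
        size_t ac = alloc_capacity[idx];
        if (ac == 0) continue;                             // would almost surely fail allocation
        if (idx < leftmost)  leftmost = idx;               // widen partition bounds
        if (idx > rightmost) rightmost = idx;
        if (ac == region_size) {                           // completely empty region
          if (idx < leftmost_empty)  leftmost_empty = idx;
          if (idx > rightmost_empty) rightmost_empty = idx;
        }
        members++;
        used += region_size - ac;                          // partially consumed portion
      }
      printf("range [%zu, %zu], empty range [%zu, %zu], members=%zu, used=%zu\n",
             leftmost, rightmost, leftmost_empty, rightmost_empty, members, used);
      return 0;
    }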
1426 
1427 // Returns the number of regions transferred; adds the transferred bytes to the by-reference argument bytes_transferred
1428 size_t ShenandoahFreeSet::transfer_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId which_collector,
1429                                                                                    size_t max_xfer_regions,
1430                                                                                    size_t& bytes_transferred) {
1431   shenandoah_assert_heaplocked();
1432   size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1433   size_t transferred_regions = 0;
1434   idx_t rightmost = _partitions.rightmost_empty(which_collector);
1435   for (idx_t idx = _partitions.leftmost_empty(which_collector); (transferred_regions < max_xfer_regions) && (idx <= rightmost); ) {
1436     assert(_partitions.in_free_set(which_collector, idx), "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, idx);
1437     // Note: can_allocate_from() denotes that the region is entirely empty
1438     if (can_allocate_from(idx)) {
1439       _partitions.move_from_partition_to_partition(idx, which_collector, ShenandoahFreeSetPartitionId::Mutator, region_size_bytes);
1440       transferred_regions++;
1441       bytes_transferred += region_size_bytes;
1442     }
1443     idx = _partitions.find_index_of_next_available_region(which_collector, idx + 1);
1444   }
1445   return transferred_regions;
1446 }
1447 
1448 // Returns the number of regions transferred; adds the transferred bytes to the by-reference argument bytes_transferred
1449 size_t ShenandoahFreeSet::transfer_non_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId collector_id,
1450                                                                                        size_t max_xfer_regions,
1451                                                                                        size_t& bytes_transferred) {
1452   shenandoah_assert_heaplocked();
1453   size_t transferred_regions = 0;
1454   idx_t rightmost = _partitions.rightmost(collector_id);
1455   for (idx_t idx = _partitions.leftmost(collector_id); (transferred_regions < max_xfer_regions) && (idx <= rightmost); ) {
1456     assert(_partitions.in_free_set(collector_id, idx), "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, idx);
1457     size_t ac = alloc_capacity(idx);
1458     if (ac > 0) {
1459       _partitions.move_from_partition_to_partition(idx, collector_id, ShenandoahFreeSetPartitionId::Mutator, ac);
1460       transferred_regions++;
1461       bytes_transferred += ac;
1462     }
1463     idx = _partitions.find_index_of_next_available_region(collector_id, idx + 1);
1464   }
1465   return transferred_regions;
1466 }
1467 
1468 void ShenandoahFreeSet::move_regions_from_collector_to_mutator(size_t max_xfer_regions) {
1469   size_t collector_xfer = 0;
1470   size_t old_collector_xfer = 0;
1471 
1472   // Process empty regions within the Collector free partition
1473   if ((max_xfer_regions > 0) &&
1474       (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Collector)
1475        <= _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Collector))) {
1476     ShenandoahHeapLocker locker(_heap->lock());
1477     max_xfer_regions -=
1478       transfer_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId::Collector, max_xfer_regions,
1479                                                                collector_xfer);
1480   }
1481 
1482   // Process empty regions within the OldCollector free partition
1483   if ((max_xfer_regions > 0) &&
1484       (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::OldCollector)
1485        <= _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::OldCollector))) {
1486     ShenandoahHeapLocker locker(_heap->lock());
1487     size_t old_collector_regions =
1488       transfer_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId::OldCollector, max_xfer_regions,
1489                                                                old_collector_xfer);
1490     max_xfer_regions -= old_collector_regions;
1491     if (old_collector_regions > 0) {
1492       ShenandoahGenerationalHeap::cast(_heap)->generation_sizer()->transfer_to_young(old_collector_regions);
1493     }
1494   }
1495 
1496   // If there are any non-empty regions within the Collector partition, we can also move them to the Mutator free partition
1497   if ((max_xfer_regions > 0) && (_partitions.leftmost(ShenandoahFreeSetPartitionId::Collector)
1498                                  <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector))) {
1499     ShenandoahHeapLocker locker(_heap->lock());
1500     max_xfer_regions -=
1501       transfer_non_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId::Collector, max_xfer_regions,
1502                                                                    collector_xfer);











1503   }
1504 
1505   size_t total_xfer = collector_xfer + old_collector_xfer;
1506   log_info(gc, free)("At start of update refs, moving " SIZE_FORMAT "%s to Mutator free set from Collector Reserve ("
1507                      SIZE_FORMAT "%s) and from Old Collector Reserve (" SIZE_FORMAT "%s)",
1508                      byte_size_in_proper_unit(total_xfer), proper_unit_for_byte_size(total_xfer),
1509                      byte_size_in_proper_unit(collector_xfer), proper_unit_for_byte_size(collector_xfer),
1510                      byte_size_in_proper_unit(old_collector_xfer), proper_unit_for_byte_size(old_collector_xfer));
1511 }
1512 
1513 
1514 // Overwrite arguments to represent the amount of memory in each generation that is about to be recycled
1515 void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_cset_regions, size_t &old_cset_regions,
1516                                            size_t &first_old_region, size_t &last_old_region, size_t &old_region_count) {
1517   shenandoah_assert_heaplocked();
1518   // This resets all state information, removing all regions from all sets.
1519   clear();
1520   log_debug(gc, free)("Rebuilding FreeSet");
1521 
1522   // This places regions that have alloc_capacity into the old_collector set if they identify as is_old(), or into the
1523   // mutator set otherwise.  All trashed (cset) regions are affiliated young and placed into the mutator set.
1524   find_regions_with_alloc_capacity(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
1525 }
1526 
1527 void ShenandoahFreeSet::establish_generation_sizes(size_t young_region_count, size_t old_region_count) {
1528   assert(young_region_count + old_region_count == ShenandoahHeap::heap()->num_regions(), "Sanity");
1529   if (ShenandoahHeap::heap()->mode()->is_generational()) {
1530     ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
1531     ShenandoahOldGeneration* old_gen = heap->old_generation();
1532     ShenandoahYoungGeneration* young_gen = heap->young_generation();
1533     size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1534 
1535     old_gen->set_capacity(old_region_count * region_size_bytes);
1536     young_gen->set_capacity(young_region_count * region_size_bytes);
1537   }
1538 }
1539 
1540 void ShenandoahFreeSet::finish_rebuild(size_t young_cset_regions, size_t old_cset_regions, size_t old_region_count,
1541                                        bool have_evacuation_reserves) {
1542   shenandoah_assert_heaplocked();
1543   size_t young_reserve(0), old_reserve(0);
1544 
1545   if (_heap->mode()->is_generational()) {
1546     compute_young_and_old_reserves(young_cset_regions, old_cset_regions, have_evacuation_reserves,
1547                                    young_reserve, old_reserve);






1548   } else {
1549     young_reserve = (_heap->max_capacity() / 100) * ShenandoahEvacReserve;
1550     old_reserve = 0;
1551   }
1552 
1553   // Move some of the mutator regions into the Collector and OldCollector partitions in order to satisfy
1554   // young_reserve and old_reserve.
1555   reserve_regions(young_reserve, old_reserve, old_region_count);
1556   size_t young_region_count = _heap->num_regions() - old_region_count;
1557   establish_generation_sizes(young_region_count, old_region_count);
1558   establish_old_collector_alloc_bias();
1559   _partitions.assert_bounds();
1560   log_status();
1561 }
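
For the non-generational branch above, the young reserve is simply a percentage of maximum heap capacity. A worked example with invented numbers (5 is believed to be the usual default for ShenandoahEvacReserve, but any percentage illustrates the point):

    #include <cstddef>
    #include <cstdio>

    int main() {
      // Illustrative numbers only: a 1 GiB heap and a 5% evacuation reserve.
      const size_t max_capacity = 1024u * 1024u * 1024u;
      const size_t evac_reserve_percent = 5;

      // Mirrors the non-generational branch above: reserve a fixed slice of the heap
      // for evacuations; the old reserve stays zero because there is no old generation.
      const size_t young_reserve = (max_capacity / 100) * evac_reserve_percent;
      printf("young_reserve = %zu bytes (~%zu MiB)\n", young_reserve, young_reserve >> 20);
      return 0;
    }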
1562 
1563 void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions, size_t old_cset_regions,
1564                                                        bool have_evacuation_reserves,
1565                                                        size_t& young_reserve_result, size_t& old_reserve_result) const {
1566   shenandoah_assert_generational();
1567   const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1568 
1569   ShenandoahOldGeneration* const old_generation = _heap->old_generation();
1570   size_t old_available = old_generation->available();
1571   size_t old_unaffiliated_regions = old_generation->free_unaffiliated_regions();
1572   ShenandoahYoungGeneration* const young_generation = _heap->young_generation();
1573   size_t young_capacity = young_generation->max_capacity();
1574   size_t young_unaffiliated_regions = young_generation->free_unaffiliated_regions();
1575 
1576   // Add in the regions we anticipate to be freed by evacuation of the collection set
1577   old_unaffiliated_regions += old_cset_regions;
1578   young_unaffiliated_regions += young_cset_regions;
1579 
1580   // Consult old-region balance to make adjustments to current generation capacities and availability.
1581   // The generation region transfers take place after we rebuild.
1582   const ssize_t old_region_balance = old_generation->get_region_balance();
1583   if (old_region_balance != 0) {
1584 #ifdef ASSERT
1585     if (old_region_balance > 0) {
1586       assert(old_region_balance <= checked_cast<ssize_t>(old_unaffiliated_regions), "Cannot transfer regions that are affiliated");
1587     } else {
1588       assert(0 - old_region_balance <= checked_cast<ssize_t>(young_unaffiliated_regions), "Cannot transfer regions that are affiliated");
1589     }
1590 #endif
1591 
1592     ssize_t xfer_bytes = old_region_balance * checked_cast<ssize_t>(region_size_bytes);
1593     old_available -= xfer_bytes;
1594     old_unaffiliated_regions -= old_region_balance;
1595     young_capacity += xfer_bytes;
1596     young_unaffiliated_regions += old_region_balance;
1597   }
1598 
1599   // All allocations taken from the old collector set are performed by GC, generally using PLABs for both
1600   // promotions and evacuations.  The partition between which old memory is reserved for evacuation and
1601   // which is reserved for promotion is enforced using thread-local variables that prescribe intentions for
1602   // each PLAB's available memory.
1603   if (have_evacuation_reserves) {
1604     // We are rebuilding at the end of final mark, having already established evacuation budgets for this GC pass.
1605     const size_t promoted_reserve = old_generation->get_promoted_reserve();
1606     const size_t old_evac_reserve = old_generation->get_evacuation_reserve();
1607     young_reserve_result = young_generation->get_evacuation_reserve();
1608     old_reserve_result = promoted_reserve + old_evac_reserve;
1609     assert(old_reserve_result <= old_available,
1610            "Cannot reserve (" SIZE_FORMAT " + " SIZE_FORMAT") more OLD than is available: " SIZE_FORMAT,
1611            promoted_reserve, old_evac_reserve, old_available);
1612   } else {
1613     // We are rebuilding at end of GC, so we set aside budgets specified on command line (or defaults)
1614     young_reserve_result = (young_capacity * ShenandoahEvacReserve) / 100;
1615     // The auto-sizer has already made old-gen large enough to hold all anticipated evacuations and promotions.
1616     // Affiliated old-gen regions are already in the OldCollector free set.  Add in the relevant number of
1617     // unaffiliated regions.
1618     old_reserve_result = old_available;
1619   }
1620 
1621   // Old available regions that have less than PLAB::min_size() of available memory are not placed into the OldCollector
1622   // free set.  Because of this, old_available may not have enough memory to represent the intended reserve.  Adjust
1623   // the reserve downward to account for this possibility. This loss is part of the reason why the original budget
1624   // was adjusted with ShenandoahOldEvacWaste and ShenandoahOldPromoWaste multipliers.
1625   if (old_reserve_result >
1626       _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes) {
1627     old_reserve_result =
1628       _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes;
1629   }
1630 
1631   if (young_reserve_result > young_unaffiliated_regions * region_size_bytes) {
1632     young_reserve_result = young_unaffiliated_regions * region_size_bytes;
1633   }
1634 }
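
The old-region balance adjustment above shifts whole unaffiliated regions between the generations before the rebuild. The standalone sketch below (invented sizes and counts, not HotSpot code) walks through that arithmetic; a negative balance would simply move regions in the other direction.

    #include <cstdio>

    int main() {
      // Illustrative values: 2 MiB regions, and a balance of +3 regions to move from old to young.
      const long long region_size_bytes = 2LL * 1024 * 1024;
      long long old_available     = 64 * region_size_bytes;
      long long young_capacity    = 256 * region_size_bytes;
      long long old_unaffiliated  = 10;
      long long young_unaffiliated = 20;
      const long long old_region_balance = 3;

      // Mirrors the adjustment above: capacity and availability shift by whole regions.
      long long xfer_bytes = old_region_balance * region_size_bytes;
      old_available        -= xfer_bytes;
      old_unaffiliated     -= old_region_balance;
      young_capacity       += xfer_bytes;
      young_unaffiliated   += old_region_balance;

      printf("transfer %lld bytes; old now %lld bytes / %lld free regions, "
             "young now %lld bytes / %lld free regions\n",
             xfer_bytes, old_available, old_unaffiliated, young_capacity, young_unaffiliated);
      return 0;
    }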
1635 
1636 // Having placed all regions that have allocation capacity into the mutator set if they identify as is_young()
1637 // or into the old collector set if they identify as is_old(), move some of these regions from the mutator set
1638 // into the collector set or old collector set in order to assure that the memory available for allocations within
1639 // the collector set is at least to_reserve and the memory available for allocations within the old collector set
1640 // is at least to_reserve_old.
1641 void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old, size_t &old_region_count) {
1642   for (size_t i = _heap->num_regions(); i > 0; i--) {
1643     size_t idx = i - 1;
1644     ShenandoahHeapRegion* r = _heap->get_region(idx);

1645     if (!_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx)) {
1646       continue;
1647     }
1648 
1649     size_t ac = alloc_capacity(r);
1650     assert (ac > 0, "Membership in free set implies has capacity");
1651     assert (!r->is_old() || r->is_trash(), "Except for trash, mutator_is_free regions should not be affiliated OLD");
1652 
1653     bool move_to_old_collector = _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector) < to_reserve_old;
1654     bool move_to_collector = _partitions.available_in(ShenandoahFreeSetPartitionId::Collector) < to_reserve;
1655 
1656     if (!move_to_collector && !move_to_old_collector) {
1657       // We've satisfied both to_reserve and to_reserve_old
1658       break;
1659     }
1660 
1661     if (move_to_old_collector) {
1662       // We give priority to OldCollector partition because we desire to pack OldCollector regions into higher
1663       // addresses than Collector regions.  Presumably, OldCollector regions are more "stable" and less likely to
1664       // be collected in the near future.
1665       if (r->is_trash() || !r->is_affiliated()) {
1666         // OLD regions that have available memory are already in the old_collector free set.
1667         _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
1668                                                      ShenandoahFreeSetPartitionId::OldCollector, ac);
1669         log_debug(gc)("  Shifting region " SIZE_FORMAT " from mutator_free to old_collector_free", idx);
1670         log_debug(gc)("  Shifted Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "],"
1671                       "  Old Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
1672                       _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
1673                       _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
1674                       _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector),
1675                       _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector));
1676         old_region_count++;
1677         continue;
1678       }
1679     }
1680 
1681     if (move_to_collector) {
1682       // Note: In a previous implementation, regions were only placed into the survivor space (collector_is_free) if
1683       // they were entirely empty.  This has the effect of causing new Mutator allocation to reside next to objects
1684       // that have already survived at least one GC, mixing ephemeral with longer-lived objects in the same region.
1685       // Any objects that have survived a GC are less likely to immediately become garbage, so a region that contains
1686       // survivor objects is less likely to be selected for the collection set.  This alternative implementation allows
1687       // survivor regions to continue accumulating other survivor objects, and makes it more likely that ephemeral objects
1688       // occupy regions comprised entirely of ephemeral objects.  These regions are highly likely to be included in the next
1689       // collection set, and they are easily evacuated because they have low density of live objects.
1690       _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
1691                                                    ShenandoahFreeSetPartitionId::Collector, ac);
1692       log_debug(gc)("  Shifting region " SIZE_FORMAT " from mutator_free to collector_free", idx);
1693       log_debug(gc)("  Shifted Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "],"
1694                     "  Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
1695                     _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
1696                     _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
1697                     _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector),
1698                     _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector));
1699     }
1700   }
1701 
1702   if (LogTarget(Info, gc, free)::is_enabled()) {
1703     size_t old_reserve = _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector);
1704     if (old_reserve < to_reserve_old) {
1705       log_info(gc, free)("Wanted " PROPERFMT " for old reserve, but only reserved: " PROPERFMT,
1706                          PROPERFMTARGS(to_reserve_old), PROPERFMTARGS(old_reserve));
1707     }
1708     size_t reserve = _partitions.capacity_of(ShenandoahFreeSetPartitionId::Collector);
1709     if (reserve < to_reserve) {
1710       log_debug(gc)("Wanted " PROPERFMT " for young reserve, but only reserved: " PROPERFMT,
1711                     PROPERFMTARGS(to_reserve), PROPERFMTARGS(reserve));
1712     }
1713   }
1714 }
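
A rough standalone sketch of the walk above, with invented budgets and region indices: regions are visited from the highest index downward, the OldCollector reserve is topped up first, then the Collector reserve, and the walk stops as soon as both budgets are met. This only models the control flow, not the HotSpot code.

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
      const size_t region_size    = 2 * 1024 * 1024;    // illustrative
      const size_t to_reserve_old = 3 * region_size;     // invented budgets
      const size_t to_reserve     = 2 * region_size;
      std::vector<size_t> free_regions = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};

      size_t old_reserved = 0, young_reserved = 0;
      for (size_t i = free_regions.size(); i > 0; i--) {
        size_t idx = free_regions[i - 1];
        if (old_reserved >= to_reserve_old && young_reserved >= to_reserve) {
          break;                                         // both budgets satisfied
        }
        if (old_reserved < to_reserve_old) {
          old_reserved += region_size;                   // old reserve has priority,
          printf("region %zu -> OldCollector\n", idx);   // packed at high addresses
        } else {
          young_reserved += region_size;
          printf("region %zu -> Collector\n", idx);
        }
      }
      return 0;
    }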
1715 
1716 void ShenandoahFreeSet::establish_old_collector_alloc_bias() {
1717   ShenandoahHeap* heap = ShenandoahHeap::heap();
1718   shenandoah_assert_heaplocked();
1719 
1720   idx_t left_idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector);
1721   idx_t right_idx = _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector);
1722   idx_t middle = (left_idx + right_idx) / 2;
1723   size_t available_in_first_half = 0;
1724   size_t available_in_second_half = 0;
1725 
1726   for (idx_t index = left_idx; index < middle; index++) {
1727     if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, index)) {
1728       ShenandoahHeapRegion* r = heap->get_region((size_t) index);
1729       available_in_first_half += r->free();
1730     }
1731   }
1732   for (idx_t index = middle; index <= right_idx; index++) {
1733     if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, index)) {
1734       ShenandoahHeapRegion* r = heap->get_region(index);
1735       available_in_second_half += r->free();
1736     }
1737   }
1738 
1739   // We desire to first consume the sparsely distributed regions in order that the remaining regions are densely packed.
1740   // Densely packing regions reduces the effort to search for a region that has sufficient memory to satisfy a new allocation
1741   // request.  Regions become sparsely distributed following a Full GC, which tends to slide all regions to the front of the
1742   // heap rather than allowing survivor regions to remain at the high end of the heap where we intend for them to congregate.
1743 
1744   // TODO: In the future, we may modify Full GC so that it slides old objects to the end of the heap and young objects to the
1745   // front of the heap. If this is done, we can always search survivor Collector and OldCollector regions right to left.
1746   _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::OldCollector,
1747                                           (available_in_second_half > available_in_first_half));
1748 }
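
To illustrate the half-and-half comparison above with invented numbers: when the upper half of the OldCollector interval holds more free memory, the bias is set left to right so that the sparser lower half is consumed first, and vice versa. A minimal sketch:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
      // Hypothetical free bytes per OldCollector region, ordered by region index.
      std::vector<size_t> free_bytes = {64, 32, 0, 0, 0, 128, 256, 512};
      size_t middle = free_bytes.size() / 2;

      size_t first_half = 0, second_half = 0;
      for (size_t i = 0; i < middle; i++)                 first_half  += free_bytes[i];
      for (size_t i = middle; i < free_bytes.size(); i++) second_half += free_bytes[i];

      // More free memory on the right => consume the sparse left side first.
      bool bias_left_to_right = (second_half > first_half);
      printf("first half: %zu, second half: %zu, allocate %s\n",
             first_half, second_half, bias_left_to_right ? "left to right" : "right to left");
      return 0;
    }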
1749 
1750 void ShenandoahFreeSet::log_status_under_lock() {
1751   // Must not be heap locked; this acquires the heap lock only when logging is enabled
1752   shenandoah_assert_not_heaplocked();
1753   if (LogTarget(Info, gc, free)::is_enabled()
1754       DEBUG_ONLY(|| LogTarget(Debug, gc, free)::is_enabled())) {
1755     ShenandoahHeapLocker locker(_heap->lock());
1756     log_status();
1757   }
1758 }
1759 
1760 void ShenandoahFreeSet::log_status() {
1761   shenandoah_assert_heaplocked();
1762 
1763 #ifdef ASSERT
1764   // Dump of the FreeSet details is only enabled if assertions are enabled
1765   if (LogTarget(Debug, gc, free)::is_enabled()) {
1766 #define BUFFER_SIZE 80
1767     size_t retired_old = 0;
1768     size_t retired_old_humongous = 0;
1769     size_t retired_young = 0;
1770     size_t retired_young_humongous = 0;
1771     size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1772     size_t retired_young_waste = 0;
1773     size_t retired_old_waste = 0;
1774     size_t consumed_collector = 0;
1775     size_t consumed_old_collector = 0;
1776     size_t consumed_mutator = 0;
1777     size_t available_old = 0;
1778     size_t available_young = 0;
1779     size_t available_mutator = 0;
1780     size_t available_collector = 0;
1781     size_t available_old_collector = 0;
1782 
1783     char buffer[BUFFER_SIZE];
1784     for (uint i = 0; i < BUFFER_SIZE; i++) {
1785       buffer[i] = '\0';
1786     }
1787 
1788     log_debug(gc)("FreeSet map legend:"
1789                        " M:mutator_free C:collector_free O:old_collector_free"
1790                        " H:humongous ~:retired old _:retired young");
1791     log_debug(gc)(" mutator free range [" SIZE_FORMAT ".." SIZE_FORMAT "] allocating from %s, "
1792                   " collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "], "
1793                   "old collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "] allocates from %s",
1794                   _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
1795                   _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
1796                   _partitions.alloc_from_left_bias(ShenandoahFreeSetPartitionId::Mutator)? "left to right": "right to left",
1797                   _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector),
1798                   _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector),
1799                   _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector),
1800                   _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector),
1801                   _partitions.alloc_from_left_bias(ShenandoahFreeSetPartitionId::OldCollector)? "left to right": "right to left");
1802 
1803     for (uint i = 0; i < _heap->num_regions(); i++) {
1804       ShenandoahHeapRegion *r = _heap->get_region(i);
1805       uint idx = i % 64;
1806       if ((i != 0) && (idx == 0)) {
1807         log_debug(gc)(" %6u: %s", i-64, buffer);
1808       }
1809       if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, i)) {
1810         size_t capacity = alloc_capacity(r);
1811         assert(!r->is_old() || r->is_trash(), "Old regions except trash regions should not be in mutator_free set");
1812         available_mutator += capacity;
1813         consumed_mutator += region_size_bytes - capacity;
1814         buffer[idx] = (capacity == region_size_bytes)? 'M': 'm';
1815       } else if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, i)) {
1816         size_t capacity = alloc_capacity(r);
1817         assert(!r->is_old() || r->is_trash(), "Old regions except trash regions should not be in collector_free set");
1818         available_collector += capacity;
1819         consumed_collector += region_size_bytes - capacity;
1820         buffer[idx] = (capacity == region_size_bytes)? 'C': 'c';
1821       } else if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, i)) {
1822         size_t capacity = alloc_capacity(r);
1823         available_old_collector += capacity;
1824         consumed_old_collector += region_size_bytes - capacity;
1825         buffer[idx] = (capacity == region_size_bytes)? 'O': 'o';
1826       } else if (r->is_humongous()) {
1827         if (r->is_old()) {
1828           buffer[idx] = 'H';
1829           retired_old_humongous += region_size_bytes;
1830         } else {
1831           buffer[idx] = 'h';
1832           retired_young_humongous += region_size_bytes;
1833         }
1834       } else {
1835         if (r->is_old()) {
1836           buffer[idx] = '~';
1837           retired_old_waste += alloc_capacity(r);
1838           retired_old += region_size_bytes;
1839         } else {
1840           buffer[idx] = '_';
1841           retired_young_waste += alloc_capacity(r);
1842           retired_young += region_size_bytes;
1843         }
1844       }
1845     }
1846     uint remnant = _heap->num_regions() % 64;
1847     if (remnant > 0) {
1848       buffer[remnant] = '\0';
1849     } else {
1850       remnant = 64;
1851     }
1852     log_debug(gc)(" %6u: %s", (uint) (_heap->num_regions() - remnant), buffer);
1853   }
1854 #endif
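
The row buffering in the debug map above (64 region glyphs per printed line, with a truncated final row) can be tricky to read. Below is a minimal standalone sketch, with an invented region count and placeholder glyphs, that reproduces only the buffering and flushing logic: a row is flushed whenever the index wraps, and the final, possibly partial row is flushed after the loop.

    #include <cstdio>
    #include <cstring>

    int main() {
      const unsigned num_regions = 150;              // invented region count
      char buffer[65];
      memset(buffer, 0, sizeof(buffer));

      for (unsigned i = 0; i < num_regions; i++) {
        unsigned idx = i % 64;
        if (i != 0 && idx == 0) {
          printf(" %6u: %s\n", i - 64, buffer);      // flush the previous full row
        }
        buffer[idx] = (i % 7 == 0) ? 'M' : '_';      // stand-in for the partition glyphs
      }
      unsigned remnant = num_regions % 64;
      if (remnant > 0) {
        buffer[remnant] = '\0';                      // truncate the partial last row
      } else {
        remnant = 64;
      }
      printf(" %6u: %s\n", num_regions - remnant, buffer);
      return 0;
    }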
1855 
1856   LogTarget(Info, gc, free) lt;
1857   if (lt.is_enabled()) {
1858     ResourceMark rm;
1859     LogStream ls(lt);
1860 
1861     {
1862       idx_t last_idx = 0;
1863       size_t max = 0;

1881             } else {
1882               empty_contig = 1;
1883             }
1884           } else {
1885             empty_contig = 0;
1886           }
1887           total_used += r->used();
1888           total_free += free;
1889           max_contig = MAX2(max_contig, empty_contig);
1890           last_idx = idx;
1891         }
1892       }
1893 
1894       size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
1895       size_t free = capacity() - used();
1896 
1897       // Since certain regions that belonged to the Mutator free partition at the time of most recent rebuild may have been
1898       // retired, the sum of used and capacities within regions that are still in the Mutator free partition may not match
1899       // my internally tracked values of used() and free().
1900       assert(free == total_free, "Free memory should match");

1901       ls.print("Free: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s regular, " SIZE_FORMAT "%s humongous, ",
1902                byte_size_in_proper_unit(total_free),    proper_unit_for_byte_size(total_free),
1903                byte_size_in_proper_unit(max),           proper_unit_for_byte_size(max),
1904                byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous)
1905       );
1906 
1907       ls.print("Frag: ");
1908       size_t frag_ext;
1909       if (total_free_ext > 0) {
1910         frag_ext = 100 - (100 * max_humongous / total_free_ext);
1911       } else {
1912         frag_ext = 0;
1913       }
1914       ls.print(SIZE_FORMAT "%% external, ", frag_ext);
1915 
1916       size_t frag_int;
1917       if (_partitions.count(ShenandoahFreeSetPartitionId::Mutator) > 0) {
1918         frag_int = (100 * (total_used / _partitions.count(ShenandoahFreeSetPartitionId::Mutator))
1919                     / ShenandoahHeapRegion::region_size_bytes());
1920       } else {
1921         frag_int = 0;
1922       }

1929     {
1930       size_t max = 0;
1931       size_t total_free = 0;
1932       size_t total_used = 0;
1933 
1934       for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector);
1935            idx <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector); idx++) {
1936         if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, idx)) {
1937           ShenandoahHeapRegion *r = _heap->get_region(idx);
1938           size_t free = alloc_capacity(r);
1939           max = MAX2(max, free);
1940           total_free += free;
1941           total_used += r->used();
1942         }
1943       }
1944       ls.print(" Collector Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s; Used: " SIZE_FORMAT "%s",
1945                byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
1946                byte_size_in_proper_unit(max),        proper_unit_for_byte_size(max),
1947                byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used));
1948     }
1949 
1950     if (_heap->mode()->is_generational()) {
1951       size_t max = 0;
1952       size_t total_free = 0;
1953       size_t total_used = 0;
1954 
1955       for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector);
1956            idx <= _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector); idx++) {
1957         if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, idx)) {
1958           ShenandoahHeapRegion *r = _heap->get_region(idx);
1959           size_t free = alloc_capacity(r);
1960           max = MAX2(max, free);
1961           total_free += free;
1962           total_used += r->used();
1963         }
1964       }
1965       ls.print_cr(" Old Collector Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s; Used: " SIZE_FORMAT "%s",
1966                   byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
1967                   byte_size_in_proper_unit(max),        proper_unit_for_byte_size(max),
1968                   byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used));
1969     }
1970   }
1971 }
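
To make the two fragmentation figures printed above concrete, here is a standalone sketch with invented numbers. The total_free_ext figure accumulated earlier in this function (in lines not shown here) appears to be the free memory in completely empty mutator regions; the sketch uses an invented stand-in for it. External fragmentation compares the largest contiguous empty run against that total; internal fragmentation compares the average usage of mutator regions against the region size.

    #include <cstddef>
    #include <cstdio>

    int main() {
      // Invented figures, in bytes, for a mutator free set of 8 regions of 2 MiB each.
      const size_t region_size   = 2u * 1024u * 1024u;
      const size_t mutator_count = 8;
      const size_t total_used    = 6u * 1024u * 1024u;          // bytes already allocated
      const size_t total_free_in_empty_regions = 10u * 1024u * 1024u;
      const size_t max_contiguous_empty        = 6u * 1024u * 1024u;

      // External fragmentation: how much of the empty memory cannot serve one
      // maximally sized humongous allocation.
      size_t frag_ext = (total_free_in_empty_regions > 0)
          ? 100 - (100 * max_contiguous_empty / total_free_in_empty_regions) : 0;

      // Internal fragmentation: average fill level of regions still in the mutator set.
      size_t frag_int = (mutator_count > 0)
          ? (100 * (total_used / mutator_count)) / region_size : 0;

      printf("Frag: %zu%% external, %zu%% internal\n", frag_ext, frag_int);
      return 0;
    }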
1972 
1973 HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
1974   shenandoah_assert_heaplocked();
1975   if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
1976     switch (req.type()) {
1977       case ShenandoahAllocRequest::_alloc_shared:
1978       case ShenandoahAllocRequest::_alloc_shared_gc:
1979         in_new_region = true;
1980         return allocate_contiguous(req);
1981       case ShenandoahAllocRequest::_alloc_plab:
1982       case ShenandoahAllocRequest::_alloc_gclab:
1983       case ShenandoahAllocRequest::_alloc_tlab:
1984         in_new_region = false;
1985         assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
1986                req.size(), ShenandoahHeapRegion::humongous_threshold_words());
1987         return nullptr;
1988       default:
1989         ShouldNotReachHere();
1990         return nullptr;
1991     }
1992   } else {
1993     return allocate_single(req, in_new_region);
1994   }
1995 }
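
The dispatch above hinges on the humongous threshold. The sketch below uses invented sizes; the real value comes from ShenandoahHeapRegion::humongous_threshold_words(), and a single region's worth of words is assumed here purely for illustration.

    #include <cstddef>
    #include <cstdio>

    int main() {
      // Invented values: 2 MiB regions, 8-byte words, threshold assumed equal to one region.
      const size_t humongous_threshold_words = (2u * 1024u * 1024u) / 8u;

      const size_t small_req = 4 * 1024;                       // ordinary request, in words
      const size_t large_req = 3 * humongous_threshold_words;  // spans several regions

      printf("%zu words -> %s\n", small_req,
             small_req > humongous_threshold_words ? "allocate_contiguous" : "allocate_single");
      printf("%zu words -> %s\n", large_req,
             large_req > humongous_threshold_words ? "allocate_contiguous" : "allocate_single");
      return 0;
    }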
1996 
1997 void ShenandoahFreeSet::print_on(outputStream* out) const {
1998   out->print_cr("Mutator Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::Mutator));
1999   idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
2000   for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); index <= rightmost; ) {
2001     assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, index),
2002            "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
2003     _heap->get_region(index)->print_on(out);
2004     index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, index + 1);
2005   }
2006   out->print_cr("Collector Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::Collector));
2007   rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector);
2008   for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector); index <= rightmost; ) {
2009     assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, index),
2010            "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
2011     _heap->get_region(index)->print_on(out);
2012     index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Collector, index + 1);
2013   }
2014   if (_heap->mode()->is_generational()) {
2015     out->print_cr("Old Collector Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::OldCollector));
2016     for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector);
2017          index <= _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector); index++) {
2018       if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, index)) {
2019         _heap->get_region(index)->print_on(out);
2020       }
2021     }
2022   }
2023 }
2024 
2025 double ShenandoahFreeSet::internal_fragmentation() {
2026   double squared = 0;
2027   double linear = 0;
2028   int count = 0;
2029 
2030   idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
2031   for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); index <= rightmost; ) {
2032     assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, index),
2033            "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
2034     ShenandoahHeapRegion* r = _heap->get_region(index);
2035     size_t used = r->used();
2036     squared += used * used;
2037     linear += used;
2038     count++;
2039     index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, index + 1);
2040   }
2041 
2042   if (count > 0) {
< prev index next >