1 /*
   2  * Copyright (c) 2016, 2021, Red Hat, Inc. All rights reserved.
   3  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "gc/shared/tlab_globals.hpp"
  28 #include "gc/shenandoah/shenandoahAffiliation.hpp"
  29 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  30 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  31 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  32 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  33 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  34 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  35 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
  36 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  37 #include "logging/logStream.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "runtime/orderAccess.hpp"
  40 
  41 ShenandoahSetsOfFree::ShenandoahSetsOfFree(size_t max_regions, ShenandoahFreeSet* free_set) :
  42     _max(max_regions),
  43     _free_set(free_set),
  44     _region_size_bytes(ShenandoahHeapRegion::region_size_bytes())
  45 {
  46   _membership = NEW_C_HEAP_ARRAY(ShenandoahFreeMemoryType, max_regions, mtGC);
  47   clear_internal();
  48 }
  49 
  50 ShenandoahSetsOfFree::~ShenandoahSetsOfFree() {
  51   FREE_C_HEAP_ARRAY(ShenandoahFreeMemoryType, _membership);
  52 }
  53 
  54 
  55 void ShenandoahSetsOfFree::clear_internal() {
  56   for (size_t idx = 0; idx < _max; idx++) {
  57     _membership[idx] = NotFree;
  58   }
  59 
  60   for (size_t idx = 0; idx < NumFreeSets; idx++) {
  61     _leftmosts[idx] = _max;
  62     _rightmosts[idx] = 0;
  63     _leftmosts_empty[idx] = _max;
  64     _rightmosts_empty[idx] = 0;
  65     _capacity_of[idx] = 0;
  66     _used_by[idx] = 0;
  67   }
  68 
  69   _left_to_right_bias[Mutator] = true;
  70   _left_to_right_bias[Collector] = false;
  71   _left_to_right_bias[OldCollector] = false;
  72 
  73   _region_counts[Mutator] = 0;
  74   _region_counts[Collector] = 0;
  75   _region_counts[OldCollector] = 0;
  76   _region_counts[NotFree] = _max;
  77 }
  78 
  79 void ShenandoahSetsOfFree::clear_all() {
  80   clear_internal();
  81 }
  82 
  83 void ShenandoahSetsOfFree::increase_used(ShenandoahFreeMemoryType which_set, size_t bytes) {
  84   assert (which_set > NotFree && which_set < NumFreeSets, "Set must correspond to a valid freeset");
  85   _used_by[which_set] += bytes;
  86   assert (_used_by[which_set] <= _capacity_of[which_set],
  87           "Must not use (" SIZE_FORMAT ") more than capacity (" SIZE_FORMAT ") after increase by " SIZE_FORMAT,
  88           _used_by[which_set], _capacity_of[which_set], bytes);
  89 }
  90 
  91 inline void ShenandoahSetsOfFree::shrink_bounds_if_touched(ShenandoahFreeMemoryType set, size_t idx) {
  92   if (idx == _leftmosts[set]) {
  93     while ((_leftmosts[set] < _max) && !in_free_set(_leftmosts[set], set)) {
  94       _leftmosts[set]++;
  95     }
  96     if (_leftmosts_empty[set] < _leftmosts[set]) {
  97       // This gets us closer to where we need to be; we'll scan further when leftmosts_empty is requested.
  98       _leftmosts_empty[set] = _leftmosts[set];
  99     }
 100   }
 101   if (idx == _rightmosts[set]) {
 102     while (_rightmosts[set] > 0 && !in_free_set(_rightmosts[set], set)) {
 103       _rightmosts[set]--;
 104     }
 105     if (_rightmosts_empty[set] > _rightmosts[set]) {
 106       // This gets us closer to where we need to be; we'll scan further when rightmosts_empty is requested.
 107       _rightmosts_empty[set] = _rightmosts[set];
 108     }
 109   }
 110 }
 111 
 112 inline void ShenandoahSetsOfFree::expand_bounds_maybe(ShenandoahFreeMemoryType set, size_t idx, size_t region_capacity) {
 113   if (region_capacity == _region_size_bytes) {
 114     if (_leftmosts_empty[set] > idx) {
 115       _leftmosts_empty[set] = idx;
 116     }
 117     if (_rightmosts_empty[set] < idx) {
 118       _rightmosts_empty[set] = idx;
 119     }
 120   }
 121   if (_leftmosts[set] > idx) {
 122     _leftmosts[set] = idx;
 123   }
 124   if (_rightmosts[set] < idx) {
 125     _rightmosts[set] = idx;
 126   }
 127 }
 128 
 129 void ShenandoahSetsOfFree::remove_from_free_sets(size_t idx) {
 130   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 131   ShenandoahFreeMemoryType orig_set = membership(idx);
 132   assert (orig_set > NotFree && orig_set < NumFreeSets, "Cannot remove from free sets if not already free");
 133   _membership[idx] = NotFree;
 134   shrink_bounds_if_touched(orig_set, idx);
 135 
 136   _region_counts[orig_set]--;
 137   _region_counts[NotFree]++;
 138 }
 139 
 140 
 141 void ShenandoahSetsOfFree::make_free(size_t idx, ShenandoahFreeMemoryType which_set, size_t region_capacity) {
 142   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 143   assert (_membership[idx] == NotFree, "Cannot make free if already free");
 144   assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
 145   _membership[idx] = which_set;
 146   _capacity_of[which_set] += region_capacity;
 147   expand_bounds_maybe(which_set, idx, region_capacity);
 148 
 149   _region_counts[NotFree]--;
 150   _region_counts[which_set]++;
 151 }
 152 
 153 void ShenandoahSetsOfFree::move_to_set(size_t idx, ShenandoahFreeMemoryType new_set, size_t region_capacity) {
 154   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 155   assert ((new_set > NotFree) && (new_set < NumFreeSets), "New set must be valid");
 156   ShenandoahFreeMemoryType orig_set = _membership[idx];
 157   assert ((orig_set > NotFree) && (orig_set < NumFreeSets), "Cannot move free unless already free");
 158   // Expected transitions:
 159   //  During rebuild: Mutator => Collector
 160   //                  Mutator empty => Collector
 161   //  During flip_to_gc:
 162   //                  Mutator empty => Collector
 163   //                  Mutator empty => Old Collector
 164   // At start of update refs:
 165   //                  Collector => Mutator
 166   //                  OldCollector Empty => Mutator
  assert ((((region_capacity <= _region_size_bytes) &&
            (((orig_set == Mutator) && (new_set == Collector)) ||
             ((orig_set == Collector) && (new_set == Mutator)))) ||
           ((region_capacity == _region_size_bytes) &&
            (((orig_set == Mutator) && (new_set == Collector)) ||
             ((orig_set == OldCollector) && (new_set == Mutator)) ||
             (new_set == OldCollector)))), "Unexpected movement between sets");
 174 
 175   _membership[idx] = new_set;
 176   _capacity_of[orig_set] -= region_capacity;
 177   shrink_bounds_if_touched(orig_set, idx);
 178 
 179   _capacity_of[new_set] += region_capacity;
 180   expand_bounds_maybe(new_set, idx, region_capacity);
 181 
 182   _region_counts[orig_set]--;
 183   _region_counts[new_set]++;
 184 }
 185 
 186 inline ShenandoahFreeMemoryType ShenandoahSetsOfFree::membership(size_t idx) const {
 187   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 188   return _membership[idx];
 189 }
 190 
// Returns true iff region idx is in the test_set free set.  Before returning true, asserts that the
// region has allocation capacity (unless test_set is NotFree).  Requires that test_set is not NumFreeSets.
 193 inline bool ShenandoahSetsOfFree::in_free_set(size_t idx, ShenandoahFreeMemoryType test_set) const {
 194   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 195   if (_membership[idx] == test_set) {
 196     assert (test_set == NotFree || _free_set->alloc_capacity(idx) > 0, "Free regions must have alloc capacity");
 197     return true;
 198   } else {
 199     return false;
 200   }
 201 }
 202 
 203 inline size_t ShenandoahSetsOfFree::leftmost(ShenandoahFreeMemoryType which_set) const {
 204   assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
 205   size_t idx = _leftmosts[which_set];
 206   if (idx >= _max) {
 207     return _max;
 208   } else {
 209     assert (in_free_set(idx, which_set), "left-most region must be free");
 210     return idx;
 211   }
 212 }
 213 
 214 inline size_t ShenandoahSetsOfFree::rightmost(ShenandoahFreeMemoryType which_set) const {
 215   assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
 216   size_t idx = _rightmosts[which_set];
 217   assert ((_leftmosts[which_set] == _max) || in_free_set(idx, which_set), "right-most region must be free");
 218   return idx;
 219 }
 220 
 221 size_t ShenandoahSetsOfFree::leftmost_empty(ShenandoahFreeMemoryType which_set) {
 222   assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
 223   for (size_t idx = _leftmosts_empty[which_set]; idx < _max; idx++) {
 224     if ((membership(idx) == which_set) && (_free_set->alloc_capacity(idx) == _region_size_bytes)) {
 225       _leftmosts_empty[which_set] = idx;
 226       return idx;
 227     }
 228   }
 229   _leftmosts_empty[which_set] = _max;
 230   _rightmosts_empty[which_set] = 0;
 231   return _max;
 232 }
 233 
 234 inline size_t ShenandoahSetsOfFree::rightmost_empty(ShenandoahFreeMemoryType which_set) {
 235   assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
 236   for (intptr_t idx = _rightmosts_empty[which_set]; idx >= 0; idx--) {
 237     if ((membership(idx) == which_set) && (_free_set->alloc_capacity(idx) == _region_size_bytes)) {
 238       _rightmosts_empty[which_set] = idx;
 239       return idx;
 240     }
 241   }
 242   _leftmosts_empty[which_set] = _max;
 243   _rightmosts_empty[which_set] = 0;
 244   return 0;
 245 }
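
// Illustrative usage sketch (not from the original sources): how the lazily maintained "empty" bounds behave.
// Assume a ShenandoahSetsOfFree named 'sets' tracking 8 regions, of which only region 5 is completely empty
// and belongs to the Mutator set, and let region_size_bytes == ShenandoahHeapRegion::region_size_bytes():
//
//   sets.clear_all();                                  // empty bounds reset to [_max, 0] == [8, 0]
//   sets.make_free(5, Mutator, region_size_bytes);     // full capacity tightens the empty bounds to [5, 5]
//   size_t left  = sets.leftmost_empty(Mutator);       // scans forward from the cached bound, returns 5
//   size_t right = sets.rightmost_empty(Mutator);      // scans backward from the cached bound, returns 5
//
// If region 5 later loses its empty status, the next query re-scans, finds no empty region, resets the
// bounds to [8, 0], and returns _max (for leftmost_empty) or 0 (for rightmost_empty).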
 246 
 247 inline bool ShenandoahSetsOfFree::alloc_from_left_bias(ShenandoahFreeMemoryType which_set) {
 248   assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
 249   return _left_to_right_bias[which_set];
 250 }
 251 
 252 void ShenandoahSetsOfFree::establish_alloc_bias(ShenandoahFreeMemoryType which_set) {
 253   ShenandoahHeap* heap = ShenandoahHeap::heap();
 254   shenandoah_assert_heaplocked();
 255   assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
 256 
 257   size_t middle = (_leftmosts[which_set] + _rightmosts[which_set]) / 2;
 258   size_t available_in_first_half = 0;
 259   size_t available_in_second_half = 0;
 260 
 261   for (size_t index = _leftmosts[which_set]; index < middle; index++) {
 262     if (in_free_set(index, which_set)) {
 263       ShenandoahHeapRegion* r = heap->get_region(index);
 264       available_in_first_half += r->free();
 265     }
 266   }
 267   for (size_t index = middle; index <= _rightmosts[which_set]; index++) {
 268     if (in_free_set(index, which_set)) {
 269       ShenandoahHeapRegion* r = heap->get_region(index);
 270       available_in_second_half += r->free();
 271     }
 272   }
 273 
  // We prefer to consume the sparsely distributed regions first so that the remaining regions are densely packed.
  // Densely packed regions reduce the effort required to find a region with sufficient memory to satisfy a new allocation
  // request.  Regions become sparsely distributed following a Full GC, which tends to slide all regions to the front of the
  // heap rather than allowing survivor regions to remain at the high end of the heap, where we intend for them to congregate.
 278 
 279   // TODO: In the future, we may modify Full GC so that it slides old objects to the end of the heap and young objects to the
 280   // front of the heap. If this is done, we can always search survivor Collector and OldCollector regions right to left.
 281   _left_to_right_bias[which_set] = (available_in_second_half > available_in_first_half);
 282 }
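
// Illustrative example (not from the original sources): suppose the Collector set spans regions 10..19, so
// middle == 14.  If the free bytes in regions 10..13 total 1 MB while regions 14..19 hold 3 MB, then
// available_in_second_half > available_in_first_half and the bias becomes left-to-right: allocation first
// consumes the sparsely populated low end, leaving the densely packed high end intact.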
 283 
 284 #ifdef ASSERT
 285 void ShenandoahSetsOfFree::assert_bounds() {
 286 
 287   size_t leftmosts[NumFreeSets];
 288   size_t rightmosts[NumFreeSets];
 289   size_t empty_leftmosts[NumFreeSets];
 290   size_t empty_rightmosts[NumFreeSets];
 291 
 292   for (int i = 0; i < NumFreeSets; i++) {
 293     leftmosts[i] = _max;
 294     empty_leftmosts[i] = _max;
 295     rightmosts[i] = 0;
 296     empty_rightmosts[i] = 0;
 297   }
 298 
 299   for (size_t i = 0; i < _max; i++) {
 300     ShenandoahFreeMemoryType set = membership(i);
 301     switch (set) {
 302       case NotFree:
 303         break;
 304 
 305       case Mutator:
 306       case Collector:
 307       case OldCollector:
 308       {
 309         size_t capacity = _free_set->alloc_capacity(i);
 310         bool is_empty = (capacity == _region_size_bytes);
 311         assert(capacity > 0, "free regions must have allocation capacity");
 312         if (i < leftmosts[set]) {
 313           leftmosts[set] = i;
 314         }
 315         if (is_empty && (i < empty_leftmosts[set])) {
 316           empty_leftmosts[set] = i;
 317         }
 318         if (i > rightmosts[set]) {
 319           rightmosts[set] = i;
 320         }
 321         if (is_empty && (i > empty_rightmosts[set])) {
 322           empty_rightmosts[set] = i;
 323         }
 324         break;
 325       }
 326 
 327       case NumFreeSets:
 328       default:
 329         ShouldNotReachHere();
 330     }
 331   }
 332 
 333   // Performance invariants. Failing these would not break the free set, but performance would suffer.
  assert (leftmost(Mutator) <= _max, "leftmost in bounds: "  SIZE_FORMAT " <= " SIZE_FORMAT, leftmost(Mutator),  _max);
 335   assert (rightmost(Mutator) < _max, "rightmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, rightmost(Mutator),  _max);
 336 
 337   assert (leftmost(Mutator) == _max || in_free_set(leftmost(Mutator), Mutator),
 338           "leftmost region should be free: " SIZE_FORMAT,  leftmost(Mutator));
 339   assert (leftmost(Mutator) == _max || in_free_set(rightmost(Mutator), Mutator),
 340           "rightmost region should be free: " SIZE_FORMAT, rightmost(Mutator));
 341 
 342   // If Mutator set is empty, leftmosts will both equal max, rightmosts will both equal zero.  Likewise for empty region sets.
 343   size_t beg_off = leftmosts[Mutator];
 344   size_t end_off = rightmosts[Mutator];
 345   assert (beg_off >= leftmost(Mutator),
 346           "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost(Mutator));
 347   assert (end_off <= rightmost(Mutator),
 348           "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, rightmost(Mutator));
 349 
 350   beg_off = empty_leftmosts[Mutator];
 351   end_off = empty_rightmosts[Mutator];
 352   assert (beg_off >= leftmost_empty(Mutator),
 353           "free empty regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost_empty(Mutator));
 354   assert (end_off <= rightmost_empty(Mutator),
 355           "free empty regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, rightmost_empty(Mutator));
 356 
 357   // Performance invariants. Failing these would not break the free set, but performance would suffer.
  assert (leftmost(Collector) <= _max, "leftmost in bounds: "  SIZE_FORMAT " <= " SIZE_FORMAT, leftmost(Collector),  _max);
 359   assert (rightmost(Collector) < _max, "rightmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, rightmost(Collector),  _max);
 360 
 361   assert (leftmost(Collector) == _max || in_free_set(leftmost(Collector), Collector),
 362           "leftmost region should be free: " SIZE_FORMAT,  leftmost(Collector));
 363   assert (leftmost(Collector) == _max || in_free_set(rightmost(Collector), Collector),
 364           "rightmost region should be free: " SIZE_FORMAT, rightmost(Collector));
 365 
 366   // If Collector set is empty, leftmosts will both equal max, rightmosts will both equal zero.  Likewise for empty region sets.
 367   beg_off = leftmosts[Collector];
 368   end_off = rightmosts[Collector];
 369   assert (beg_off >= leftmost(Collector),
 370           "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost(Collector));
 371   assert (end_off <= rightmost(Collector),
 372           "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, rightmost(Collector));
 373 
 374   beg_off = empty_leftmosts[Collector];
 375   end_off = empty_rightmosts[Collector];
 376   assert (beg_off >= leftmost_empty(Collector),
 377           "free empty regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost_empty(Collector));
 378   assert (end_off <= rightmost_empty(Collector),
 379           "free empty regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, rightmost_empty(Collector));
 380 
 381   // Performance invariants. Failing these would not break the free set, but performance would suffer.
  assert (leftmost(OldCollector) <= _max, "leftmost in bounds: "  SIZE_FORMAT " <= " SIZE_FORMAT, leftmost(OldCollector),  _max);
 383   assert (rightmost(OldCollector) < _max, "rightmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, rightmost(OldCollector),  _max);
 384 
 385   assert (leftmost(OldCollector) == _max || in_free_set(leftmost(OldCollector), OldCollector),
 386           "leftmost region should be free: " SIZE_FORMAT,  leftmost(OldCollector));
 387   assert (leftmost(OldCollector) == _max || in_free_set(rightmost(OldCollector), OldCollector),
 388           "rightmost region should be free: " SIZE_FORMAT, rightmost(OldCollector));
 389 
 390   // If OldCollector set is empty, leftmosts will both equal max, rightmosts will both equal zero.  Likewise for empty region sets.
 391   beg_off = leftmosts[OldCollector];
 392   end_off = rightmosts[OldCollector];
 393   assert (beg_off >= leftmost(OldCollector),
 394           "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost(OldCollector));
 395   assert (end_off <= rightmost(OldCollector),
 396           "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, rightmost(OldCollector));
 397 
 398   beg_off = empty_leftmosts[OldCollector];
 399   end_off = empty_rightmosts[OldCollector];
 400   assert (beg_off >= leftmost_empty(OldCollector),
 401           "free empty regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost_empty(OldCollector));
 402   assert (end_off <= rightmost_empty(OldCollector),
 403           "free empty regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, rightmost_empty(OldCollector));
 404 }
 405 #endif
 406 
 407 ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
 408   _heap(heap),
 409   _free_sets(max_regions, this)
 410 {
 411   clear_internal();
 412 }
 413 
 414 // This allocates from a region within the old_collector_set.  If affiliation equals OLD, the allocation must be taken
 415 // from a region that is_old().  Otherwise, affiliation should be FREE, in which case this will put a previously unaffiliated
 416 // region into service.
 417 HeapWord* ShenandoahFreeSet::allocate_old_with_affiliation(ShenandoahAffiliation affiliation,
 418                                                            ShenandoahAllocRequest& req, bool& in_new_region) {
 419   shenandoah_assert_heaplocked();
 420 
 421   size_t rightmost =
 422     (affiliation == ShenandoahAffiliation::FREE)? _free_sets.rightmost_empty(OldCollector): _free_sets.rightmost(OldCollector);
 423   size_t leftmost =
 424     (affiliation == ShenandoahAffiliation::FREE)? _free_sets.leftmost_empty(OldCollector): _free_sets.leftmost(OldCollector);
 425   if (_free_sets.alloc_from_left_bias(OldCollector)) {
 426     // This mode picks up stragglers left by a full GC
 427     for (size_t idx = leftmost; idx <= rightmost; idx++) {
 428       if (_free_sets.in_free_set(idx, OldCollector)) {
 429         ShenandoahHeapRegion* r = _heap->get_region(idx);
 430         assert(r->is_trash() || !r->is_affiliated() || r->is_old(), "old_collector_set region has bad affiliation");
 431         if (r->affiliation() == affiliation) {
 432           HeapWord* result = try_allocate_in(r, req, in_new_region);
 433           if (result != nullptr) {
 434             return result;
 435           }
 436         }
 437       }
 438     }
 439   } else {
 440     // This mode picks up stragglers left by a previous concurrent GC
 441     for (size_t count = rightmost + 1; count > leftmost; count--) {
 442       // size_t is unsigned, need to dodge underflow when _leftmost = 0
 443       size_t idx = count - 1;
 444       if (_free_sets.in_free_set(idx, OldCollector)) {
 445         ShenandoahHeapRegion* r = _heap->get_region(idx);
 446         assert(r->is_trash() || !r->is_affiliated() || r->is_old(), "old_collector_set region has bad affiliation");
 447         if (r->affiliation() == affiliation) {
 448           HeapWord* result = try_allocate_in(r, req, in_new_region);
 449           if (result != nullptr) {
 450             return result;
 451           }
 452         }
 453       }
 454     }
 455   }
 456   return nullptr;
 457 }
 458 
 459 void ShenandoahFreeSet::add_old_collector_free_region(ShenandoahHeapRegion* region) {
 460   shenandoah_assert_heaplocked();
 461   size_t idx = region->index();
 462   size_t capacity = alloc_capacity(region);
 463   assert(_free_sets.membership(idx) == NotFree, "Regions promoted in place should not be in any free set");
 464   if (capacity >= PLAB::min_size() * HeapWordSize) {
 465     _free_sets.make_free(idx, OldCollector, capacity);
 466     _heap->augment_promo_reserve(capacity);
 467   }
 468 }
 469 
 470 HeapWord* ShenandoahFreeSet::allocate_with_affiliation(ShenandoahAffiliation affiliation,
 471                                                        ShenandoahAllocRequest& req, bool& in_new_region) {
 472   shenandoah_assert_heaplocked();
 473   size_t rightmost =
 474     (affiliation == ShenandoahAffiliation::FREE)? _free_sets.rightmost_empty(Collector): _free_sets.rightmost(Collector);
 475   size_t leftmost =
 476     (affiliation == ShenandoahAffiliation::FREE)? _free_sets.leftmost_empty(Collector): _free_sets.leftmost(Collector);
 477   for (size_t c = rightmost + 1; c > leftmost; c--) {
 478     // size_t is unsigned, need to dodge underflow when _leftmost = 0
 479     size_t idx = c - 1;
 480     if (_free_sets.in_free_set(idx, Collector)) {
 481       ShenandoahHeapRegion* r = _heap->get_region(idx);
 482       if (r->affiliation() == affiliation) {
 483         HeapWord* result = try_allocate_in(r, req, in_new_region);
 484         if (result != nullptr) {
 485           return result;
 486         }
 487       }
 488     }
 489   }
 490   log_debug(gc, free)("Could not allocate collector region with affiliation: %s for request " PTR_FORMAT,
 491                       shenandoah_affiliation_name(affiliation), p2i(&req));
 492   return nullptr;
 493 }
 494 
 495 HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
 496   shenandoah_assert_heaplocked();
 497 
 498   // Scan the bitmap looking for a first fit.
 499   //
  // Leftmost and rightmost bounds provide enough caching to walk the bitmap efficiently. Normally,
  // we expect to find a region to allocate in right away.
  //
  // Allocations are biased: new application allocs go to the beginning of the heap, and GC allocs
  // go to the end. This makes application allocation faster, because we would clear lots
  // of regions from the beginning most of the time.
  //
  // The free set maintains mutator and collector views, and normally each allocates only from its own view,
  // except in the special cases for stealing and mixed allocations.
 509 
 510   // Overwrite with non-zero (non-NULL) values only if necessary for allocation bookkeeping.
 511 
 512   bool allow_new_region = true;
 513   if (_heap->mode()->is_generational()) {
 514     switch (req.affiliation()) {
 515       case ShenandoahAffiliation::OLD_GENERATION:
 516         // Note: unsigned result from free_unaffiliated_regions() will never be less than zero, but it may equal zero.
 517         if (_heap->old_generation()->free_unaffiliated_regions() <= 0) {
 518           allow_new_region = false;
 519         }
 520         break;
 521 
 522       case ShenandoahAffiliation::YOUNG_GENERATION:
 523         // Note: unsigned result from free_unaffiliated_regions() will never be less than zero, but it may equal zero.
 524         if (_heap->young_generation()->free_unaffiliated_regions() <= 0) {
 525           allow_new_region = false;
 526         }
 527         break;
 528 
 529       case ShenandoahAffiliation::FREE:
 530         fatal("Should request affiliation");
 531 
 532       default:
 533         ShouldNotReachHere();
 534         break;
 535     }
 536   }
 537   switch (req.type()) {
 538     case ShenandoahAllocRequest::_alloc_tlab:
 539     case ShenandoahAllocRequest::_alloc_shared: {
 540       // Try to allocate in the mutator view
 541       for (size_t idx = _free_sets.leftmost(Mutator); idx <= _free_sets.rightmost(Mutator); idx++) {
 542         ShenandoahHeapRegion* r = _heap->get_region(idx);
 543         if (_free_sets.in_free_set(idx, Mutator) && (allow_new_region || r->is_affiliated())) {
 544           // try_allocate_in() increases used if the allocation is successful.
 545           HeapWord* result;
 546           size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab)? req.min_size(): req.size();
 547           if ((alloc_capacity(r) >= min_size) && ((result = try_allocate_in(r, req, in_new_region)) != nullptr)) {
 548             return result;
 549           }
 550         }
 551       }
 552       // There is no recovery. Mutator does not touch collector view at all.
 553       break;
 554     }
 555     case ShenandoahAllocRequest::_alloc_gclab:
 556       // GCLABs are for evacuation so we must be in evacuation phase.  If this allocation is successful, increment
 557       // the relevant evac_expended rather than used value.
 558 
 559     case ShenandoahAllocRequest::_alloc_plab:
 560       // PLABs always reside in old-gen and are only allocated during evacuation phase.
 561 
 562     case ShenandoahAllocRequest::_alloc_shared_gc: {
 563       if (!_heap->mode()->is_generational()) {
 564         // size_t is unsigned, need to dodge underflow when _leftmost = 0
 565         // Fast-path: try to allocate in the collector view first
 566         for (size_t c = _free_sets.rightmost(Collector) + 1; c > _free_sets.leftmost(Collector); c--) {
 567           size_t idx = c - 1;
 568           if (_free_sets.in_free_set(idx, Collector)) {
 569             HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
 570             if (result != nullptr) {
 571               return result;
 572             }
 573           }
 574         }
 575       } else {
 576         // First try to fit into a region that is already in use in the same generation.
 577         HeapWord* result;
 578         if (req.is_old()) {
 579           result = allocate_old_with_affiliation(req.affiliation(), req, in_new_region);
 580         } else {
 581           result = allocate_with_affiliation(req.affiliation(), req, in_new_region);
 582         }
 583         if (result != nullptr) {
 584           return result;
 585         }
 586         if (allow_new_region) {
 587           // Then try a free region that is dedicated to GC allocations.
 588           if (req.is_old()) {
 589             result = allocate_old_with_affiliation(FREE, req, in_new_region);
 590           } else {
 591             result = allocate_with_affiliation(FREE, req, in_new_region);
 592           }
 593           if (result != nullptr) {
 594             return result;
 595           }
 596         }
 597       }
 598       // No dice. Can we borrow space from mutator view?
 599       if (!ShenandoahEvacReserveOverflow) {
 600         return nullptr;
 601       }
 602 
 603       if (!allow_new_region && req.is_old() && (_heap->young_generation()->free_unaffiliated_regions() > 0)) {
 604         // This allows us to flip a mutator region to old_collector
 605         allow_new_region = true;
 606       }
 607 
 608       // We should expand old-gen if this can prevent an old-gen evacuation failure.  We don't care so much about
 609       // promotion failures since they can be mitigated in a subsequent GC pass.  Would be nice to know if this
 610       // allocation request is for evacuation or promotion.  Individual threads limit their use of PLAB memory for
 611       // promotions, so we already have an assurance that any additional memory set aside for old-gen will be used
 612       // only for old-gen evacuations.
 613 
 614       // Also TODO:
 615       // if (GC is idle (out of cycle) and mutator allocation fails and there is memory reserved in Collector
 616       // or OldCollector sets, transfer a region of memory so that we can satisfy the allocation request, and
      // immediately trigger the start of GC.  It is better to satisfy the allocation than to trigger an out-of-cycle
 618       // allocation failure (even if this means we have a little less memory to handle evacuations during the
 619       // subsequent GC pass).
 620 
 621       if (allow_new_region) {
 622         // Try to steal an empty region from the mutator view.
 623         for (size_t c = _free_sets.rightmost_empty(Mutator) + 1; c > _free_sets.leftmost_empty(Mutator); c--) {
 624           size_t idx = c - 1;
 625           if (_free_sets.in_free_set(idx, Mutator)) {
 626             ShenandoahHeapRegion* r = _heap->get_region(idx);
 627             if (can_allocate_from(r)) {
 628               if (req.is_old()) {
 629                 flip_to_old_gc(r);
 630               } else {
 631                 flip_to_gc(r);
 632               }
 633               HeapWord *result = try_allocate_in(r, req, in_new_region);
 634               if (result != nullptr) {
 635                 log_debug(gc, free)("Flipped region " SIZE_FORMAT " to gc for request: " PTR_FORMAT, idx, p2i(&req));
 636                 return result;
 637               }
 638             }
 639           }
 640         }
 641       }
 642 
 643       // No dice. Do not try to mix mutator and GC allocations, because
 644       // URWM moves due to GC allocations would expose unparsable mutator
 645       // allocations.
 646       break;
 647     }
 648     default:
 649       ShouldNotReachHere();
 650   }
 651   return nullptr;
 652 }
 653 
 654 // This work method takes an argument corresponding to the number of bytes
 655 // free in a region, and returns the largest amount in heapwords that can be allocated
 656 // such that both of the following conditions are satisfied:
 657 //
 658 // 1. it is a multiple of card size
 659 // 2. any remaining shard may be filled with a filler object
 660 //
 661 // The idea is that the allocation starts and ends at card boundaries. Because
// a region's end is card-aligned, the remainder shard that must be filled is
 663 // at the start of the free space.
 664 //
 665 // This is merely a helper method to use for the purpose of such a calculation.
 666 size_t get_usable_free_words(size_t free_bytes) {
 667   // e.g. card_size is 512, card_shift is 9, min_fill_size() is 8
 668   //      free is 514
 669   //      usable_free is 512, which is decreased to 0
 670   size_t usable_free = (free_bytes / CardTable::card_size()) << CardTable::card_shift();
 671   assert(usable_free <= free_bytes, "Sanity check");
 672   if ((free_bytes != usable_free) && (free_bytes - usable_free < ShenandoahHeap::min_fill_size() * HeapWordSize)) {
 673     // After aligning to card multiples, the remainder would be smaller than
 674     // the minimum filler object, so we'll need to take away another card's
 675     // worth to construct a filler object.
 676     if (usable_free >= CardTable::card_size()) {
 677       usable_free -= CardTable::card_size();
 678     } else {
 679       assert(usable_free == 0, "usable_free is a multiple of card_size and card_size > min_fill_size");
 680     }
 681   }
 682 
 683   return usable_free / HeapWordSize;
 684 }
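
// Worked example (illustrative only, extending the example in the comments above and assuming 8-byte heap
// words, card_size == 512 bytes and min_fill_size() == 8 words == 64 bytes):
//   free_bytes == 1030: usable_free == 1024, but the 6-byte remainder is too small for a filler object, so
//                       one more card is surrendered, leaving 512 bytes == 64 heap words.
//   free_bytes == 1100: usable_free == 1024 and the 76-byte remainder can hold a filler object, so the full
//                       1024 bytes == 128 heap words remain usable.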
 685 
 686 // Given a size argument, which is a multiple of card size, a request struct
 687 // for a PLAB, and an old region, return a pointer to the allocated space for
 688 // a PLAB which is card-aligned and where any remaining shard in the region
 689 // has been suitably filled by a filler object.
 690 // It is assumed (and assertion-checked) that such an allocation is always possible.
 691 HeapWord* ShenandoahFreeSet::allocate_aligned_plab(size_t size, ShenandoahAllocRequest& req, ShenandoahHeapRegion* r) {
 692   assert(_heap->mode()->is_generational(), "PLABs are only for generational mode");
 693   assert(r->is_old(), "All PLABs reside in old-gen");
 694   assert(!req.is_mutator_alloc(), "PLABs should not be allocated by mutators.");
 695   assert(size % CardTable::card_size_in_words() == 0, "size must be multiple of card table size, was " SIZE_FORMAT, size);
 696 
 697   HeapWord* result = r->allocate_aligned(size, req, CardTable::card_size());
 698   assert(result != nullptr, "Allocation cannot fail");
 699   assert(r->top() <= r->end(), "Allocation cannot span end of region");
 700   assert(req.actual_size() == size, "Should not have needed to adjust size for PLAB.");
 701   assert(((uintptr_t) result) % CardTable::card_size_in_words() == 0, "PLAB start must align with card boundary");
 702 
 703   return result;
 704 }
 705 
 706 HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
 707   assert (has_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());
 708   if (_heap->is_concurrent_weak_root_in_progress() && r->is_trash()) {
 709     return nullptr;
 710   }
 711 
 712   try_recycle_trashed(r);
 713   if (!r->is_affiliated()) {
 714     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
 715     r->set_affiliation(req.affiliation());
 716     if (r->is_old()) {
 717       // Any OLD region allocated during concurrent coalesce-and-fill does not need to be coalesced and filled because
 718       // all objects allocated within this region are above TAMS (and thus are implicitly marked).  In case this is an
 719       // OLD region and concurrent preparation for mixed evacuations visits this region before the start of the next
 720       // old-gen concurrent mark (i.e. this region is allocated following the start of old-gen concurrent mark but before
 721       // concurrent preparations for mixed evacuations are completed), we mark this region as not requiring any
 722       // coalesce-and-fill processing.
 723       r->end_preemptible_coalesce_and_fill();
 724       _heap->clear_cards_for(r);
 725       _heap->old_generation()->increment_affiliated_region_count();
 726     } else {
 727       _heap->young_generation()->increment_affiliated_region_count();
 728     }
 729 
 730     assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom");
 731     assert(ctx->is_bitmap_clear_range(ctx->top_bitmap(r), r->end()), "Bitmap above top_bitmap() must be clear");
 732   } else if (r->affiliation() != req.affiliation()) {
 733     assert(_heap->mode()->is_generational(), "Request for %s from %s region should only happen in generational mode.",
 734            req.affiliation_name(), r->affiliation_name());
 735     return nullptr;
 736   }
 737 
 738   in_new_region = r->is_empty();
 739   HeapWord* result = nullptr;
 740 
 741   if (in_new_region) {
 742     log_debug(gc, free)("Using new region (" SIZE_FORMAT ") for %s (" PTR_FORMAT ").",
 743                        r->index(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(&req));
 744   }
 745 
 746   // req.size() is in words, r->free() is in bytes.
 747   if (ShenandoahElasticTLAB && req.is_lab_alloc()) {
 748     if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
 749       assert(_heap->mode()->is_generational(), "PLABs are only for generational mode");
 750       assert(_free_sets.in_free_set(r->index(), OldCollector), "PLABS must be allocated in old_collector_free regions");
      // Need to ensure that PLABs are aligned on a multiple of the card size.
 752       // Since we have Elastic TLABs, align sizes up. They may be decreased to fit in the usable
 753       // memory remaining in the region (which will also be aligned to cards).
 754       size_t adjusted_size = align_up(req.size(), CardTable::card_size_in_words());
 755       size_t adjusted_min_size = align_up(req.min_size(), CardTable::card_size_in_words());
 756       size_t usable_free = get_usable_free_words(r->free());
 757 
 758       if (adjusted_size > usable_free) {
 759         adjusted_size = usable_free;
 760       }
 761 
 762       if (adjusted_size >= adjusted_min_size) {
 763         result = allocate_aligned_plab(adjusted_size, req, r);
 764       }
 765       // Otherwise, leave result == nullptr because the adjusted size is smaller than min size.
 766     } else {
 767       // This is a GCLAB or a TLAB allocation
 768       size_t adjusted_size = req.size();
 769       size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
 770       if (adjusted_size > free) {
 771         adjusted_size = free;
 772       }
 773       if (adjusted_size >= req.min_size()) {
 774         result = r->allocate(adjusted_size, req);
 775         assert (result != nullptr, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, adjusted_size);
 776         req.set_actual_size(adjusted_size);
 777       } else {
 778         log_trace(gc, free)("Failed to shrink TLAB or GCLAB request (" SIZE_FORMAT ") in region " SIZE_FORMAT " to " SIZE_FORMAT
 779                            " because min_size() is " SIZE_FORMAT, req.size(), r->index(), adjusted_size, req.min_size());
 780       }
 781     }
 782   } else if (req.is_lab_alloc() && req.type() == ShenandoahAllocRequest::_alloc_plab) {
 783 
 784     // inelastic PLAB
 785     size_t size = req.size();
 786     size_t usable_free = get_usable_free_words(r->free());
 787     if (size <= usable_free) {
 788       result = allocate_aligned_plab(size, req, r);
 789     }
 790   } else {
 791     size_t size = req.size();
 792     result = r->allocate(size, req);
 793     if (result != nullptr) {
 794       // Record actual allocation size
 795       req.set_actual_size(size);
 796     }
 797   }
 798 
 799   ShenandoahGeneration* generation = _heap->generation_for(req.affiliation());
 800   if (result != nullptr) {
 801     // Allocation successful, bump stats:
 802     if (req.is_mutator_alloc()) {
 803       assert(req.is_young(), "Mutator allocations always come from young generation.");
 804       _free_sets.increase_used(Mutator, req.actual_size() * HeapWordSize);
 805     } else {
 806       assert(req.is_gc_alloc(), "Should be gc_alloc since req wasn't mutator alloc");
 807 
 808       // For GC allocations, we advance update_watermark because the objects relocated into this memory during
 809       // evacuation are not updated during evacuation.  For both young and old regions r, it is essential that all
 810       // PLABs be made parsable at the end of evacuation.  This is enabled by retiring all plabs at end of evacuation.
 811       // TODO: Making a PLAB parsable involves placing a filler object in its remnant memory but does not require
 812       // that the PLAB be disabled for all future purposes.  We may want to introduce a new service to make the
 813       // PLABs parsable while still allowing the PLAB to serve future allocation requests that arise during the
 814       // next evacuation pass.
 815       r->set_update_watermark(r->top());
 816       if (r->is_old()) {
 817         assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "old-gen allocations use PLAB or shared allocation");
 818         // for plabs, we'll sort the difference between evac and promotion usage when we retire the plab
 819       }
 820     }
 821   }
 822 
 823   if (result == nullptr || alloc_capacity(r) < PLAB::min_size() * HeapWordSize) {
 824     // Region cannot afford this and is likely to not afford future allocations. Retire it.
 825     //
 826     // While this seems a bit harsh, especially in the case when this large allocation does not
 827     // fit but the next small one would, we are risking to inflate scan times when lots of
 828     // almost-full regions precede the fully-empty region where we want to allocate the entire TLAB.
 829 
 830     // Record the remainder as allocation waste
 831     size_t idx = r->index();
 832     if (req.is_mutator_alloc()) {
 833       size_t waste = r->free();
 834       if (waste > 0) {
 835         _free_sets.increase_used(Mutator, waste);
 836         // This one request could cause several regions to be "retired", so we must accumulate the waste
 837         req.set_waste((waste >> LogHeapWordSize) + req.waste());
 838       }
 839       assert(_free_sets.membership(idx) == Mutator, "Must be mutator free: " SIZE_FORMAT, idx);
 840     } else {
 841       assert(_free_sets.membership(idx) == Collector || _free_sets.membership(idx) == OldCollector,
 842              "Must be collector or old-collector free: " SIZE_FORMAT, idx);
 843     }
 844     // This region is no longer considered free (in any set)
 845     _free_sets.remove_from_free_sets(idx);
 846     _free_sets.assert_bounds();
 847   }
 848   return result;
 849 }
 850 
 851 HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
 852   shenandoah_assert_heaplocked();
 853 
 854   size_t words_size = req.size();
 855   size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
 856 
 857   assert(req.is_young(), "Humongous regions always allocated in YOUNG");
 858   ShenandoahGeneration* generation = _heap->generation_for(req.affiliation());
 859 
 860   // Check if there are enough regions left to satisfy allocation.
 861   if (_heap->mode()->is_generational()) {
 862     size_t avail_young_regions = generation->free_unaffiliated_regions();
 863     if (num > _free_sets.count(Mutator) || (num > avail_young_regions)) {
 864       return nullptr;
 865     }
 866   } else {
 867     if (num > _free_sets.count(Mutator)) {
 868       return nullptr;
 869     }
 870   }
 871 
  // Find a contiguous interval of $num regions, starting from $beg and ending at $end,
 873   // inclusive. Contiguous allocations are biased to the beginning.
 874 
 875   size_t beg = _free_sets.leftmost(Mutator);
 876   size_t end = beg;
 877 
 878   while (true) {
 879     if (end >= _free_sets.max()) {
 880       // Hit the end, goodbye
 881       return nullptr;
 882     }
 883 
    // If the next region is not in the Mutator free set, or is not completely free, then the current
    // [beg; end] range is useless, and we may fast-forward past it.
 886     if (!_free_sets.in_free_set(end, Mutator) || !can_allocate_from(_heap->get_region(end))) {
 887       end++;
 888       beg = end;
 889       continue;
 890     }
 891 
 892     if ((end - beg + 1) == num) {
 893       // found the match
 894       break;
 895     }
 896 
 897     end++;
  }
 899 
 900   size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
 901   ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
 902 
 903   // Initialize regions:
 904   for (size_t i = beg; i <= end; i++) {
 905     ShenandoahHeapRegion* r = _heap->get_region(i);
 906     try_recycle_trashed(r);
 907 
 908     assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous");
 909     assert(r->is_empty(), "Should be empty");
 910 
 911     if (i == beg) {
 912       r->make_humongous_start();
 913     } else {
 914       r->make_humongous_cont();
 915     }
 916 
 917     // Trailing region may be non-full, record the remainder there
 918     size_t used_words;
 919     if ((i == end) && (remainder != 0)) {
 920       used_words = remainder;
 921     } else {
 922       used_words = ShenandoahHeapRegion::region_size_words();
 923     }
 924 
 925     r->set_affiliation(req.affiliation());
 926     r->set_update_watermark(r->bottom());
 927     r->set_top(r->bottom() + used_words);
 928 
 929     // While individual regions report their true use, all humongous regions are marked used in the free set.
 930     _free_sets.remove_from_free_sets(r->index());
 931   }
 932   _heap->young_generation()->increase_affiliated_region_count(num);
 933 
 934   size_t total_humongous_size = ShenandoahHeapRegion::region_size_bytes() * num;
 935   _free_sets.increase_used(Mutator, total_humongous_size);
 936   _free_sets.assert_bounds();
 937   req.set_actual_size(words_size);
 938   if (remainder != 0) {
 939     req.set_waste(ShenandoahHeapRegion::region_size_words() - remainder);
 940   }
 941   return _heap->get_region(beg)->bottom();
 942 }
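
// Illustrative sizing example (not from the original sources): assuming 2 MB regions and 8-byte heap words,
// region_size_words() == 256 K words.  A humongous request of words_size == 600 K words requires
// num == 3 regions, and remainder == 600 K mod 256 K == 88 K words.  The first two regions are filled
// completely, the trailing region's top is set to bottom() + 88 K words, and the request records
// 256 K - 88 K == 168 K words of waste for the unused tail of that region.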
 943 
 944 // Returns true iff this region is entirely available, either because it is empty() or because it has been found to represent
 945 // immediate trash and we'll be able to immediately recycle it.  Note that we cannot recycle immediate trash if
 946 // concurrent weak root processing is in progress.
 947 bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) const {
 948   return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
 949 }
 950 
 951 bool ShenandoahFreeSet::can_allocate_from(size_t idx) const {
 952   ShenandoahHeapRegion* r = _heap->get_region(idx);
 953   return can_allocate_from(r);
 954 }
 955 
 956 size_t ShenandoahFreeSet::alloc_capacity(size_t idx) const {
 957   ShenandoahHeapRegion* r = _heap->get_region(idx);
 958   return alloc_capacity(r);
 959 }
 960 
 961 size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) const {
 962   if (r->is_trash()) {
 963     // This would be recycled on allocation path
 964     return ShenandoahHeapRegion::region_size_bytes();
 965   } else {
 966     return r->free();
 967   }
 968 }
 969 
 970 bool ShenandoahFreeSet::has_alloc_capacity(ShenandoahHeapRegion *r) const {
 971   return alloc_capacity(r) > 0;
 972 }
 973 
 974 bool ShenandoahFreeSet::has_alloc_capacity(size_t idx) const {
 975   ShenandoahHeapRegion* r = _heap->get_region(idx);
 976   return alloc_capacity(r) > 0;
 977 }
 978 
 979 bool ShenandoahFreeSet::has_no_alloc_capacity(ShenandoahHeapRegion *r) const {
 980   return alloc_capacity(r) == 0;
 981 }
 982 
 983 void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
 984   if (r->is_trash()) {
 985     r->recycle();
 986   }
 987 }
 988 
 989 void ShenandoahFreeSet::recycle_trash() {
  // The lock is not reentrant; check that we do not already hold it.
 991   shenandoah_assert_not_heaplocked();
 992 
 993   for (size_t i = 0; i < _heap->num_regions(); i++) {
 994     ShenandoahHeapRegion* r = _heap->get_region(i);
 995     if (r->is_trash()) {
 996       ShenandoahHeapLocker locker(_heap->lock());
 997       try_recycle_trashed(r);
 998     }
 999     SpinPause(); // allow allocators to take the lock
1000   }
1001 }
1002 
1003 void ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) {
1004   size_t idx = r->index();
1005 
1006   assert(_free_sets.in_free_set(idx, Mutator), "Should be in mutator view");
1007   // Note: can_allocate_from(r) means r is entirely empty
1008   assert(can_allocate_from(r), "Should not be allocated");
1009 
1010   size_t region_capacity = alloc_capacity(r);
1011   _free_sets.move_to_set(idx, OldCollector, region_capacity);
1012   _free_sets.assert_bounds();
1013   _heap->generation_sizer()->force_transfer_to_old(1);
1014   _heap->augment_old_evac_reserve(region_capacity);
1015   // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
1016   // to recycle trash before attempting to allocate anything in the region.
1017 }
1018 
1019 void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
1020   size_t idx = r->index();
1021 
1022   assert(_free_sets.in_free_set(idx, Mutator), "Should be in mutator view");
1023   assert(can_allocate_from(r), "Should not be allocated");
1024 
1025   size_t region_capacity = alloc_capacity(r);
1026   _free_sets.move_to_set(idx, Collector, region_capacity);
1027   _free_sets.assert_bounds();
1028 
1029   // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
1030   // to recycle trash before attempting to allocate anything in the region.
1031 }
1032 
1033 void ShenandoahFreeSet::clear() {
1034   shenandoah_assert_heaplocked();
1035   clear_internal();
1036 }
1037 
1038 void ShenandoahFreeSet::clear_internal() {
1039   _free_sets.clear_all();
1040 }
1041 
1042 // This function places all is_old() regions that have allocation capacity into the old_collector set.  It places
1043 // all other regions (not is_old()) that have allocation capacity into the mutator_set.  Subsequently, we will
1044 // move some of the mutator regions into the collector set or old_collector set with the intent of packing
1045 // old_collector memory into the highest (rightmost) addresses of the heap and the collector memory into the
1046 // next highest addresses of the heap, with mutator memory consuming the lowest addresses of the heap.
1047 void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regions, size_t &old_cset_regions) {
1048 
1049   old_cset_regions = 0;
1050   young_cset_regions = 0;
1051   for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
1052     ShenandoahHeapRegion* region = _heap->get_region(idx);
1053     if (region->is_trash()) {
1054       // Trashed regions represent regions that had been in the collection set but have not yet been "cleaned up".
1055       if (region->is_old()) {
1056         old_cset_regions++;
1057       } else {
1058         assert(region->is_young(), "Trashed region should be old or young");
1059         young_cset_regions++;
1060       }
1061     }
1062     if (region->is_alloc_allowed() || region->is_trash()) {
1063       assert(!region->is_cset(), "Shouldn't be adding cset regions to the free set");
1064       assert(_free_sets.in_free_set(idx, NotFree), "We are about to make region free; it should not be free already");
1065 
1066       // Do not add regions that would almost surely fail allocation
1067       if (alloc_capacity(region) < PLAB::min_size() * HeapWordSize) continue;
1068 
1069       if (region->is_old()) {
1070         _free_sets.make_free(idx, OldCollector, alloc_capacity(region));
1071         log_debug(gc, free)(
1072           "  Adding Region " SIZE_FORMAT  " (Free: " SIZE_FORMAT "%s, Used: " SIZE_FORMAT "%s) to old collector set",
1073           idx, byte_size_in_proper_unit(region->free()), proper_unit_for_byte_size(region->free()),
1074           byte_size_in_proper_unit(region->used()), proper_unit_for_byte_size(region->used()));
1075       } else {
1076         _free_sets.make_free(idx, Mutator, alloc_capacity(region));
1077         log_debug(gc, free)(
1078           "  Adding Region " SIZE_FORMAT " (Free: " SIZE_FORMAT "%s, Used: " SIZE_FORMAT "%s) to mutator set",
1079           idx, byte_size_in_proper_unit(region->free()), proper_unit_for_byte_size(region->free()),
1080           byte_size_in_proper_unit(region->used()), proper_unit_for_byte_size(region->used()));
1081       }
1082     }
1083   }
1084 }
1085 
// Move no more than max_xfer_regions from the existing Collector and OldCollector free sets to the Mutator free set.
1087 // This is called from outside the heap lock.
1088 void ShenandoahFreeSet::move_collector_sets_to_mutator(size_t max_xfer_regions) {
1089   size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1090   size_t collector_empty_xfer = 0;
1091   size_t collector_not_empty_xfer = 0;
1092   size_t old_collector_empty_xfer = 0;
1093 
1094   // Process empty regions within the Collector free set
1095   if ((max_xfer_regions > 0) && (_free_sets.leftmost_empty(Collector) <= _free_sets.rightmost_empty(Collector))) {
1096     ShenandoahHeapLocker locker(_heap->lock());
1097     for (size_t idx = _free_sets.leftmost_empty(Collector);
1098          (max_xfer_regions > 0) && (idx <= _free_sets.rightmost_empty(Collector)); idx++) {
1099       if (_free_sets.in_free_set(idx, Collector) && can_allocate_from(idx)) {
1100         _free_sets.move_to_set(idx, Mutator, region_size_bytes);
1101         max_xfer_regions--;
1102         collector_empty_xfer += region_size_bytes;
1103       }
1104     }
1105   }
1106 
1107   // Process empty regions within the OldCollector free set
1108   size_t old_collector_regions = 0;
1109   if ((max_xfer_regions > 0) && (_free_sets.leftmost_empty(OldCollector) <= _free_sets.rightmost_empty(OldCollector))) {
1110     ShenandoahHeapLocker locker(_heap->lock());
1111     for (size_t idx = _free_sets.leftmost_empty(OldCollector);
1112          (max_xfer_regions > 0) && (idx <= _free_sets.rightmost_empty(OldCollector)); idx++) {
1113       if (_free_sets.in_free_set(idx, OldCollector) && can_allocate_from(idx)) {
1114         _free_sets.move_to_set(idx, Mutator, region_size_bytes);
1115         max_xfer_regions--;
1116         old_collector_empty_xfer += region_size_bytes;
1117         old_collector_regions++;
1118       }
1119     }
1120     if (old_collector_regions > 0) {
1121       _heap->generation_sizer()->transfer_to_young(old_collector_regions);
1122     }
1123   }
1124 
1125   // If there are any non-empty regions within Collector set, we can also move them to the Mutator free set
1126   if ((max_xfer_regions > 0) && (_free_sets.leftmost(Collector) <= _free_sets.rightmost(Collector))) {
1127     ShenandoahHeapLocker locker(_heap->lock());
1128     for (size_t idx = _free_sets.leftmost(Collector); (max_xfer_regions > 0) && (idx <= _free_sets.rightmost(Collector)); idx++) {
1129       size_t alloc_capacity = this->alloc_capacity(idx);
1130       if (_free_sets.in_free_set(idx, Collector) && (alloc_capacity > 0)) {
1131         _free_sets.move_to_set(idx, Mutator, alloc_capacity);
1132         max_xfer_regions--;
1133         collector_not_empty_xfer += alloc_capacity;
1134       }
1135     }
1136   }
1137 
1138   size_t collector_xfer = collector_empty_xfer + collector_not_empty_xfer;
1139   size_t total_xfer = collector_xfer + old_collector_empty_xfer;
1140   log_info(gc, free)("At start of update refs, moving " SIZE_FORMAT "%s to Mutator free set from Collector Reserve ("
1141                      SIZE_FORMAT "%s) and from Old Collector Reserve (" SIZE_FORMAT "%s)",
1142                      byte_size_in_proper_unit(total_xfer), proper_unit_for_byte_size(total_xfer),
1143                      byte_size_in_proper_unit(collector_xfer), proper_unit_for_byte_size(collector_xfer),
1144                      byte_size_in_proper_unit(old_collector_empty_xfer), proper_unit_for_byte_size(old_collector_empty_xfer));
1145 }
1146 
1147 
1148 // Overwrite arguments to represent the amount of memory in each generation that is about to be recycled
1149 void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_cset_regions, size_t &old_cset_regions) {
1150   shenandoah_assert_heaplocked();
1151   // This resets all state information, removing all regions from all sets.
1152   clear();
1153   log_debug(gc, free)("Rebuilding FreeSet");
1154 
1155   // This places regions that have alloc_capacity into the old_collector set if they identify as is_old(),
1156   // or into the mutator set otherwise.
1157   find_regions_with_alloc_capacity(young_cset_regions, old_cset_regions);
1158 }
1159 
1160 void ShenandoahFreeSet::rebuild(size_t young_cset_regions, size_t old_cset_regions) {
1161   shenandoah_assert_heaplocked();
1162   size_t young_reserve, old_reserve;
1163   size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1164 
1165   size_t old_capacity = _heap->old_generation()->max_capacity();
1166   size_t old_available = _heap->old_generation()->available();
1167   size_t old_unaffiliated_regions = _heap->old_generation()->free_unaffiliated_regions();
1168   size_t young_capacity = _heap->young_generation()->max_capacity();
1169   size_t young_available = _heap->young_generation()->available();
1170   size_t young_unaffiliated_regions = _heap->young_generation()->free_unaffiliated_regions();
1171 
1172   old_unaffiliated_regions += old_cset_regions;
1173   old_available += old_cset_regions * region_size_bytes;
1174   young_unaffiliated_regions += young_cset_regions;
1175   young_available += young_cset_regions * region_size_bytes;
1176 
1177   // Consult old-region surplus and deficit to make adjustments to current generation capacities and availability.
1178   // The generation region transfers take place after we rebuild.
1179   size_t old_region_surplus = _heap->get_old_region_surplus();
1180   size_t old_region_deficit = _heap->get_old_region_deficit();
1181 
1182   if (old_region_surplus > 0) {
1183     size_t xfer_bytes = old_region_surplus * region_size_bytes;
1184     assert(old_region_surplus <= old_unaffiliated_regions, "Cannot transfer regions that are affiliated");
1185     old_capacity -= xfer_bytes;
1186     old_available -= xfer_bytes;
1187     old_unaffiliated_regions -= old_region_surplus;
1188     young_capacity += xfer_bytes;
1189     young_available += xfer_bytes;
1190     young_unaffiliated_regions += old_region_surplus;
1191   } else if (old_region_deficit > 0) {
1192     size_t xfer_bytes = old_region_deficit * region_size_bytes;
1193     assert(old_region_deficit <= young_unaffiliated_regions, "Cannot transfer regions that are affiliated");
1194     old_capacity += xfer_bytes;
1195     old_available += xfer_bytes;
1196     old_unaffiliated_regions += old_region_deficit;
1197     young_capacity -= xfer_bytes;
1198     young_available -= xfer_bytes;
1199     young_unaffiliated_regions -= old_region_deficit;
1200   }
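  // Illustrative arithmetic only (hypothetical numbers): with 8 MiB regions, an old_region_surplus of 4 shifts
  // 4 * 8 MiB = 32 MiB of capacity and availability from the old tallies to the young tallies above; the actual
  // region transfer between generations happens later, after the rebuild completes.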
1201 
1202   // Evac reserve: reserve trailing space for evacuations, with regions reserved for old evacuations placed to the right
1203   // of regions reserved for young evacuations.
1204   if (!_heap->mode()->is_generational()) {
1205     young_reserve = (_heap->max_capacity() / 100) * ShenandoahEvacReserve;
1206     old_reserve = 0;
1207   } else {
1208     // All allocations taken from the old collector set are performed by GC, generally using PLABs for both
1209     // promotions and evacuations.  The partition of old memory between the evacuation reserve and the
1210     // promotion reserve is enforced using thread-local variables that prescribe intentions for each
1211     // PLAB's available memory.
1212     if (_heap->has_evacuation_reserve_quantities()) {
1213       // We are rebuilding at the end of final mark, having already established evacuation budgets for this GC pass.
1214       young_reserve = _heap->get_young_evac_reserve();
1215       old_reserve = _heap->get_promoted_reserve() + _heap->get_old_evac_reserve();
1216       assert(old_reserve <= old_available,
1217              "Cannot reserve (" SIZE_FORMAT " + " SIZE_FORMAT") more OLD than is available: " SIZE_FORMAT,
1218              _heap->get_promoted_reserve(), _heap->get_old_evac_reserve(), old_available);
1219     } else {
1220       // We are rebuilding at the end of GC, so we set aside the budgets specified on the command line (or their defaults)
1221       young_reserve = (young_capacity * ShenandoahEvacReserve) / 100;
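      // Illustrative arithmetic only (hypothetical numbers): with young_capacity of 2048 MiB and
      // ShenandoahEvacReserve set to 5 (percent), young_reserve would be about 2048 * 5 / 100 = 102 MiB.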
1222       // The auto-sizer has already made old-gen large enough to hold all anticipated evacuations and promotions.
1223       // Affiliated old-gen regions are already in the OldCollector free set.  Add in the relevant number of
1224       // unaffiliated regions.
1225       old_reserve = old_available;
1226     }
1227   }
1228   if (old_reserve > _free_sets.capacity_of(OldCollector)) {
1229     // Old available regions that have less than PLAB::min_size() of available memory are not placed into the OldCollector
1230     // free set.  Because of this, old_available may not have enough memory to represent the intended reserve.  Adjust
1231     // the reserve downward to account for this possibility. This loss is part of the reason why the original budget
1232     // was adjusted with ShenandoahOldEvacWaste and ShenandoahOldPromoWaste multipliers.
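    // Illustrative arithmetic only (hypothetical numbers): if old_reserve is 48 MiB but the OldCollector set
    // currently holds 30 MiB of capacity and there are two unaffiliated 8 MiB old regions, the reserve is
    // trimmed to 30 + 2 * 8 = 46 MiB by the check below.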
1233     if (old_reserve > _free_sets.capacity_of(OldCollector) + old_unaffiliated_regions * region_size_bytes) {
1234       old_reserve = _free_sets.capacity_of(OldCollector) + old_unaffiliated_regions * region_size_bytes;
1235     }
1236   }
1237   if (young_reserve > young_unaffiliated_regions * region_size_bytes) {
1238     young_reserve = young_unaffiliated_regions * region_size_bytes;
1239   }
1240 
1241   reserve_regions(young_reserve, old_reserve);
1242   _free_sets.establish_alloc_bias(OldCollector);
1243   _free_sets.assert_bounds();
1244   log_status();
1245 }
1246 
1247 // Having placed all regions that have allocation capacity into the mutator set if they identify as is_young(),
1248 // or into the old collector set if they identify as is_old(), move some of these regions from the mutator set
1249 // into the collector set or the old collector set to ensure that the memory available for allocations within
1250 // the collector set is at least to_reserve, and the memory available for allocations within the old collector set
1251 // is at least to_reserve_old.
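// A hypothetical walk-through (illustrative numbers only): with 8 MiB regions, to_reserve of 40 MiB and
// to_reserve_old of 16 MiB, the loop below scans from the highest-indexed region downward; empty (or trash)
// mutator regions are steered into the OldCollector set until its capacity reaches 16 MiB, while other
// candidates (and, after that, empty regions as well) go to the Collector set until its capacity reaches
// 40 MiB, at which point the loop exits early.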
1252 void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old) {
1253   for (size_t i = _heap->num_regions(); i > 0; i--) {
1254     size_t idx = i - 1;
1255     ShenandoahHeapRegion* r = _heap->get_region(idx);
1256     if (_free_sets.in_free_set(idx, Mutator)) {
1257       assert (!r->is_old(), "mutator_is_free regions should not be affiliated OLD");
1258       size_t ac = alloc_capacity(r);
1259       assert (ac > 0, "Membership in free set implies has capacity");
1260 
1261       // OLD regions that have available memory are already in the old_collector free set
1262       if ((_free_sets.capacity_of(OldCollector) < to_reserve_old) && (r->is_trash() || !r->is_affiliated())) {
1263         _free_sets.move_to_set(idx, OldCollector, alloc_capacity(r));
1264         log_debug(gc, free)("  Shifting region " SIZE_FORMAT " from mutator_free to old_collector_free", idx);
1265       } else if (_free_sets.capacity_of(Collector) < to_reserve) {
1266         // Note: In a previous implementation, regions were only placed into the survivor space (collector_is_free) if
1267         // they were entirely empty.  I'm not sure I understand the rationale for that.  That alternative behavior would
1268         // tend to mix survivor objects with ephemeral objects, making it more difficult to reclaim the memory for the
1269         // ephemeral objects.  It also delays aging of regions, causing promotion in place to be delayed.
1270         _free_sets.move_to_set(idx, Collector, ac);
1271         log_debug(gc)("  Shifting region " SIZE_FORMAT " from mutator_free to collector_free", idx);
1272       } else {
1273         // We've satisfied both to_reserve and to_reserve_old
1274         break;
1275       }
1276     }
1277   }
1278 }
1279 
1280 void ShenandoahFreeSet::log_status() {
1281   shenandoah_assert_heaplocked();
1282 
1283 #ifdef ASSERT
1284   // A dump of the FreeSet details is only enabled if assertions are enabled
1285   {
1286 #define BUFFER_SIZE 80
1287     size_t retired_old = 0;
1288     size_t retired_old_humongous = 0;
1289     size_t retired_young = 0;
1290     size_t retired_young_humongous = 0;
1291     size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1292     size_t retired_young_waste = 0;
1293     size_t retired_old_waste = 0;
1294     size_t consumed_collector = 0;
1295     size_t consumed_old_collector = 0;
1296     size_t consumed_mutator = 0;
1297     size_t available_old = 0;
1298     size_t available_young = 0;
1299     size_t available_mutator = 0;
1300     size_t available_collector = 0;
1301     size_t available_old_collector = 0;
1302 
1303     char buffer[BUFFER_SIZE];
1304     for (uint i = 0; i < BUFFER_SIZE; i++) {
1305       buffer[i] = '\0';
1306     }
1307     log_info(gc, free)("FreeSet map legend:"
1308                        " M:mutator_free C:collector_free O:old_collector_free"
1309                        " H:humongous ~:retired old _:retired young");
1310     log_info(gc, free)(" mutator free range [" SIZE_FORMAT ".." SIZE_FORMAT "], "
1311                        " collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "], "
1312                        "old collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "] allocates from %s",
1313                        _free_sets.leftmost(Mutator), _free_sets.rightmost(Mutator),
1314                        _free_sets.leftmost(Collector), _free_sets.rightmost(Collector),
1315                        _free_sets.leftmost(OldCollector), _free_sets.rightmost(OldCollector),
1316                        _free_sets.alloc_from_left_bias(OldCollector)? "left to right": "right to left");
1317 
1318     for (uint i = 0; i < _heap->num_regions(); i++) {
1319       ShenandoahHeapRegion *r = _heap->get_region(i);
1320       uint idx = i % 64;
1321       if ((i != 0) && (idx == 0)) {
1322         log_info(gc, free)(" %6u: %s", i-64, buffer);
1323       }
1324       if (_free_sets.in_free_set(i, Mutator)) {
1325         assert(!r->is_old(), "Old regions should not be in mutator_free set");
1326         size_t capacity = alloc_capacity(r);
1327         available_mutator += capacity;
1328         consumed_mutator += region_size_bytes - capacity;
1329         buffer[idx] = (capacity == region_size_bytes)? 'M': 'm';
1330       } else if (_free_sets.in_free_set(i, Collector)) {
1331         assert(!r->is_old(), "Old regions should not be in collector_free set");
1332         size_t capacity = alloc_capacity(r);
1333         available_collector += capacity;
1334         consumed_collector += region_size_bytes - capacity;
1335         buffer[idx] = (capacity == region_size_bytes)? 'C': 'c';
1336       } else if (_free_sets.in_free_set(i, OldCollector)) {
1337         size_t capacity = alloc_capacity(r);
1338         available_old_collector += capacity;
1339         consumed_old_collector += region_size_bytes - capacity;
1340         buffer[idx] = (capacity == region_size_bytes)? 'O': 'o';
1341       } else if (r->is_humongous()) {
1342         if (r->is_old()) {
1343           buffer[idx] = 'H';
1344           retired_old_humongous += region_size_bytes;
1345         } else {
1346           buffer[idx] = 'h';
1347           retired_young_humongous += region_size_bytes;
1348         }
1349       } else {
1350         if (r->is_old()) {
1351           buffer[idx] = '~';
1352           retired_old_waste += alloc_capacity(r);
1353           retired_old += region_size_bytes;
1354         } else {
1355           buffer[idx] = '_';
1356           retired_young_waste += alloc_capacity(r);
1357           retired_young += region_size_bytes;
1358         }
1359       }
1360     }
1361     uint remnant = _heap->num_regions() % 64;
1362     if (remnant > 0) {
1363       buffer[remnant] = '\0';
1364     } else {
1365       remnant = 64;
1366     }
1367     log_info(gc, free)(" %6u: %s", (uint) (_heap->num_regions() - remnant), buffer);
1368     size_t total_young = retired_young + retired_young_humongous;
1369     size_t total_old = retired_old + retired_old_humongous;
1370   }
1371 #endif
1372 
1373   LogTarget(Info, gc, free) lt;
1374   if (lt.is_enabled()) {
1375     ResourceMark rm;
1376     LogStream ls(lt);
1377 
1378     {
1379       size_t last_idx = 0;
1380       size_t max = 0;
1381       size_t max_contig = 0;
1382       size_t empty_contig = 0;
1383 
1384       size_t total_used = 0;
1385       size_t total_free = 0;
1386       size_t total_free_ext = 0;
1387 
1388       for (size_t idx = _free_sets.leftmost(Mutator); idx <= _free_sets.rightmost(Mutator); idx++) {
1389         if (_free_sets.in_free_set(idx, Mutator)) {
1390           ShenandoahHeapRegion *r = _heap->get_region(idx);
1391           size_t free = alloc_capacity(r);
1392           max = MAX2(max, free);
1393           if (r->is_empty()) {
1394             total_free_ext += free;
1395             if (last_idx + 1 == idx) {
1396               empty_contig++;
1397             } else {
1398               empty_contig = 1;
1399             }
1400           } else {
1401             empty_contig = 0;
1402           }
1403           total_used += r->used();
1404           total_free += free;
1405           max_contig = MAX2(max_contig, empty_contig);
1406           last_idx = idx;
1407         }
1408       }
1409 
1410       size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
1411       size_t free = capacity() - used();
1412 
1413       assert(free == total_free, "Sum of free within mutator regions (" SIZE_FORMAT
1414              ") should match mutator capacity (" SIZE_FORMAT ") minus mutator used (" SIZE_FORMAT ")",
1415              total_free, capacity(), used());
1416 
1417       ls.print("Free: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s regular, " SIZE_FORMAT "%s humongous, ",
1418                byte_size_in_proper_unit(total_free),    proper_unit_for_byte_size(total_free),
1419                byte_size_in_proper_unit(max),           proper_unit_for_byte_size(max),
1420                byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous)
1421       );
1422 
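      // Note: the "external" percentage printed below mirrors external_fragmentation() at the end of this file;
      // the "internal" percentage is the average fraction of each mutator free-set region that is already used.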
1423       ls.print("Frag: ");
1424       size_t frag_ext;
1425       if (total_free_ext > 0) {
1426         frag_ext = 100 - (100 * max_humongous / total_free_ext);
1427       } else {
1428         frag_ext = 0;
1429       }
1430       ls.print(SIZE_FORMAT "%% external, ", frag_ext);
1431 
1432       size_t frag_int;
1433       if (_free_sets.count(Mutator) > 0) {
1434         frag_int = (100 * (total_used / _free_sets.count(Mutator)) / ShenandoahHeapRegion::region_size_bytes());
1435       } else {
1436         frag_int = 0;
1437       }
1438       ls.print(SIZE_FORMAT "%% internal; ", frag_int);
1439       ls.print("Used: " SIZE_FORMAT "%s, Mutator Free: " SIZE_FORMAT,
1440                byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used), _free_sets.count(Mutator));
1441     }
1442 
1443     {
1444       size_t max = 0;
1445       size_t total_free = 0;
1446       size_t total_used = 0;
1447 
1448       for (size_t idx = _free_sets.leftmost(Collector); idx <= _free_sets.rightmost(Collector); idx++) {
1449         if (_free_sets.in_free_set(idx, Collector)) {
1450           ShenandoahHeapRegion *r = _heap->get_region(idx);
1451           size_t free = alloc_capacity(r);
1452           max = MAX2(max, free);
1453           total_free += free;
1454           total_used += r->used();
1455         }
1456       }
1457       ls.print(" Collector Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s; Used: " SIZE_FORMAT "%s",
1458                byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
1459                byte_size_in_proper_unit(max),        proper_unit_for_byte_size(max),
1460                byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used));
1461     }
1462 
1463     if (_heap->mode()->is_generational()) {
1464       size_t max = 0;
1465       size_t total_free = 0;
1466       size_t total_used = 0;
1467 
1468       for (size_t idx = _free_sets.leftmost(OldCollector); idx <= _free_sets.rightmost(OldCollector); idx++) {
1469         if (_free_sets.in_free_set(idx, OldCollector)) {
1470           ShenandoahHeapRegion *r = _heap->get_region(idx);
1471           size_t free = alloc_capacity(r);
1472           max = MAX2(max, free);
1473           total_free += free;
1474           total_used += r->used();
1475         }
1476       }
1477       ls.print_cr(" Old Collector Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s; Used: " SIZE_FORMAT "%s",
1478                   byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
1479                   byte_size_in_proper_unit(max),        proper_unit_for_byte_size(max),
1480                   byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used));
1481     }
1482   }
1483 }
1484 
1485 HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
1486   shenandoah_assert_heaplocked();
1487   _free_sets.assert_bounds();
1488 
1489   // Allocation request is known to satisfy all memory budgeting constraints.
1490   if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
1491     switch (req.type()) {
1492       case ShenandoahAllocRequest::_alloc_shared:
1493       case ShenandoahAllocRequest::_alloc_shared_gc:
1494         in_new_region = true;
1495         return allocate_contiguous(req);
1496       case ShenandoahAllocRequest::_alloc_plab:
1497       case ShenandoahAllocRequest::_alloc_gclab:
1498       case ShenandoahAllocRequest::_alloc_tlab:
1499         in_new_region = false;
1500         assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
1501                req.size(), ShenandoahHeapRegion::humongous_threshold_words());
1502         return nullptr;
1503       default:
1504         ShouldNotReachHere();
1505         return nullptr;
1506     }
1507   } else {
1508     return allocate_single(req, in_new_region);
1509   }
1510 }
1511 
1512 size_t ShenandoahFreeSet::unsafe_peek_free() const {
1513   // Deliberately not locked; this method is unsafe when the free set is being modified.
1514 
1515   for (size_t index = _free_sets.leftmost(Mutator); index <= _free_sets.rightmost(Mutator); index++) {
1516     if (index < _free_sets.max() && _free_sets.in_free_set(index, Mutator)) {
1517       ShenandoahHeapRegion* r = _heap->get_region(index);
1518       if (r->free() >= MinTLABSize) {
1519         return r->free();
1520       }
1521     }
1522   }
1523 
1524   // It appears that no regions are left
1525   return 0;
1526 }
1527 
1528 void ShenandoahFreeSet::print_on(outputStream* out) const {
1529   out->print_cr("Mutator Free Set: " SIZE_FORMAT "", _free_sets.count(Mutator));
1530   for (size_t index = _free_sets.leftmost(Mutator); index <= _free_sets.rightmost(Mutator); index++) {
1531     if (_free_sets.in_free_set(index, Mutator)) {
1532       _heap->get_region(index)->print_on(out);
1533     }
1534   }
1535   out->print_cr("Collector Free Set: " SIZE_FORMAT "", _free_sets.count(Collector));
1536   for (size_t index = _free_sets.leftmost(Collector); index <= _free_sets.rightmost(Collector); index++) {
1537     if (_free_sets.in_free_set(index, Collector)) {
1538       _heap->get_region(index)->print_on(out);
1539     }
1540   }
1541   if (_heap->mode()->is_generational()) {
1542     out->print_cr("Old Collector Free Set: " SIZE_FORMAT "", _free_sets.count(OldCollector));
1543     for (size_t index = _free_sets.leftmost(OldCollector); index <= _free_sets.rightmost(OldCollector); index++) {
1544       if (_free_sets.in_free_set(index, OldCollector)) {
1545         _heap->get_region(index)->print_on(out);
1546       }
1547     }
1548   }
1549 }
1550 
1551 /*
1552  * Internal fragmentation metric: describes how fragmented the heap regions are.
1553  *
1554  * It is derived as:
1555  *
1556  *               sum(used[i]^2, i=0..k)
1557  *   IF = 1 - ------------------------------
1558  *              C * sum(used[i], i=0..k)
1559  *
1560  * ...where k is the number of regions in computation, C is the region capacity, and
1561  * used[i] is the used space in the region.
1562  *
1563  * The non-linearity causes IF to be lower for the cases where the same total heap
1564  * used is densely packed. For example:
1565  *   a) Heap is completely full  => IF = 0
1566  *   b) Heap is half full, the first 50% of regions are completely full => IF = 0
1567  *   c) Heap is half full, each region is 50% full => IF = 1/2
1568  *   d) Heap is quarter full, the first 25% of regions are completely full => IF = 0
1569  *   e) Heap is quarter full, each region is 25% full => IF = 3/4
1570  *   f) Heap has one small object per region => IF =~ 1
1571  */
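/*
 * A numeric sanity check (illustrative values only, not taken from a real heap): with C = 1 MiB and k = 4
 * regions, each 50% used (512 KiB):
 *
 *   sum(used[i]^2)   = 4 * (2^19)^2 = 2^40
 *   C * sum(used[i]) = 2^20 * (4 * 2^19) = 2^41
 *   IF               = 1 - 2^40 / 2^41 = 1/2
 *
 * which matches case (c) above.
 */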
1572 double ShenandoahFreeSet::internal_fragmentation() {
1573   double squared = 0;
1574   double linear = 0;
1575   int count = 0;
1576 
1577   for (size_t index = _free_sets.leftmost(Mutator); index <= _free_sets.rightmost(Mutator); index++) {
1578     if (_free_sets.in_free_set(index, Mutator)) {
1579       ShenandoahHeapRegion* r = _heap->get_region(index);
1580       size_t used = r->used();
1581       squared += used * used;
1582       linear += used;
1583       count++;
1584     }
1585   }
1586 
1587   if (count > 0) {
1588     double s = squared / (ShenandoahHeapRegion::region_size_bytes() * linear);
1589     return 1 - s;
1590   } else {
1591     return 0;
1592   }
1593 }
1594 
1595 /*
1596  * External fragmentation metric: describes how fragmented the heap is.
1597  *
1598  * It is derived as:
1599  *
1600  *   EF = 1 - largest_contiguous_free / total_free
1601  *
1602  * For example:
1603  *   a) Heap is completely empty => EF = 0
1604  *   b) Heap is completely full => EF = 0
1605  *   c) Heap is first-half full => EF = 1/2
1606  *   d) Heap is half full, full and empty regions interleave => EF =~ 1
1607  */
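/*
 * A numeric sanity check (illustrative values only): with 16 empty mutator regions whose largest contiguous
 * run is 4 regions,
 *
 *   EF = 1 - 4 / 16 = 3/4
 *
 * meaning only a quarter of the free memory could back a single maximally sized humongous allocation.
 */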
1608 double ShenandoahFreeSet::external_fragmentation() {
1609   size_t last_idx = 0;
1610   size_t max_contig = 0;
1611   size_t empty_contig = 0;
1612 
1613   size_t free = 0;
1614 
1615   for (size_t index = _free_sets.leftmost(Mutator); index <= _free_sets.rightmost(Mutator); index++) {
1616     if (_free_sets.in_free_set(index, Mutator)) {
1617       ShenandoahHeapRegion* r = _heap->get_region(index);
1618       if (r->is_empty()) {
1619         free += ShenandoahHeapRegion::region_size_bytes();
1620         if (last_idx + 1 == index) {
1621           empty_contig++;
1622         } else {
1623           empty_contig = 1;
1624         }
1625       } else {
1626         empty_contig = 0;
1627       }
1628 
1629       max_contig = MAX2(max_contig, empty_contig);
1630       last_idx = index;
1631     }
1632   }
1633 
1634   if (free > 0) {
1635     return 1 - (1.0 * max_contig * ShenandoahHeapRegion::region_size_bytes() / free);
1636   } else {
1637     return 0;
1638   }
1639 }
1640