src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp

   1 /*
   2  * Copyright (c) 2016, 2021, Red Hat, Inc. All rights reserved.

   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/tlab_globals.hpp"
  27 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  28 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  29 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  30 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  31 #include "logging/logStream.hpp"
  32 #include "memory/resourceArea.hpp"
  33 #include "runtime/orderAccess.hpp"
  34 
  35 ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
  36   _heap(heap),
  37   _mutator_free_bitmap(max_regions, mtGC),
  38   _collector_free_bitmap(max_regions, mtGC),
  39   _max(max_regions)
  40 {
  41   clear_internal();
  42 }
  43 
  44 void ShenandoahFreeSet::increase_used(size_t num_bytes) {
  45   shenandoah_assert_heaplocked();
  46   _used += num_bytes;
  47 
  48   assert(_used <= _capacity, "must not use more than we have: used: " SIZE_FORMAT
  49          ", capacity: " SIZE_FORMAT ", num_bytes: " SIZE_FORMAT, _used, _capacity, num_bytes);
  50 }
  51 
  52 bool ShenandoahFreeSet::is_mutator_free(size_t idx) const {
  53   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
  54           idx, _max, _mutator_leftmost, _mutator_rightmost);
  55   return _mutator_free_bitmap.at(idx);
  56 }
  57 
  58 bool ShenandoahFreeSet::is_collector_free(size_t idx) const {
  59   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
  60           idx, _max, _collector_leftmost, _collector_rightmost);
  61   return _collector_free_bitmap.at(idx);
  62 }
  63 
  64 HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
  65   // Scan the bitmap looking for a first fit.
  66   //
  67   // Leftmost and rightmost bounds provide enough caching to walk the bitmap efficiently. Normally,
  68   // we would find the region to allocate in right away.
  69   //
  70   // Allocations are biased: new application allocs go to the beginning of the heap, and GC allocs
  71   // go to the end. This makes application allocation faster, because we would clear lots
  72   // of regions from the beginning most of the time.
  73   //
  74   // Free set maintains mutator and collector views, and normally they allocate in their views only,
  75   // unless we have special cases for stealing and mixed allocations.
  76 
  77   switch (req.type()) {
  78     case ShenandoahAllocRequest::_alloc_tlab:
  79     case ShenandoahAllocRequest::_alloc_shared: {
  80 
  81       // Try to allocate in the mutator view
  82       for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
  83         if (is_mutator_free(idx)) {
  84           HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
  85           if (result != nullptr) {
  86             return result;
  87           }
  88         }
  89       }
  90 
  91       // There is no recovery. Mutator does not touch collector view at all.
  92       break;
  93     }
  94     case ShenandoahAllocRequest::_alloc_gclab:
  95     case ShenandoahAllocRequest::_alloc_shared_gc: {
  96       // size_t is unsigned, need to dodge underflow when _leftmost = 0
  97 
  98       // Fast-path: try to allocate in the collector view first
  99       for (size_t c = _collector_rightmost + 1; c > _collector_leftmost; c--) {
 100         size_t idx = c - 1;
 101         if (is_collector_free(idx)) {
 102           HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
 103           if (result != nullptr) {
 104             return result;
 105           }
 106         }
 107       }
 108 
 109       // No dice. Can we borrow space from mutator view?
 110       if (!ShenandoahEvacReserveOverflow) {
 111         return nullptr;
 112       }
 113 
 114       // Try to steal the empty region from the mutator view
 115       for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
 116         size_t idx = c - 1;
 117         if (is_mutator_free(idx)) {
 118           ShenandoahHeapRegion* r = _heap->get_region(idx);
 119           if (can_allocate_from(r)) {
 120             flip_to_gc(r);
 121             HeapWord *result = try_allocate_in(r, req, in_new_region);
 122             if (result != nullptr) {
 123               return result;
 124             }
 125           }
 126         }
 127       }
 128 
 129       // No dice. Do not try to mix mutator and GC allocations, because
 130       // URWM moves due to GC allocations would expose unparsable mutator
 131       // allocations.
 132 
 133       break;
 134     }
 135     default:
 136       ShouldNotReachHere();
 137   }
 138 
 139   return nullptr;
 140 }
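
The bias described in the comment at the top of allocate_single() is easy to lose in the switch above, so here is a minimal standalone sketch of the idea (this is not the HotSpot code; the vector bitmaps and function names are hypothetical): mutator requests scan a free bitmap left to right and take the first fit, while collector requests scan right to left, so application data and GC data cluster at opposite ends of the heap.

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Hypothetical stand-ins for the mutator/collector free bitmaps;
    // true means "region is free for that view".
    static std::vector<bool> mutator_free;
    static std::vector<bool> collector_free;

    // Mutator allocations: scan left to right, take the first free region.
    static long allocate_for_mutator() {
      for (size_t idx = 0; idx < mutator_free.size(); idx++) {
        if (mutator_free[idx]) {
          mutator_free[idx] = false;        // retire it from the mutator view
          return (long) idx;
        }
      }
      return -1;                            // no free region in this view
    }

    // Collector allocations: scan right to left, so GC data lands at the
    // opposite end of the heap from fresh application data.
    static long allocate_for_collector() {
      for (size_t c = collector_free.size(); c > 0; c--) {
        size_t idx = c - 1;                 // avoid size_t underflow at index 0
        if (collector_free[idx]) {
          collector_free[idx] = false;
          return (long) idx;
        }
      }
      return -1;
    }

    int main() {
      mutator_free.assign(8, true);         // toy heap: eight regions, all free
      collector_free.assign(8, true);
      printf("mutator got region %ld, collector got region %ld\n",
             allocate_for_mutator(), allocate_for_collector());  // prints 0 and 7
      return 0;
    }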
 141 
 142 HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
 143   assert (!has_no_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());
 144 
 145   if (_heap->is_concurrent_weak_root_in_progress() &&
 146       r->is_trash()) {
 147     return nullptr;
 148   }
 149 
 150   try_recycle_trashed(r);
 151 
 152   in_new_region = r->is_empty();
 153 
 154   HeapWord* result = nullptr;
 155   size_t size = req.size();
 156 
 157   if (req.is_lab_alloc()) {
 158     size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
 159     if (size > free) {
 160       size = free;
 161     }
 162     if (size >= req.min_size()) {
 163       result = r->allocate(size, req.type());
 164       assert (result != nullptr, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size);
 165     }
 166   } else {
 167     result = r->allocate(size, req.type());
 168   }
 169 
 170   if (result != nullptr) {
 171     // Allocation successful, bump stats:
 172     if (req.is_mutator_alloc()) {
 173       increase_used(size * HeapWordSize);
 174     }
 175 
 176     // Record actual allocation size
 177     req.set_actual_size(size);
 178 
 179     if (req.is_gc_alloc()) {
 180       r->set_update_watermark(r->top());
 181     }
 182   }
 183 
 184   if (result == nullptr || has_no_alloc_capacity(r)) {
 185     // Region cannot afford this or future allocations. Retire it.
 186     //
 187     // While this seems a bit harsh, especially in the case when this large allocation does not
 188     // fit but the next small one would, we risk inflating scan times when lots of
 189     // almost-full regions precede the fully-empty region where we want to allocate the entire TLAB.
 190     // TODO: Record first fully-empty region, and use that for large allocations
 191 
 192     // Record the remainder as allocation waste
 193     if (req.is_mutator_alloc()) {
 194       size_t waste = r->free();
 195       if (waste > 0) {
 196         increase_used(waste);
 197         _heap->notify_mutator_alloc_words(waste >> LogHeapWordSize, true);
 198       }
 199     }
 200 
 201     size_t num = r->index();
 202     _collector_free_bitmap.clear_bit(num);
 203     _mutator_free_bitmap.clear_bit(num);
 204     // Touched the bounds? Need to update:
 205     if (touches_bounds(num)) {
 206       adjust_bounds();
 207     }
 208     assert_bounds();
 209   }
 210   return result;
 211 }
 212 
 213 bool ShenandoahFreeSet::touches_bounds(size_t num) const {
 214   return num == _collector_leftmost || num == _collector_rightmost || num == _mutator_leftmost || num == _mutator_rightmost;
 215 }
 216 
 217 void ShenandoahFreeSet::recompute_bounds() {
 218   // Reset to the most pessimistic case:
 219   _mutator_rightmost = _max - 1;
 220   _mutator_leftmost = 0;
 221   _collector_rightmost = _max - 1;
 222   _collector_leftmost = 0;
 223 
 224   // ...and adjust from there
 225   adjust_bounds();
 226 }
 227 
 228 void ShenandoahFreeSet::adjust_bounds() {
 229   // Rewind both mutator bounds until the next bit.
 230   while (_mutator_leftmost < _max && !is_mutator_free(_mutator_leftmost)) {
 231     _mutator_leftmost++;
 232   }
 233   while (_mutator_rightmost > 0 && !is_mutator_free(_mutator_rightmost)) {
 234     _mutator_rightmost--;
 235   }
 236   // Rewind both collector bounds until the next bit.
 237   while (_collector_leftmost < _max && !is_collector_free(_collector_leftmost)) {
 238     _collector_leftmost++;
 239   }
 240   while (_collector_rightmost > 0 && !is_collector_free(_collector_rightmost)) {
 241     _collector_rightmost--;
 242   }
 243 }
 244 
 245 HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
 246   shenandoah_assert_heaplocked();
 247 
 248   size_t words_size = req.size();
 249   size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
 250 
 251   // No regions left to satisfy allocation, bye.
 252   if (num > mutator_count()) {
 253     return nullptr;
 254   }
 255 
 256   // Find the contiguous interval of $num regions, starting from $beg and ending at $end,
 257   // inclusive. Contiguous allocations are biased to the beginning.
 258 
 259   size_t beg = _mutator_leftmost;
 260   size_t end = beg;
 261 
 262   while (true) {
 263     if (end >= _max) {
 264       // Hit the end, goodbye
 265       return nullptr;
 266     }
 267 
 268     // If regions are not adjacent, then current [beg; end] is useless, and we may fast-forward.
 269     // If region is not completely free, the current [beg; end] is useless, and we may fast-forward.
 270     if (!is_mutator_free(end) || !can_allocate_from(_heap->get_region(end))) {
 271       end++;
 272       beg = end;
 273       continue;
 274     }
 275 
 276     if ((end - beg + 1) == num) {
 277       // found the match
 278       break;
 279     }
 280 
 281     end++;
 282   }
 283 
 284   size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
 285 
 286   // Initialize regions:
 287   for (size_t i = beg; i <= end; i++) {
 288     ShenandoahHeapRegion* r = _heap->get_region(i);
 289     try_recycle_trashed(r);
 290 
 291     assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous");
 292     assert(r->is_empty(), "Should be empty");
 293 
 294     if (i == beg) {
 295       r->make_humongous_start();
 296     } else {
 297       r->make_humongous_cont();
 298     }
 299 
 300     // Trailing region may be non-full, record the remainder there
 301     size_t used_words;
 302     if ((i == end) && (remainder != 0)) {
 303       used_words = remainder;
 304     } else {
 305       used_words = ShenandoahHeapRegion::region_size_words();
 306     }
 307 
 308     r->set_top(r->bottom() + used_words);
 309 
 310     _mutator_free_bitmap.clear_bit(r->index());
 311   }
 312 
 313   // While individual regions report their true use, all humongous regions are
 314   // marked used in the free set.
 315   increase_used(ShenandoahHeapRegion::region_size_bytes() * num);
 316 
 317   if (remainder != 0) {
 318     // Record this remainder as allocation waste
 319     _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);
 320   }
 321 
 322   // Allocated at left/rightmost? Move the bounds appropriately.
 323   if (beg == _mutator_leftmost || end == _mutator_rightmost) {
 324     adjust_bounds();
 325   }
 326   assert_bounds();
 327 
 328   req.set_actual_size(words_size);
 329   return _heap->get_region(beg)->bottom();
 330 }
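
The region count and trailing-remainder arithmetic used by allocate_contiguous() can be sanity-checked with plain integer math. A small sketch, assuming (purely for illustration) 64-bit HeapWords and a 256 KiB region size; required_regions() and region_size_words_mask() are mimicked with ordinary expressions rather than called:

    #include <cstddef>
    #include <cstdio>

    int main() {
      // Illustrative numbers only; the real values come from ShenandoahHeapRegion.
      const size_t region_bytes = 256 * 1024;
      const size_t heap_word    = 8;                     // 64-bit HeapWord
      const size_t region_words = region_bytes / heap_word;

      size_t words_size = region_words * 2 + region_words / 2;  // 2.5 regions worth of words
      // required_regions: round the byte size up to whole regions.
      size_t num        = (words_size * heap_word + region_bytes - 1) / region_bytes;
      // Remainder that lands in the trailing region (region_words is a power of two).
      size_t remainder  = words_size & (region_words - 1);

      printf("regions needed: %zu, trailing region uses %zu of %zu words\n",
             num, remainder, region_words);              // 3 regions, half-full tail
      return 0;
    }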
 331 
 332 bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) {
 333   return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
 334 }
 335 
 336 size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) {
 337   if (r->is_trash()) {
 338     // This would be recycled on allocation path
 339     return ShenandoahHeapRegion::region_size_bytes();
 340   } else {
 341     return r->free();
 342   }
 343 }
 344 
 345 bool ShenandoahFreeSet::has_no_alloc_capacity(ShenandoahHeapRegion *r) {
 346   return alloc_capacity(r) == 0;
 347 }
 348 
 349 void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
 350   if (r->is_trash()) {
 351     _heap->decrease_used(r->used());
 352     r->recycle();
 353   }
 354 }
 355 
 356 void ShenandoahFreeSet::recycle_trash() {
 357   // lock is not reentrant, check we don't have it
 358   shenandoah_assert_not_heaplocked();
 359 
 360   for (size_t i = 0; i < _heap->num_regions(); i++) {
 361     ShenandoahHeapRegion* r = _heap->get_region(i);
 362     if (r->is_trash()) {
 363       ShenandoahHeapLocker locker(_heap->lock());
 364       try_recycle_trashed(r);
 365     }
 366     SpinPause(); // allow allocators to take the lock
 367   }
 368 }
 369 
 370 void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
 371   size_t idx = r->index();
 372 
 373   assert(_mutator_free_bitmap.at(idx), "Should be in mutator view");
 374   assert(can_allocate_from(r), "Should not be allocated");
 375 
 376   _mutator_free_bitmap.clear_bit(idx);
 377   _collector_free_bitmap.set_bit(idx);
 378   _collector_leftmost = MIN2(idx, _collector_leftmost);
 379   _collector_rightmost = MAX2(idx, _collector_rightmost);
 380 
 381   _capacity -= alloc_capacity(r);
 382 
 383   if (touches_bounds(idx)) {
 384     adjust_bounds();
 385   }
 386   assert_bounds();
 387 }
 388 
 389 void ShenandoahFreeSet::clear() {
 390   shenandoah_assert_heaplocked();
 391   clear_internal();
 392 }
 393 
 394 void ShenandoahFreeSet::clear_internal() {
 395   _mutator_free_bitmap.clear();
 396   _collector_free_bitmap.clear();
 397   _mutator_leftmost = _max;
 398   _mutator_rightmost = 0;
 399   _collector_leftmost = _max;
 400   _collector_rightmost = 0;
 401   _capacity = 0;
 402   _used = 0;
 403 }
 404 
 405 void ShenandoahFreeSet::rebuild() {
 406   shenandoah_assert_heaplocked();
 407   clear();
 408 
 409   for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
 410     ShenandoahHeapRegion* region = _heap->get_region(idx);
 411     if (region->is_alloc_allowed() || region->is_trash()) {
 412       assert(!region->is_cset(), "Shouldn't be adding those to the free set");
 413 
 414       // Do not add regions that would surely fail allocation
 415       if (has_no_alloc_capacity(region)) continue;
 416 
 417       _capacity += alloc_capacity(region);
 418       assert(_used <= _capacity, "must not use more than we have");
 419 
 420       assert(!is_mutator_free(idx), "We are about to add it, it shouldn't be there already");
 421       _mutator_free_bitmap.set_bit(idx);
 422     }
 423   }
 424 
 425   // Evac reserve: reserve trailing space for evacuations
 426   size_t to_reserve = _heap->max_capacity() / 100 * ShenandoahEvacReserve;
 427   size_t reserved = 0;
 428 
 429   for (size_t idx = _heap->num_regions() - 1; idx > 0; idx--) {
 430     if (reserved >= to_reserve) break;
 431 
 432     ShenandoahHeapRegion* region = _heap->get_region(idx);
 433     if (_mutator_free_bitmap.at(idx) && can_allocate_from(region)) {
 434       _mutator_free_bitmap.clear_bit(idx);
 435       _collector_free_bitmap.set_bit(idx);
 436       size_t ac = alloc_capacity(region);
 437       _capacity -= ac;
 438       reserved += ac;
 439     }
 440   }
 441 
 442   recompute_bounds();
 443   assert_bounds();
 444 }
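
The evacuation reserve carved out at the end of rebuild() is straightforward percentage arithmetic: to_reserve = max_capacity / 100 * ShenandoahEvacReserve, satisfied by flipping the highest-indexed usable regions into the collector view until the target is met. A back-of-the-envelope check, with a 2 GiB heap, a 5% reserve, and 32 MiB regions assumed only for this example:

    #include <cstddef>
    #include <cstdio>

    int main() {
      // Illustrative values only; these are not queried from a running heap.
      const size_t max_capacity = 2048UL * 1024 * 1024;  // 2 GiB heap
      const size_t evac_reserve = 5;                     // percent, in the spirit of ShenandoahEvacReserve
      const size_t region_bytes = 32UL * 1024 * 1024;    // 32 MiB regions

      size_t to_reserve = max_capacity / 100 * evac_reserve;

      // Walk from the top of the heap, moving empty regions into the reserve
      // until enough capacity is set aside (mirrors the loop above).
      size_t reserved = 0, regions_flipped = 0;
      while (reserved < to_reserve) {
        reserved += region_bytes;
        regions_flipped++;
      }
      printf("reserve target: %zu bytes, satisfied by %zu trailing regions\n",
             to_reserve, regions_flipped);               // 4 regions for these numbers
      return 0;
    }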
 445 
 446 void ShenandoahFreeSet::log_status() {
 447   shenandoah_assert_heaplocked();
 448 
 449   LogTarget(Info, gc, ergo) lt;
 450   if (lt.is_enabled()) {
 451     ResourceMark rm;
 452     LogStream ls(lt);
 453 
 454     {
 455       size_t last_idx = 0;
 456       size_t max = 0;
 457       size_t max_contig = 0;
 458       size_t empty_contig = 0;
 459 
 460       size_t total_used = 0;
 461       size_t total_free = 0;
 462       size_t total_free_ext = 0;
 463 
 464       for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
 465         if (is_mutator_free(idx)) {
 466           ShenandoahHeapRegion *r = _heap->get_region(idx);
 467           size_t free = alloc_capacity(r);
 468 
 469           max = MAX2(max, free);
 470 
 471           if (r->is_empty()) {
 472             total_free_ext += free;
 473             if (last_idx + 1 == idx) {
 474               empty_contig++;
 475             } else {
 476               empty_contig = 1;
 477             }
 478           } else {
 479             empty_contig = 0;
 480           }
 481 
 482           total_used += r->used();
 483           total_free += free;
 484 
 485           max_contig = MAX2(max_contig, empty_contig);
 486           last_idx = idx;
 487         }
 488       }
 489 
 490       size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
 491       size_t free = capacity() - used();
 492 
 493       ls.print("Free: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s regular, " SIZE_FORMAT "%s humongous, ",
 494                byte_size_in_proper_unit(total_free),    proper_unit_for_byte_size(total_free),
 495                byte_size_in_proper_unit(max),           proper_unit_for_byte_size(max),
 496                byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous)
 497       );
 498 
 499       ls.print("Frag: ");
 500       size_t frag_ext;
 501       if (total_free_ext > 0) {
 502         frag_ext = 100 - (100 * max_humongous / total_free_ext);
 503       } else {
 504         frag_ext = 0;
 505       }
 506       ls.print(SIZE_FORMAT "%% external, ", frag_ext);
 507 
 508       size_t frag_int;
 509       if (mutator_count() > 0) {
 510         frag_int = (100 * (total_used / mutator_count()) / ShenandoahHeapRegion::region_size_bytes());
 511       } else {
 512         frag_int = 0;
 513       }
 514       ls.print(SIZE_FORMAT "%% internal; ", frag_int);
 515     }
 516 
 517     {
 518       size_t max = 0;
 519       size_t total_free = 0;
 520 
 521       for (size_t idx = _collector_leftmost; idx <= _collector_rightmost; idx++) {
 522         if (is_collector_free(idx)) {
 523           ShenandoahHeapRegion *r = _heap->get_region(idx);
 524           size_t free = alloc_capacity(r);
 525           max = MAX2(max, free);
 526           total_free += free;
 527         }
 528       }
 529 
 530       ls.print_cr("Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s",
 531                   byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
 532                   byte_size_in_proper_unit(max),        proper_unit_for_byte_size(max));
 533     }
 534   }
 535 }
 536 
 537 HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
 538   shenandoah_assert_heaplocked();
 539   assert_bounds();
 540 
 541   if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
 542     switch (req.type()) {
 543       case ShenandoahAllocRequest::_alloc_shared:
 544       case ShenandoahAllocRequest::_alloc_shared_gc:
 545         in_new_region = true;
 546         return allocate_contiguous(req);
 547       case ShenandoahAllocRequest::_alloc_gclab:
 548       case ShenandoahAllocRequest::_alloc_tlab:
 549         in_new_region = false;
 550         assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
 551                req.size(), ShenandoahHeapRegion::humongous_threshold_words());
 552         return nullptr;
 553       default:
 554         ShouldNotReachHere();
 555         return nullptr;
 556     }
 557   } else {
 558     return allocate_single(req, in_new_region);
 559   }
 560 }
 561 
 562 size_t ShenandoahFreeSet::unsafe_peek_free() const {
 563   // Deliberately not locked, this method is unsafe when free set is modified.
 564 
 565   for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
 566     if (index < _max && is_mutator_free(index)) {
 567       ShenandoahHeapRegion* r = _heap->get_region(index);
 568       if (r->free() >= MinTLABSize) {
 569         return r->free();
 570       }
 571     }
 572   }
 573 
 574   // It appears that no regions are left
 575   return 0;
 576 }
 577 
 578 void ShenandoahFreeSet::print_on(outputStream* out) const {
 579   out->print_cr("Mutator Free Set: " SIZE_FORMAT "", mutator_count());
 580   for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
 581     if (is_mutator_free(index)) {
 582       _heap->get_region(index)->print_on(out);
 583     }
 584   }
 585   out->print_cr("Collector Free Set: " SIZE_FORMAT "", collector_count());
 586   for (size_t index = _collector_leftmost; index <= _collector_rightmost; index++) {
 587     if (is_collector_free(index)) {
 588       _heap->get_region(index)->print_on(out);
 589     }
 590   }
 591 }
 592 
 593 /*
 594  * Internal fragmentation metric: describes how fragmented the heap regions are.
 595  *
 596  * It is derived as:
 597  *
 598  *               sum(used[i]^2, i=0..k)
 599  *   IF = 1 - ------------------------------
 600  *              C * sum(used[i], i=0..k)
 601  *
 602  * ...where k is the number of regions in computation, C is the region capacity, and
 603  * used[i] is the used space in the region.
 604  *
 605  * The non-linearity causes IF to be lower for the cases where the same total heap
 606  * used is densely packed. For example:
 607  *   a) Heap is completely full  => IF = 0
 608  *   b) Heap is half full, first 50% regions are completely full => IF = 0
 609  *   c) Heap is half full, each region is 50% full => IF = 1/2
 610  *   d) Heap is quarter full, first 50% regions are completely full => IF = 0
 611  *   e) Heap is quarter full, each region is 25% full => IF = 3/4
 612  *   f) Heap has one small object per each region => IF =~ 1
 613  */
 614 double ShenandoahFreeSet::internal_fragmentation() {
 615   double squared = 0;
 616   double linear = 0;
 617   int count = 0;
 618 
 619   for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
 620     if (is_mutator_free(index)) {
 621       ShenandoahHeapRegion* r = _heap->get_region(index);
 622       size_t used = r->used();
 623       squared += used * used;
 624       linear += used;
 625       count++;
 626     }
 627   }
 628 
 629   if (count > 0) {
 630     double s = squared / (ShenandoahHeapRegion::region_size_bytes() * linear);
 631     return 1 - s;
 632   } else {
 633     return 0;
 634   }
 635 }
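
A quick numeric check of the formula above, matching case (c): with four regions of capacity C = 100 that are each 50% full, squared = 4 * 50^2 = 10000 and linear = 200, so IF = 1 - 10000 / (100 * 200) = 1/2. The same computation as a standalone sketch (the capacities are illustrative, not taken from a real heap):

    #include <cstdio>

    int main() {
      // Case (c) from the comment: each region is 50% full.
      const double region_capacity = 100.0;       // illustrative capacity C
      const double used[4] = {50, 50, 50, 50};

      double squared = 0, linear = 0;
      for (double u : used) {
        squared += u * u;
        linear  += u;
      }
      double internal_fragmentation = 1.0 - squared / (region_capacity * linear);
      printf("IF = %.2f\n", internal_fragmentation);    // prints 0.50
      return 0;
    }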
 636 
 637 /*
 638  * External fragmentation metric: describes how fragmented the heap is.
 639  *
 640  * It is derived as:
 641  *
 642  *   EF = 1 - largest_contiguous_free / total_free
 643  *
 644  * For example:
 645  *   a) Heap is completely empty => EF = 0
 646  *   b) Heap is completely full => EF = 0
 647  *   c) Heap is first-half full => EF = 1/2
 648  *   d) Heap is half full, full and empty regions interleave => EF =~ 1
 649  */
 650 double ShenandoahFreeSet::external_fragmentation() {
 651   size_t last_idx = 0;
 652   size_t max_contig = 0;
 653   size_t empty_contig = 0;
 654 
 655   size_t free = 0;
 656 
 657   for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
 658     if (is_mutator_free(index)) {
 659       ShenandoahHeapRegion* r = _heap->get_region(index);
 660       if (r->is_empty()) {
 661         free += ShenandoahHeapRegion::region_size_bytes();
 662         if (last_idx + 1 == index) {
 663           empty_contig++;
 664         } else {
 665           empty_contig = 1;
 666         }
 667       } else {
 668         empty_contig = 0;
 669       }
 670 
 671       max_contig = MAX2(max_contig, empty_contig);
 672       last_idx = index;
 673     }
 674   }
 675 
 676   if (free > 0) {
 677     return 1 - (1.0 * max_contig * ShenandoahHeapRegion::region_size_bytes() / free);
 678   } else {
 679     return 0;
 680   }
 681 }
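
As a worked example of the EF formula: if eight regions are free in total but the longest run of contiguous empty regions is two, EF = 1 - 2/8 = 3/4; interleaving full and empty regions pushes EF toward 1, as case (d) notes. A standalone sketch with an illustrative region layout:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
      // 1 = empty region, 0 = used region; empty regions interleave with used ones.
      std::vector<int> regions = {1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0};

      size_t free_regions = 0, max_contig = 0, run = 0;
      for (int empty : regions) {
        if (empty) {
          free_regions++;
          run++;
          if (run > max_contig) max_contig = run;
        } else {
          run = 0;
        }
      }
      // EF = 1 - largest_contiguous_free / total_free (in units of regions here).
      double ef = (free_regions > 0) ? 1.0 - (double) max_contig / free_regions : 0.0;
      printf("EF = %.2f\n", ef);                         // 8 free, longest run 2 => 0.75
      return 0;
    }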
 682 
 683 #ifdef ASSERT
 684 void ShenandoahFreeSet::assert_bounds() const {
 685   // Performance invariants. Failing these would not break the free set, but performance
 686   // would suffer.
 687   assert (_mutator_leftmost <= _max, "leftmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, _mutator_leftmost,  _max);
 688   assert (_mutator_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _mutator_rightmost, _max);
 689 
 690   assert (_mutator_leftmost == _max || is_mutator_free(_mutator_leftmost),  "leftmost region should be free: " SIZE_FORMAT,  _mutator_leftmost);
 691   assert (_mutator_rightmost == 0   || is_mutator_free(_mutator_rightmost), "rightmost region should be free: " SIZE_FORMAT, _mutator_rightmost);
 692 
 693   size_t beg_off = _mutator_free_bitmap.find_first_set_bit(0);
 694   size_t end_off = _mutator_free_bitmap.find_first_set_bit(_mutator_rightmost + 1);
 695   assert (beg_off >= _mutator_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _mutator_leftmost);
 696   assert (end_off == _max,      "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, _mutator_rightmost);
 697 
 698   assert (_collector_leftmost <= _max, "leftmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, _collector_leftmost,  _max);
 699   assert (_collector_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _collector_rightmost, _max);
 700 
 701   assert (_collector_leftmost == _max || is_collector_free(_collector_leftmost),  "leftmost region should be free: " SIZE_FORMAT,  _collector_leftmost);
 702   assert (_collector_rightmost == 0   || is_collector_free(_collector_rightmost), "rightmost region should be free: " SIZE_FORMAT, _collector_rightmost);
 703 
 704   beg_off = _collector_free_bitmap.find_first_set_bit(0);
 705   end_off = _collector_free_bitmap.find_first_set_bit(_collector_rightmost + 1);
 706   assert (beg_off >= _collector_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _collector_leftmost);
 707   assert (end_off == _max,      "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, _collector_rightmost);
 708 }
 709 #endif

   1 /*
   2  * Copyright (c) 2016, 2021, Red Hat, Inc. All rights reserved.
   3  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "gc/shared/tlab_globals.hpp"
  28 #include "gc/shenandoah/shenandoahAffiliation.hpp"
  29 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  30 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  31 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  32 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  33 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  34 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  35 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
  36 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  37 #include "gc/shenandoah/shenandoahSimpleBitMap.hpp"
  38 #include "gc/shenandoah/shenandoahSimpleBitMap.inline.hpp"
  39 #include "logging/logStream.hpp"
  40 #include "memory/resourceArea.hpp"
  41 #include "runtime/orderAccess.hpp"
  42 
  43 static const char* partition_name(ShenandoahFreeSetPartitionId t) {
  44   switch (t) {
  45     case ShenandoahFreeSetPartitionId::NotFree: return "NotFree";
  46     case ShenandoahFreeSetPartitionId::Mutator: return "Mutator";
  47     case ShenandoahFreeSetPartitionId::Collector: return "Collector";
  48     case ShenandoahFreeSetPartitionId::OldCollector: return "OldCollector";
  49     default:
  50       ShouldNotReachHere();
  51       return "Unrecognized";
  52   }
  53 }
  54 
  55 #ifndef PRODUCT
  56 void ShenandoahRegionPartitions::dump_bitmap() const {
  57   log_debug(gc)("Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "], Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT
  58                 "], Old Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
  59                 _leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)],
  60                 _rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)],
  61                 _leftmosts[int(ShenandoahFreeSetPartitionId::Collector)],
  62                 _rightmosts[int(ShenandoahFreeSetPartitionId::Collector)],
  63                 _leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)],
  64                 _rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)]);
  65   log_debug(gc)("Empty Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT
  66                 "], Empty Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT
  67                 "], Empty Old Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
  68                 _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
  69                 _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
  70                 _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
  71                 _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
  72                 _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
  73                 _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)]);
  74 
  75   log_debug(gc)("%6s: %18s %18s %18s %18s", "index", "Mutator Bits", "Collector Bits", "Old Collector Bits", "NotFree Bits");
  76   dump_bitmap_range(0, _max-1);
  77 }
  78 
  79 void ShenandoahRegionPartitions::dump_bitmap_range(idx_t start_region_idx, idx_t end_region_idx) const {
  80   assert((start_region_idx >= 0) && (start_region_idx < (idx_t) _max), "precondition");
  81   assert((end_region_idx >= 0) && (end_region_idx < (idx_t) _max), "precondition");
  82   idx_t aligned_start = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(start_region_idx);
  83   idx_t aligned_end = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(end_region_idx);
  84   idx_t alignment = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].alignment();
  85   while (aligned_start <= aligned_end) {
  86     dump_bitmap_row(aligned_start);
  87     aligned_start += alignment;
  88   }
  89 }
  90 
  91 void ShenandoahRegionPartitions::dump_bitmap_row(idx_t region_idx) const {
  92   assert((region_idx >= 0) && (region_idx < (idx_t) _max), "precondition");
  93   idx_t aligned_idx = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(region_idx);
  94   uintx mutator_bits = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].bits_at(aligned_idx);
  95   uintx collector_bits = _membership[int(ShenandoahFreeSetPartitionId::Collector)].bits_at(aligned_idx);
  96   uintx old_collector_bits = _membership[int(ShenandoahFreeSetPartitionId::OldCollector)].bits_at(aligned_idx);
  97   uintx free_bits = mutator_bits | collector_bits | old_collector_bits;
  98   uintx notfree_bits =  ~free_bits;
  99   log_debug(gc)(SSIZE_FORMAT_W(6) ": " SIZE_FORMAT_X_0 " 0x" SIZE_FORMAT_X_0 " 0x" SIZE_FORMAT_X_0 " 0x" SIZE_FORMAT_X_0,
 100                 aligned_idx, mutator_bits, collector_bits, old_collector_bits, notfree_bits);
 101 }
 102 #endif
 103 
 104 ShenandoahRegionPartitions::ShenandoahRegionPartitions(size_t max_regions, ShenandoahFreeSet* free_set) :
 105     _max(max_regions),
 106     _region_size_bytes(ShenandoahHeapRegion::region_size_bytes()),
 107     _free_set(free_set),
 108     _membership{ ShenandoahSimpleBitMap(max_regions), ShenandoahSimpleBitMap(max_regions) , ShenandoahSimpleBitMap(max_regions) }
 109 {
 110   make_all_regions_unavailable();
 111 }
 112 
 113 inline bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) const {
 114   return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
 115 }
 116 
 117 inline bool ShenandoahFreeSet::can_allocate_from(size_t idx) const {
 118   ShenandoahHeapRegion* r = _heap->get_region(idx);
 119   return can_allocate_from(r);
 120 }
 121 
 122 inline size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) const {
 123   if (r->is_trash()) {
 124     // This would be recycled on allocation path
 125     return ShenandoahHeapRegion::region_size_bytes();
 126   } else {
 127     return r->free();
 128   }
 129 }
 130 
 131 inline size_t ShenandoahFreeSet::alloc_capacity(size_t idx) const {
 132   ShenandoahHeapRegion* r = _heap->get_region(idx);
 133   return alloc_capacity(r);
 134 }
 135 
 136 inline bool ShenandoahFreeSet::has_alloc_capacity(ShenandoahHeapRegion *r) const {
 137   return alloc_capacity(r) > 0;
 138 }
 139 
 140 inline idx_t ShenandoahRegionPartitions::leftmost(ShenandoahFreeSetPartitionId which_partition) const {
 141   assert (which_partition < NumPartitions, "selected free partition must be valid");
 142   idx_t idx = _leftmosts[int(which_partition)];
 143   if (idx >= _max) {
 144     return _max;
 145   } else {
 146     // Cannot assert that _membership[int(which_partition)].is_set(idx) because this helper method may be used
 147     // to query the original value of leftmost when leftmost must be adjusted because the interval representing
 148     // which_partition is shrinking after the region that used to be leftmost is retired.
 149     return idx;
 150   }
 151 }
 152 
 153 inline idx_t ShenandoahRegionPartitions::rightmost(ShenandoahFreeSetPartitionId which_partition) const {
 154   assert (which_partition < NumPartitions, "selected free partition must be valid");
 155   idx_t idx = _rightmosts[int(which_partition)];
 156   // Cannot assert that _membership[int(which_partition)].is_set(idx) because this helper method may be used
 157   // to query the original value of rightmost when rightmost must be adjusted because the interval representing
 158   // which_partition is shrinking after the region that used to be rightmost is retired.
 159   return idx;
 160 }
 161 
 162 void ShenandoahRegionPartitions::make_all_regions_unavailable() {
 163   for (size_t partition_id = 0; partition_id < IntNumPartitions; partition_id++) {
 164     _membership[partition_id].clear_all();
 165     _leftmosts[partition_id] = _max;
 166     _rightmosts[partition_id] = -1;
 167     _leftmosts_empty[partition_id] = _max;
 168     _rightmosts_empty[partition_id] = -1;
 169     _capacity[partition_id] = 0;
 170     _used[partition_id] = 0;
 171   }
 172   _region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = _region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
 173 }
 174 
 175 void ShenandoahRegionPartitions::establish_mutator_intervals(idx_t mutator_leftmost, idx_t mutator_rightmost,
 176                                                              idx_t mutator_leftmost_empty, idx_t mutator_rightmost_empty,
 177                                                              size_t mutator_region_count, size_t mutator_used) {
 178   _leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_leftmost;
 179   _rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_rightmost;
 180   _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_leftmost_empty;
 181   _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_rightmost_empty;
 182 
 183   _region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_region_count;
 184   _used[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_used;
 185   _capacity[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_region_count * _region_size_bytes;
 186 
 187   _leftmosts[int(ShenandoahFreeSetPartitionId::Collector)] = _max;
 188   _rightmosts[int(ShenandoahFreeSetPartitionId::Collector)] = -1;
 189   _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)] = _max;
 190   _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)] = -1;
 191 
 192   _region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
 193   _used[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
 194   _capacity[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
 195 }
 196 
 197 void ShenandoahRegionPartitions::establish_old_collector_intervals(idx_t old_collector_leftmost, idx_t old_collector_rightmost,
 198                                                                    idx_t old_collector_leftmost_empty,
 199                                                                    idx_t old_collector_rightmost_empty,
 200                                                                    size_t old_collector_region_count, size_t old_collector_used) {
 201   _leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_leftmost;
 202   _rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_rightmost;
 203   _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_leftmost_empty;
 204   _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_rightmost_empty;
 205 
 206   _region_counts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_region_count;
 207   _used[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_used;
 208   _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_region_count * _region_size_bytes;
 209 }
 210 
 211 void ShenandoahRegionPartitions::increase_used(ShenandoahFreeSetPartitionId which_partition, size_t bytes) {
 212   assert (which_partition < NumPartitions, "Partition must be valid");
 213   _used[int(which_partition)] += bytes;
 214   assert (_used[int(which_partition)] <= _capacity[int(which_partition)],
 215           "Must not use (" SIZE_FORMAT ") more than capacity (" SIZE_FORMAT ") after increase by " SIZE_FORMAT,
 216           _used[int(which_partition)], _capacity[int(which_partition)], bytes);
 217 }
 218 
 219 inline void ShenandoahRegionPartitions::shrink_interval_if_range_modifies_either_boundary(
 220   ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx) {
 221   assert((low_idx <= high_idx) && (low_idx >= 0) && (high_idx < _max), "Range must span legal index values");
 222   if (low_idx == leftmost(partition)) {
 223     assert (!_membership[int(partition)].is_set(low_idx), "Do not shrink interval if region not removed");
 224     if (high_idx + 1 == _max) {
 225       _leftmosts[int(partition)] = _max;
 226     } else {
 227       _leftmosts[int(partition)] = find_index_of_next_available_region(partition, high_idx + 1);
 228     }
 229     if (_leftmosts_empty[int(partition)] < _leftmosts[int(partition)]) {
 230       // This gets us closer to where we need to be; we'll scan further when leftmosts_empty is requested.
 231       _leftmosts_empty[int(partition)] = _leftmosts[int(partition)];
 232     }
 233   }
 234   if (high_idx == _rightmosts[int(partition)]) {
 235     assert (!_membership[int(partition)].is_set(high_idx), "Do not shrink interval if region not removed");
 236     if (low_idx == 0) {
 237       _rightmosts[int(partition)] = -1;
 238     } else {
 239       _rightmosts[int(partition)] = find_index_of_previous_available_region(partition, low_idx - 1);
 240     }
 241     if (_rightmosts_empty[int(partition)] > _rightmosts[int(partition)]) {
 242       // This gets us closer to where we need to be; we'll scan further when rightmosts_empty is requested.
 243       _rightmosts_empty[int(partition)] = _rightmosts[int(partition)];
 244     }
 245   }
 246   if (_leftmosts[int(partition)] > _rightmosts[int(partition)]) {
 247     _leftmosts[int(partition)] = _max;
 248     _rightmosts[int(partition)] = -1;
 249     _leftmosts_empty[int(partition)] = _max;
 250     _rightmosts_empty[int(partition)] = -1;
 251   }
 252 }
 253 
 254 inline void ShenandoahRegionPartitions::shrink_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, idx_t idx) {
 255   shrink_interval_if_range_modifies_either_boundary(partition, idx, idx);
 256 }
 257 
 258 inline void ShenandoahRegionPartitions::expand_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition,
 259                                                                              idx_t idx, size_t region_available) {
 260   if (_leftmosts[int(partition)] > idx) {
 261     _leftmosts[int(partition)] = idx;
 262   }
 263   if (_rightmosts[int(partition)] < idx) {
 264     _rightmosts[int(partition)] = idx;
 265   }
 266   if (region_available == _region_size_bytes) {
 267     if (_leftmosts_empty[int(partition)] > idx) {
 268       _leftmosts_empty[int(partition)] = idx;
 269     }
 270     if (_rightmosts_empty[int(partition)] < idx) {
 271       _rightmosts_empty[int(partition)] = idx;
 272     }
 273   }
 274 }
 275 
 276 void ShenandoahRegionPartitions::retire_range_from_partition(
 277   ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx) {
 278 
 279   // Note: we may remove from free partition even if region is not entirely full, such as when available < PLAB::min_size()
 280   assert ((low_idx < _max) && (high_idx < _max), "Both indices are sane: " SIZE_FORMAT " and " SIZE_FORMAT " < " SIZE_FORMAT,
 281           low_idx, high_idx, _max);
 282   assert (partition < NumPartitions, "Cannot remove from free partitions if not already free");
 283 
 284   for (idx_t idx = low_idx; idx <= high_idx; idx++) {
 285     assert (in_free_set(partition, idx), "Must be in partition to remove from partition");
 286     _membership[int(partition)].clear_bit(idx);
 287   }
 288   _region_counts[int(partition)] -= high_idx + 1 - low_idx;
 289   shrink_interval_if_range_modifies_either_boundary(partition, low_idx, high_idx);
 290 }
 291 
 292 void ShenandoahRegionPartitions::retire_from_partition(ShenandoahFreeSetPartitionId partition, idx_t idx, size_t used_bytes) {
 293 
 294   // Note: we may remove from free partition even if region is not entirely full, such as when available < PLAB::min_size()
 295   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 296   assert (partition < NumPartitions, "Cannot remove from free partitions if not already free");
 297   assert (in_free_set(partition, idx), "Must be in partition to remove from partition");
 298 
 299   if (used_bytes < _region_size_bytes) {
 300     // Count the alignment pad remnant of memory as used when we retire this region
 301     increase_used(partition, _region_size_bytes - used_bytes);
 302   }
 303   _membership[int(partition)].clear_bit(idx);
 304   shrink_interval_if_boundary_modified(partition, idx);
 305   _region_counts[int(partition)]--;
 306 }
 307 
 308 void ShenandoahRegionPartitions::make_free(idx_t idx, ShenandoahFreeSetPartitionId which_partition, size_t available) {
 309   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 310   assert (membership(idx) == ShenandoahFreeSetPartitionId::NotFree, "Cannot make free if already free");
 311   assert (which_partition < NumPartitions, "selected free partition must be valid");
 312   assert (available <= _region_size_bytes, "Available cannot exceed region size");
 313 
 314   _membership[int(which_partition)].set_bit(idx);
 315   _capacity[int(which_partition)] += _region_size_bytes;
 316   _used[int(which_partition)] += _region_size_bytes - available;
 317   expand_interval_if_boundary_modified(which_partition, idx, available);
 318   _region_counts[int(which_partition)]++;
 319 }
 320 
 321 bool ShenandoahRegionPartitions::is_mutator_partition(ShenandoahFreeSetPartitionId p) {
 322   return (p == ShenandoahFreeSetPartitionId::Mutator);
 323 }
 324 
 325 bool ShenandoahRegionPartitions::is_young_collector_partition(ShenandoahFreeSetPartitionId p) {
 326   return (p == ShenandoahFreeSetPartitionId::Collector);
 327 }
 328 
 329 bool ShenandoahRegionPartitions::is_old_collector_partition(ShenandoahFreeSetPartitionId p) {
 330   return (p == ShenandoahFreeSetPartitionId::OldCollector);
 331 }
 332 
 333 bool ShenandoahRegionPartitions::available_implies_empty(size_t available_in_region) {
 334   return (available_in_region == _region_size_bytes);
 335 }
 336 
 337 
 338 void ShenandoahRegionPartitions::move_from_partition_to_partition(idx_t idx, ShenandoahFreeSetPartitionId orig_partition,
 339                                                                   ShenandoahFreeSetPartitionId new_partition, size_t available) {
 340   ShenandoahHeapRegion* r = ShenandoahHeap::heap()->get_region(idx);
 341   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 342   assert (orig_partition < NumPartitions, "Original partition must be valid");
 343   assert (new_partition < NumPartitions, "New partition must be valid");
 344   assert (available <= _region_size_bytes, "Available cannot exceed region size");
 345   assert (_membership[int(orig_partition)].is_set(idx), "Cannot move from partition unless in partition");
 346   assert ((r != nullptr) && ((r->is_trash() && (available == _region_size_bytes)) ||
 347                              (r->used() + available == _region_size_bytes)),
 348           "Used: " SIZE_FORMAT " + available: " SIZE_FORMAT " should equal region size: " SIZE_FORMAT,
 349           ShenandoahHeap::heap()->get_region(idx)->used(), available, _region_size_bytes);
 350 
 351   // Expected transitions:
 352   //  During rebuild:         Mutator => Collector
 353   //                          Mutator empty => Collector
 354   //                          Mutator empty => OldCollector
 355   //  During flip_to_gc:      Mutator empty => Collector
 356   //                          Mutator empty => OldCollector
 357   // At start of update refs: Collector => Mutator
 358   //                          OldCollector Empty => Mutator
 359   assert ((is_mutator_partition(orig_partition) && is_young_collector_partition(new_partition)) ||
 360           (is_mutator_partition(orig_partition) &&
 361            available_implies_empty(available) && is_old_collector_partition(new_partition)) ||
 362           (is_young_collector_partition(orig_partition) && is_mutator_partition(new_partition)) ||
 363           (is_old_collector_partition(orig_partition)
 364            && available_implies_empty(available) && is_mutator_partition(new_partition)),
 365           "Unexpected movement between partitions, available: " SIZE_FORMAT ", _region_size_bytes: " SIZE_FORMAT
 366           ", orig_partition: %s, new_partition: %s",
 367           available, _region_size_bytes, partition_name(orig_partition), partition_name(new_partition));
 368 
 369   size_t used = _region_size_bytes - available;
 370   assert (_used[int(orig_partition)] >= used,
 371           "Orig partition used: " SIZE_FORMAT " must exceed moved used: " SIZE_FORMAT " within region " SSIZE_FORMAT,
 372           _used[int(orig_partition)], used, idx);
 373 
 374   _membership[int(orig_partition)].clear_bit(idx);
 375   _membership[int(new_partition)].set_bit(idx);
 376 
 377   _capacity[int(orig_partition)] -= _region_size_bytes;
 378   _used[int(orig_partition)] -= used;
 379   shrink_interval_if_boundary_modified(orig_partition, idx);
 380 
 381   _capacity[int(new_partition)] += _region_size_bytes;
 382   _used[int(new_partition)] += used;
 383   expand_interval_if_boundary_modified(new_partition, idx, available);
 384 
 385   _region_counts[int(orig_partition)]--;
 386   _region_counts[int(new_partition)]++;
 387 }
 388 
 389 const char* ShenandoahRegionPartitions::partition_membership_name(idx_t idx) const {
 390   return partition_name(membership(idx));
 391 }
 392 
 393 inline ShenandoahFreeSetPartitionId ShenandoahRegionPartitions::membership(idx_t idx) const {
 394   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 395   ShenandoahFreeSetPartitionId result = ShenandoahFreeSetPartitionId::NotFree;
 396   for (uint partition_id = 0; partition_id < UIntNumPartitions; partition_id++) {
 397     if (_membership[partition_id].is_set(idx)) {
 398       assert(result == ShenandoahFreeSetPartitionId::NotFree, "Region should reside in only one partition");
 399       result = (ShenandoahFreeSetPartitionId) partition_id;
 400     }
 401   }
 402   return result;
 403 }
 404 
 405 #ifdef ASSERT
 406 inline bool ShenandoahRegionPartitions::partition_id_matches(idx_t idx, ShenandoahFreeSetPartitionId test_partition) const {
 407   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 408   assert (test_partition < ShenandoahFreeSetPartitionId::NotFree, "must be a valid partition");
 409 
 410   return membership(idx) == test_partition;
 411 }
 412 #endif
 413 
 414 inline bool ShenandoahRegionPartitions::is_empty(ShenandoahFreeSetPartitionId which_partition) const {
 415   assert (which_partition < NumPartitions, "selected free partition must be valid");
 416   return (leftmost(which_partition) > rightmost(which_partition));
 417 }
 418 
 419 inline idx_t ShenandoahRegionPartitions::find_index_of_next_available_region(
 420   ShenandoahFreeSetPartitionId which_partition, idx_t start_index) const {
 421   idx_t rightmost_idx = rightmost(which_partition);
 422   idx_t leftmost_idx = leftmost(which_partition);
 423   if ((rightmost_idx < leftmost_idx) || (start_index > rightmost_idx)) return _max;
 424   if (start_index < leftmost_idx) {
 425     start_index = leftmost_idx;
 426   }
 427   idx_t result = _membership[int(which_partition)].find_first_set_bit(start_index, rightmost_idx + 1);
 428   if (result > rightmost_idx) {
 429     result = _max;
 430   }
 431   assert (result >= start_index, "Requires progress");
 432   return result;
 433 }
 434 
 435 inline idx_t ShenandoahRegionPartitions::find_index_of_previous_available_region(
 436   ShenandoahFreeSetPartitionId which_partition, idx_t last_index) const {
 437   idx_t rightmost_idx = rightmost(which_partition);
 438   idx_t leftmost_idx = leftmost(which_partition);
 439   // if (leftmost_idx == max) then (last_index < leftmost_idx)
 440   if (last_index < leftmost_idx) return -1;
 441   if (last_index > rightmost_idx) {
 442     last_index = rightmost_idx;
 443   }
 444   idx_t result = _membership[int(which_partition)].find_last_set_bit(-1, last_index);
 445   if (result < leftmost_idx) {
 446     result = -1;
 447   }
 448   assert (result <= last_index, "Requires progress");
 449   return result;
 450 }
 451 
 452 inline idx_t ShenandoahRegionPartitions::find_index_of_next_available_cluster_of_regions(
 453   ShenandoahFreeSetPartitionId which_partition, idx_t start_index, size_t cluster_size) const {
 454   idx_t rightmost_idx = rightmost(which_partition);
 455   idx_t leftmost_idx = leftmost(which_partition);
 456   if ((rightmost_idx < leftmost_idx) || (start_index > rightmost_idx)) return _max;
 457   idx_t result = _membership[int(which_partition)].find_first_consecutive_set_bits(start_index, rightmost_idx + 1, cluster_size);
 458   if (result > rightmost_idx) {
 459     result = _max;
 460   }
 461   assert (result >= start_index, "Requires progress");
 462   return result;
 463 }
 464 
 465 inline idx_t ShenandoahRegionPartitions::find_index_of_previous_available_cluster_of_regions(
 466   ShenandoahFreeSetPartitionId which_partition, idx_t last_index, size_t cluster_size) const {
 467   idx_t leftmost_idx = leftmost(which_partition);
 468   // if (leftmost_idx == max) then (last_index < leftmost_idx)
 469   if (last_index < leftmost_idx) return -1;
 470   idx_t result = _membership[int(which_partition)].find_last_consecutive_set_bits(leftmost_idx - 1, last_index, cluster_size);
 471   if (result <= leftmost_idx) {
 472     result = -1;
 473   }
 474   assert (result <= last_index, "Requires progress");
 475   return result;
 476 }
 477 
 478 idx_t ShenandoahRegionPartitions::leftmost_empty(ShenandoahFreeSetPartitionId which_partition) {
 479   assert (which_partition < NumPartitions, "selected free partition must be valid");
 480   idx_t max_regions = _max;
 481   if (_leftmosts_empty[int(which_partition)] == _max) {
 482     return _max;
 483   }
 484   for (idx_t idx = find_index_of_next_available_region(which_partition, _leftmosts_empty[int(which_partition)]);
 485        idx < max_regions; ) {
 486     assert(in_free_set(which_partition, idx), "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, idx);
 487     if (_free_set->alloc_capacity(idx) == _region_size_bytes) {
 488       _leftmosts_empty[int(which_partition)] = idx;
 489       return idx;
 490     }
 491     idx = find_index_of_next_available_region(which_partition, idx + 1);
 492   }
 493   _leftmosts_empty[int(which_partition)] = _max;
 494   _rightmosts_empty[int(which_partition)] = -1;
 495   return _max;
 496 }
 497 
 498 idx_t ShenandoahRegionPartitions::rightmost_empty(ShenandoahFreeSetPartitionId which_partition) {
 499   assert (which_partition < NumPartitions, "selected free partition must be valid");
 500   if (_rightmosts_empty[int(which_partition)] < 0) {
 501     return -1;
 502   }
 503   for (idx_t idx = find_index_of_previous_available_region(which_partition, _rightmosts_empty[int(which_partition)]);
 504        idx >= 0; ) {
 505     assert(in_free_set(which_partition, idx), "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, idx);
 506     if (_free_set->alloc_capacity(idx) == _region_size_bytes) {
 507       _rightmosts_empty[int(which_partition)] = idx;
 508       return idx;
 509     }
 510     idx = find_index_of_previous_available_region(which_partition, idx - 1);
 511   }
 512   _leftmosts_empty[int(which_partition)] = _max;
 513   _rightmosts_empty[int(which_partition)] = -1;
 514   return -1;
 515 }
 516 
 517 
 518 #ifdef ASSERT
 519 void ShenandoahRegionPartitions::assert_bounds() {
 520 
 521   idx_t leftmosts[UIntNumPartitions];
 522   idx_t rightmosts[UIntNumPartitions];
 523   idx_t empty_leftmosts[UIntNumPartitions];
 524   idx_t empty_rightmosts[UIntNumPartitions];
 525 
 526   for (uint i = 0; i < UIntNumPartitions; i++) {
 527     leftmosts[i] = _max;
 528     empty_leftmosts[i] = _max;
 529     rightmosts[i] = -1;
 530     empty_rightmosts[i] = -1;
 531   }
 532 
 533   for (idx_t i = 0; i < _max; i++) {
 534     ShenandoahFreeSetPartitionId partition = membership(i);
 535     switch (partition) {
 536       case ShenandoahFreeSetPartitionId::NotFree:
 537         break;
 538 
 539       case ShenandoahFreeSetPartitionId::Mutator:
 540       case ShenandoahFreeSetPartitionId::Collector:
 541       case ShenandoahFreeSetPartitionId::OldCollector:
 542       {
 543         size_t capacity = _free_set->alloc_capacity(i);
 544         bool is_empty = (capacity == _region_size_bytes);
 545         assert(capacity > 0, "free regions must have allocation capacity");
 546         if (i < leftmosts[int(partition)]) {
 547           leftmosts[int(partition)] = i;
 548         }
 549         if (is_empty && (i < empty_leftmosts[int(partition)])) {
 550           empty_leftmosts[int(partition)] = i;
 551         }
 552         if (i > rightmosts[int(partition)]) {
 553           rightmosts[int(partition)] = i;
 554         }
 555         if (is_empty && (i > empty_rightmosts[int(partition)])) {
 556           empty_rightmosts[int(partition)] = i;
 557         }
 558         break;
 559       }
 560 
 561       default:
 562         ShouldNotReachHere();
 563     }
 564   }
 565 
 566   // Performance invariants. Failing these would not break the free partition, but performance would suffer.
 567   assert (leftmost(ShenandoahFreeSetPartitionId::Mutator) <= _max,
 568           "leftmost in bounds: "  SSIZE_FORMAT " < " SSIZE_FORMAT, leftmost(ShenandoahFreeSetPartitionId::Mutator),  _max);
 569   assert (rightmost(ShenandoahFreeSetPartitionId::Mutator) < _max,
 570           "rightmost in bounds: "  SSIZE_FORMAT " < " SSIZE_FORMAT, rightmost(ShenandoahFreeSetPartitionId::Mutator),  _max);
 571 
 572   assert (leftmost(ShenandoahFreeSetPartitionId::Mutator) == _max
 573           || partition_id_matches(leftmost(ShenandoahFreeSetPartitionId::Mutator), ShenandoahFreeSetPartitionId::Mutator),
 574           "leftmost region should be free: " SSIZE_FORMAT,  leftmost(ShenandoahFreeSetPartitionId::Mutator));
 575   assert (leftmost(ShenandoahFreeSetPartitionId::Mutator) == _max
 576           || partition_id_matches(rightmost(ShenandoahFreeSetPartitionId::Mutator), ShenandoahFreeSetPartitionId::Mutator),
 577           "rightmost region should be free: " SSIZE_FORMAT, rightmost(ShenandoahFreeSetPartitionId::Mutator));
 578 
 579   // If the Mutator partition is empty, both leftmosts will equal max and both rightmosts will equal -1.
 580   // Likewise for the empty-region bounds.
 581   idx_t beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
 582   idx_t end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
 583   assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::Mutator),
 584           "free regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 585           beg_off, leftmost(ShenandoahFreeSetPartitionId::Mutator));
 586   assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::Mutator),
 587           "free regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 588           end_off, rightmost(ShenandoahFreeSetPartitionId::Mutator));
 589 
 590   beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
 591   end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
 592   assert (beg_off >= leftmost_empty(ShenandoahFreeSetPartitionId::Mutator),
 593           "free empty regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 594           beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Mutator));
 595   assert (end_off <= rightmost_empty(ShenandoahFreeSetPartitionId::Mutator),
 596           "free empty regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 597           end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Mutator));
 598 
 599   // Performance invariants. Failing these would not break the free partition, but performance would suffer.
 600   assert (leftmost(ShenandoahFreeSetPartitionId::Collector) <= _max, "leftmost in bounds: "  SSIZE_FORMAT " < " SSIZE_FORMAT,
 601           leftmost(ShenandoahFreeSetPartitionId::Collector),  _max);
 602   assert (rightmost(ShenandoahFreeSetPartitionId::Collector) < _max, "rightmost in bounds: "  SSIZE_FORMAT " < " SSIZE_FORMAT,
 603           rightmost(ShenandoahFreeSetPartitionId::Collector),  _max);
 604 
 605   assert (leftmost(ShenandoahFreeSetPartitionId::Collector) == _max
 606           || partition_id_matches(leftmost(ShenandoahFreeSetPartitionId::Collector), ShenandoahFreeSetPartitionId::Collector),
 607           "leftmost region should be free: " SSIZE_FORMAT,  leftmost(ShenandoahFreeSetPartitionId::Collector));
 608   assert (leftmost(ShenandoahFreeSetPartitionId::Collector) == _max
 609           || partition_id_matches(rightmost(ShenandoahFreeSetPartitionId::Collector), ShenandoahFreeSetPartitionId::Collector),
 610           "rightmost region should be free: " SSIZE_FORMAT, rightmost(ShenandoahFreeSetPartitionId::Collector));
 611 
 612   // If the Collector partition is empty, both leftmosts will equal max and both rightmosts will equal -1.
 613   // Likewise for the empty-region bounds.
 614   beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
 615   end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
 616   assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::Collector),
 617           "free regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 618           beg_off, leftmost(ShenandoahFreeSetPartitionId::Collector));
 619   assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::Collector),
 620           "free regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 621           end_off, rightmost(ShenandoahFreeSetPartitionId::Collector));
 622 
 623   beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
 624   end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
 625   assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
 626           "free empty regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 627           beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Collector));
 628   assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
 629           "free empty regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 630           end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Collector));
 631 
 632   // Performance invariants. Failing these would not break the free partition, but performance would suffer.
 633   assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) <= _max, "leftmost in bounds: "  SSIZE_FORMAT " < " SSIZE_FORMAT,
 634           leftmost(ShenandoahFreeSetPartitionId::OldCollector),  _max);
 635   assert (rightmost(ShenandoahFreeSetPartitionId::OldCollector) < _max, "rightmost in bounds: "  SSIZE_FORMAT " < " SSIZE_FORMAT,
 636           rightmost(ShenandoahFreeSetPartitionId::OldCollector),  _max);
 637 
 638   assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) == _max
 639           || partition_id_matches(leftmost(ShenandoahFreeSetPartitionId::OldCollector),
 640                                   ShenandoahFreeSetPartitionId::OldCollector),
 641           "leftmost region should be free: " SSIZE_FORMAT,  leftmost(ShenandoahFreeSetPartitionId::OldCollector));
 642   assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) == _max
 643           || partition_id_matches(rightmost(ShenandoahFreeSetPartitionId::OldCollector),
 644                                   ShenandoahFreeSetPartitionId::OldCollector),
 645           "rightmost region should be free: " SSIZE_FORMAT, rightmost(ShenandoahFreeSetPartitionId::OldCollector));
 646 
 647   // If the OldCollector partition is empty, both leftmosts will equal max and both rightmosts will equal -1.
 648   // Likewise for the empty-region bounds.
 649   beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
 650   end_off = rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
 651   assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::OldCollector),
 652           "free regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 653           beg_off, leftmost(ShenandoahFreeSetPartitionId::OldCollector));
 654   assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::OldCollector),
 655           "free regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 656           end_off, rightmost(ShenandoahFreeSetPartitionId::OldCollector));
 657 
 658   beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
 659   end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
 660   assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
 661           "free empty regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 662           beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::OldCollector));
 663   assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
 664           "free empty regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 665           end_off, rightmost_empty(ShenandoahFreeSetPartitionId::OldCollector));
 666 }
 667 #endif
 668 
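     // _trash_regions is scratch storage used by recycle_trash() to batch the recycling of trashed regions
     // while periodically yielding the heap lock to allocating mutator threads.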
 669 ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
 670   _heap(heap),
 671   _partitions(max_regions, this),
 672   _trash_regions(NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, max_regions, mtGC)),
 673   _alloc_bias_weight(0)
 674 {
 675   clear_internal();
 676 }
 677 
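     // A region promoted in place keeps its used portion.  If the remaining free space is at least the minimum
     // PLAB size, return that remainder to the OldCollector partition and count it toward the promoted reserve;
     // otherwise the region stays retired (NotFree).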
 678 void ShenandoahFreeSet::add_promoted_in_place_region_to_old_collector(ShenandoahHeapRegion* region) {
 679   shenandoah_assert_heaplocked();
 680   size_t plab_min_size_in_bytes = ShenandoahGenerationalHeap::heap()->plab_min_size() * HeapWordSize;
 681   size_t idx = region->index();
 682   size_t capacity = alloc_capacity(region);
 683   assert(_partitions.membership(idx) == ShenandoahFreeSetPartitionId::NotFree,
 684          "Regions promoted in place should have been excluded from Mutator partition");
 685   if (capacity >= plab_min_size_in_bytes) {
 686     _partitions.make_free(idx, ShenandoahFreeSetPartitionId::OldCollector, capacity);
 687     _heap->old_generation()->augment_promoted_reserve(capacity);
 688   }
 689 }
 690 
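     // Attempts to satisfy req from a region of which_partition whose affiliation matches the requested affiliation.
     // When affiliation is FREE, the search is restricted to the completely empty interval of the partition.  Regions
     // are scanned in the partition's preferred allocation direction.  Returns nullptr if no allocation succeeds.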
 691 HeapWord* ShenandoahFreeSet::allocate_from_partition_with_affiliation(ShenandoahFreeSetPartitionId which_partition,
 692                                                                       ShenandoahAffiliation affiliation,
 693                                                                       ShenandoahAllocRequest& req, bool& in_new_region) {
 694   shenandoah_assert_heaplocked();
 695   idx_t rightmost_collector = ((affiliation == ShenandoahAffiliation::FREE)?
 696                                _partitions.rightmost_empty(which_partition): _partitions.rightmost(which_partition));
 697   idx_t leftmost_collector = ((affiliation == ShenandoahAffiliation::FREE)?
 698                               _partitions.leftmost_empty(which_partition): _partitions.leftmost(which_partition));
 699   if (_partitions.alloc_from_left_bias(which_partition)) {
 700     for (idx_t idx = leftmost_collector; idx <= rightmost_collector; ) {
 701       assert(_partitions.in_free_set(which_partition, idx), "Boundaries or find_prev_last_bit failed: " SSIZE_FORMAT, idx);
 702       ShenandoahHeapRegion* r = _heap->get_region(idx);
 703       if (r->affiliation() == affiliation) {
 704         HeapWord* result = try_allocate_in(r, req, in_new_region);
 705         if (result != nullptr) {
 706           return result;
 707         }
 708       }
 709       idx = _partitions.find_index_of_next_available_region(which_partition, idx + 1);
 710     }
 711   } else {
 712     for (idx_t idx = rightmost_collector; idx >= leftmost_collector; ) {
 713       assert(_partitions.in_free_set(which_partition, idx),
 714              "Boundaries or find_prev_last_bit failed: " SSIZE_FORMAT, idx);
 715       ShenandoahHeapRegion* r = _heap->get_region(idx);
 716       if (r->affiliation() == affiliation) {
 717         HeapWord* result = try_allocate_in(r, req, in_new_region);
 718         if (result != nullptr) {
 719           return result;
 720         }
 721       }
 722       idx = _partitions.find_index_of_previous_available_region(which_partition, idx - 1);
 723     }
 724   }
 725   log_debug(gc, free)("Could not allocate collector region with affiliation: %s for request " PTR_FORMAT,
 726                       shenandoah_affiliation_name(affiliation), p2i(&req));
 727   return nullptr;
 728 }
 729 
 730 HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
 731   shenandoah_assert_heaplocked();
 732 
 733   // Scan the bitmap looking for a first fit.
 734   //
 735   // Leftmost and rightmost bounds provide enough caching to walk bitmap efficiently. Normally,
 736   // we would find the region to allocate at right away.
 737   //
 738   // Allocations are biased: GC allocations are taken from the high end of the heap.  Regular (and TLAB)
 739   // mutator allocations are taken from the middle of heap, below the memory reserved for Collector.
 740   // Humongous mutator allocations are taken from the bottom of the heap.
 741   //
 742   // Free set maintains mutator and collector partitions.  Normally, each allocates only from its partition,
 743   // except in special cases when the collector steals regions from the mutator partition.
 744 
 745   // Overwrite with non-zero (non-NULL) values only if necessary for allocation bookkeeping.
 746   bool allow_new_region = true;
 747   if (_heap->mode()->is_generational()) {
 748     switch (req.affiliation()) {
 749       case ShenandoahAffiliation::OLD_GENERATION:
 750         // Note: unsigned result from free_unaffiliated_regions() will never be less than zero, but it may equal zero.
 751         if (_heap->old_generation()->free_unaffiliated_regions() <= 0) {
 752           allow_new_region = false;
 753         }
 754         break;
 755 
 756       case ShenandoahAffiliation::YOUNG_GENERATION:
 757         // Note: unsigned result from free_unaffiliated_regions() will never be less than zero, but it may equal zero.
 758         if (_heap->young_generation()->free_unaffiliated_regions() <= 0) {
 759           allow_new_region = false;
 760         }
 761         break;
 762 
 763       case ShenandoahAffiliation::FREE:
 764         fatal("Should request affiliation");
 765 
 766       default:
 767         ShouldNotReachHere();
 768         break;
 769     }
 770   }
 771   switch (req.type()) {
 772     case ShenandoahAllocRequest::_alloc_tlab:
 773     case ShenandoahAllocRequest::_alloc_shared: {

 774       // Try to allocate in the mutator view
 775       if (_alloc_bias_weight-- <= 0) {
 776         // We have observed that regions not collected in previous GC cycle tend to congregate at one end or the other
 777         // of the heap.  Typically, these are the more recently engaged regions and the objects in these regions have not
 778         // yet had a chance to die (and/or are treated as floating garbage).  If we use the same allocation bias on each
 779         // GC pass, these "most recently" engaged regions for GC pass N will also be the "most recently" engaged regions
 780         // for GC pass N+1, and the relatively large amount of live data and/or floating garbage introduced
 781         // during the most recent GC pass may once again prevent the region from being collected.  We have found that
 782         // alternating the allocation behavior between GC passes improves evacuation performance by 3-7% on certain
 783         // benchmarks.  In the best case, this has the effect of consuming these partially consumed regions before
 784         // the start of the next mark cycle so all of their garbage can be efficiently reclaimed.
 785         //
 786         // First, finish consuming regions that are already partially consumed so as to more tightly limit ranges of
 787         // available regions.  Other potential benefits:
 788         //  1. Eventual collection set has fewer regions because we have packed newly allocated objects into fewer regions
 789         //  2. We preserve the "empty" regions longer into the GC cycle, reducing likelihood of allocation failures
 790         //     late in the GC cycle.
 791         idx_t non_empty_on_left = (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator)
 792                                      - _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator));
 793         idx_t non_empty_on_right = (_partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator)
 794                                       - _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator));
 795         _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::Mutator, (non_empty_on_right < non_empty_on_left));
 796         _alloc_bias_weight = _InitialAllocBiasWeight;
 797       }
 798       if (!_partitions.alloc_from_left_bias(ShenandoahFreeSetPartitionId::Mutator)) {
 799         // Allocate within the mutator free set from high memory to low so as to preserve low memory for humongous allocations
 800         if (!_partitions.is_empty(ShenandoahFreeSetPartitionId::Mutator)) {
 801           // Use signed idx.  Otherwise, loop will never terminate.
 802           idx_t leftmost = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator);
 803           for (idx_t idx = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator); idx >= leftmost; ) {
 804             assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx),
 805                    "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, idx);
 806             ShenandoahHeapRegion* r = _heap->get_region(idx);
 807             // try_allocate_in() increases used if the allocation is successful.
 808             HeapWord* result;
 809             size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab)? req.min_size(): req.size();
 810             if ((alloc_capacity(r) >= min_size) && ((result = try_allocate_in(r, req, in_new_region)) != nullptr)) {
 811               return result;
 812             }
 813             idx = _partitions.find_index_of_previous_available_region(ShenandoahFreeSetPartitionId::Mutator, idx - 1);
 814           }
 815         }
 816       } else {
 817         // Allocate from low to high memory.  This keeps the range of fully empty regions more tightly packed.
 818         // Note that the most recently allocated regions tend not to be evacuated in a given GC cycle.  So this
 819         // tends to accumulate "fragmented" uncollected regions in high memory.
 820         if (!_partitions.is_empty(ShenandoahFreeSetPartitionId::Mutator)) {
 821           // Use signed idx.  Otherwise, loop will never terminate.
 822           idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
 823           for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); idx <= rightmost; ) {
 824             assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx),
 825                    "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, idx);
 826             ShenandoahHeapRegion* r = _heap->get_region(idx);
 827             // try_allocate_in() increases used if the allocation is successful.
 828             HeapWord* result;
 829             size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab)? req.min_size(): req.size();
 830             if ((alloc_capacity(r) >= min_size) && ((result = try_allocate_in(r, req, in_new_region)) != nullptr)) {
 831               return result;
 832             }
 833             idx = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, idx + 1);
 834           }
 835         }
 836       }

 837       // There is no recovery. Mutator does not touch collector view at all.
 838       break;
 839     }
 840     case ShenandoahAllocRequest::_alloc_gclab:
 841       // GCLABs are for evacuation so we must be in evacuation phase.
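           // Fall through: GCLAB, PLAB and shared GC requests are all satisfied from the collector partitions below.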

 842 
 843     case ShenandoahAllocRequest::_alloc_plab: {
 844       // PLABs always reside in old-gen and are only allocated during
 845       // evacuation phase.
 846 
 847     case ShenandoahAllocRequest::_alloc_shared_gc: {
 848       // Fast-path: try to allocate in the collector view first
 849       HeapWord* result;
 850       result = allocate_from_partition_with_affiliation(req.is_old()? ShenandoahFreeSetPartitionId::OldCollector:
 851                                                         ShenandoahFreeSetPartitionId::Collector,
 852                                                         req.affiliation(), req, in_new_region);
 853       if (result != nullptr) {
 854         return result;
 855       } else if (allow_new_region) {
 856         // Try a free region that is dedicated to GC allocations.
 857         result = allocate_from_partition_with_affiliation(req.is_old()? ShenandoahFreeSetPartitionId::OldCollector:
 858                                                           ShenandoahFreeSetPartitionId::Collector,
 859                                                           ShenandoahAffiliation::FREE, req, in_new_region);
 860         if (result != nullptr) {
 861           return result;
 862         }
 863       }
 864 
 865       // No dice. Can we borrow space from mutator view?
 866       if (!ShenandoahEvacReserveOverflow) {
 867         return nullptr;
 868       }
 869       if (!allow_new_region && req.is_old() && (_heap->young_generation()->free_unaffiliated_regions() > 0)) {
 870         // This allows us to flip a mutator region to old_collector
 871         allow_new_region = true;
 872       }
 873 
 874       // We should expand old-gen if this can prevent an old-gen evacuation failure.  We don't care so much about
 875       // promotion failures since they can be mitigated in a subsequent GC pass.  Would be nice to know if this
 876       // allocation request is for evacuation or promotion.  Individual threads limit their use of PLAB memory for
 877       // promotions, so we already have an assurance that any additional memory set aside for old-gen will be used
 878       // only for old-gen evacuations.
 879       if (allow_new_region) {
 880         // Try to steal an empty region from the mutator view.
 881         idx_t rightmost_mutator = _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator);
 882         idx_t leftmost_mutator =  _partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator);
 883         for (idx_t idx = rightmost_mutator; idx >= leftmost_mutator; ) {
 884           assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx),
 885                  "Boundaries or find_prev_last_bit failed: " SSIZE_FORMAT, idx);
 886           ShenandoahHeapRegion* r = _heap->get_region(idx);
 887           if (can_allocate_from(r)) {
 888             if (req.is_old()) {
 889               flip_to_old_gc(r);
 890             } else {
 891               flip_to_gc(r);
 892             }
 893             // Region r is entirely empty.  If try_allocate_in() fails on region r, something else is really wrong.
 894             // Don't bother to retry with other regions.
 895             log_debug(gc, free)("Flipped region " SIZE_FORMAT " to gc for request: " PTR_FORMAT, idx, p2i(&req));
 896             return try_allocate_in(r, req, in_new_region);
 897           }
 898           idx = _partitions.find_index_of_previous_available_region(ShenandoahFreeSetPartitionId::Mutator, idx - 1);
 899         }
 900       }
 901       // No dice. Do not try to mix mutator and GC allocations, because adjusting the region's update watermark (UWM)
 902       // for GC allocations would expose unparsable mutator allocations.
 903       break;
 904     }
 905     }
 906     default:
 907       ShouldNotReachHere();
 908   }

 909   return nullptr;
 910 }
 911 
 912 // This work method takes an argument corresponding to the number of bytes
 913 // free in a region, and returns the largest amount in heapwords that can be allocated
 914 // such that both of the following conditions are satisfied:
 915 //
 916 // 1. it is a multiple of card size
 917 // 2. any remaining shard may be filled with a filler object
 918 //
 919 // The idea is that the allocation starts and ends at card boundaries. Because
 920 // a region ('s end) is card-aligned, the remainder shard that must be filled is
 921 // at the start of the free space.
 922 //
 923 // This is merely a helper method to use for the purpose of such a calculation.
 924 size_t ShenandoahFreeSet::get_usable_free_words(size_t free_bytes) const {
 925   // e.g. card_size is 512, card_shift is 9, min_fill_size() is 8
 926   //      free is 514
 927   //      usable_free is 512, which is decreased to 0
 928   size_t usable_free = (free_bytes / CardTable::card_size()) << CardTable::card_shift();
 929   assert(usable_free <= free_bytes, "Sanity check");
 930   if ((free_bytes != usable_free) && (free_bytes - usable_free < ShenandoahHeap::min_fill_size() * HeapWordSize)) {
 931     // After aligning to card multiples, the remainder would be smaller than
 932     // the minimum filler object, so we'll need to take away another card's
 933     // worth to construct a filler object.
 934     if (usable_free >= CardTable::card_size()) {
 935       usable_free -= CardTable::card_size();
 936     } else {
 937       assert(usable_free == 0, "usable_free is a multiple of card_size and card_size > min_fill_size");
 938     }
 939   }
 940 
 941   return usable_free / HeapWordSize;
 942 }
 943 
 944 // Given a size argument, which is a multiple of card size, a request struct
 945 // for a PLAB, and an old region, return a pointer to the allocated space for
 946 // a PLAB which is card-aligned and where any remaining shard in the region
 947 // has been suitably filled by a filler object.
 948 // It is assumed (and assertion-checked) that such an allocation is always possible.
 949 HeapWord* ShenandoahFreeSet::allocate_aligned_plab(size_t size, ShenandoahAllocRequest& req, ShenandoahHeapRegion* r) {
 950   assert(_heap->mode()->is_generational(), "PLABs are only for generational mode");
 951   assert(r->is_old(), "All PLABs reside in old-gen");
 952   assert(!req.is_mutator_alloc(), "PLABs should not be allocated by mutators.");
 953   assert(is_aligned(size, CardTable::card_size_in_words()), "Align by design");
 954 
 955   HeapWord* result = r->allocate_aligned(size, req, CardTable::card_size());
 956   assert(result != nullptr, "Allocation cannot fail");
 957   assert(r->top() <= r->end(), "Allocation cannot span end of region");
 958   assert(is_aligned(result, CardTable::card_size_in_words()), "Align by design");
 959   return result;
 960 }
 961 
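     // Attempts to satisfy req within region r.  Recycles r if it is trash, affiliates r when it is a new (empty)
     // region, and shrinks LAB requests down toward req.min_size() when the region cannot hold the preferred size.
     // If the region's remaining capacity falls below useful thresholds afterwards, the region is retired from its
     // partition.  Returns nullptr on failure; sets in_new_region to indicate whether r was previously empty.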
 962 HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
 963   assert (has_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());
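       // Do not allocate from trashed regions while concurrent weak root processing is still in progress.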
 964   if (_heap->is_concurrent_weak_root_in_progress() && r->is_trash()) {
 965     return nullptr;
 966   }
 967   HeapWord* result = nullptr;
 968   try_recycle_trashed(r);

 969   in_new_region = r->is_empty();
 970 
 971   if (in_new_region) {
 972     log_debug(gc)("Using new region (" SIZE_FORMAT ") for %s (" PTR_FORMAT ").",
 973                        r->index(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(&req));
 974     assert(!r->is_affiliated(), "New region " SIZE_FORMAT " should be unaffiliated", r->index());
 975     r->set_affiliation(req.affiliation());
 976     if (r->is_old()) {
 977       // Any OLD region allocated during concurrent coalesce-and-fill does not need to be coalesced and filled because
 978       // all objects allocated within this region are above TAMS (and thus are implicitly marked).  In case this is an
 979       // OLD region and concurrent preparation for mixed evacuations visits this region before the start of the next
 980       // old-gen concurrent mark (i.e. this region is allocated following the start of old-gen concurrent mark but before
 981       // concurrent preparations for mixed evacuations are completed), we mark this region as not requiring any
 982       // coalesce-and-fill processing.
 983       r->end_preemptible_coalesce_and_fill();
 984       _heap->old_generation()->clear_cards_for(r);
 985     }
 986     _heap->generation_for(r->affiliation())->increment_affiliated_region_count();
 987 
 988 #ifdef ASSERT
 989     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
 990     assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom");
 991     assert(ctx->is_bitmap_clear_range(ctx->top_bitmap(r), r->end()), "Bitmap above top_bitmap() must be clear");
 992 #endif
 995   } else {
 996     assert(r->is_affiliated(), "Region " SIZE_FORMAT " that is not new should be affiliated", r->index());
 997     if (r->affiliation() != req.affiliation()) {
 998       assert(_heap->mode()->is_generational(), "Request for %s from %s region should only happen in generational mode.",
 999              req.affiliation_name(), r->affiliation_name());
1000       return nullptr;
1001     }
1002   }
1003 
1004   // req.size() is in words, r->free() is in bytes.
1005   if (req.is_lab_alloc()) {
1006     size_t adjusted_size = req.size();
1007     size_t free = r->free();    // free represents bytes available within region r
1008     if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
1009       // This is a PLAB allocation
1010       assert(_heap->mode()->is_generational(), "PLABs are only for generational mode");
1011       assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, r->index()),
1012              "PLABS must be allocated in old_collector_free regions");
1013 
1014       // Need to ensure that PLABs are aligned on a multiple of the card size.
1015       // Convert free from unaligned bytes to an aligned number of words.
1016       size_t usable_free = get_usable_free_words(free);
1017       if (adjusted_size > usable_free) {
1018         adjusted_size = usable_free;
1019       }
1020       adjusted_size = align_down(adjusted_size, CardTable::card_size_in_words());
1021       if (adjusted_size >= req.min_size()) {
1022         result = allocate_aligned_plab(adjusted_size, req, r);
1023         assert(result != nullptr, "allocate must succeed");
1024         req.set_actual_size(adjusted_size);
1025       } else {
1026         // Otherwise, leave result == nullptr because the adjusted size is smaller than min size.
1027         log_trace(gc, free)("Failed to shrink PLAB request (" SIZE_FORMAT ") in region " SIZE_FORMAT " to " SIZE_FORMAT
1028                             " because min_size() is " SIZE_FORMAT, req.size(), r->index(), adjusted_size, req.min_size());
1029       }
1030     } else {
1031       // This is a GCLAB or a TLAB allocation
1032       // Convert free from unaligned bytes to aligned number of words
1033       free = align_down(free >> LogHeapWordSize, MinObjAlignment);
1034       if (adjusted_size > free) {
1035         adjusted_size = free;
1036       }
1037       if (adjusted_size >= req.min_size()) {
1038         result = r->allocate(adjusted_size, req);
1039         assert (result != nullptr, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, adjusted_size);
1040         req.set_actual_size(adjusted_size);
1041       } else {
1042         log_trace(gc, free)("Failed to shrink TLAB or GCLAB request (" SIZE_FORMAT ") in region " SIZE_FORMAT " to " SIZE_FORMAT
1043                             " because min_size() is " SIZE_FORMAT, req.size(), r->index(), adjusted_size, req.min_size());
1044       }
1045     }
1046   } else {
1047     size_t size = req.size();
1048     result = r->allocate(size, req);
1049     if (result != nullptr) {
1050       // Record actual allocation size
1051       req.set_actual_size(size);
1052     }
1053   }
1054 
1055   if (result != nullptr) {
1056     // Allocation successful, bump stats:
1057     if (req.is_mutator_alloc()) {
1058       assert(req.is_young(), "Mutator allocations always come from young generation.");
1059       _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, req.actual_size() * HeapWordSize);
1060     } else {
1061       assert(req.is_gc_alloc(), "Should be gc_alloc since req wasn't mutator alloc");

1062 
1063       // For GC allocations, we advance update_watermark because the objects relocated into this memory during
1064       // evacuation are not updated during evacuation.  For both young and old regions r, it is essential that all
1065       // PLABs be made parsable at the end of evacuation.  This is enabled by retiring all plabs at end of evacuation.
1066       r->set_update_watermark(r->top());
1067       if (r->is_old()) {
1068         _partitions.increase_used(ShenandoahFreeSetPartitionId::OldCollector, req.actual_size() * HeapWordSize);
1069         assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "old-gen allocations use PLAB or shared allocation");
1070         // for plabs, we'll sort the difference between evac and promotion usage when we retire the plab
1071       } else {
1072         _partitions.increase_used(ShenandoahFreeSetPartitionId::Collector, req.actual_size() * HeapWordSize);
1073       }
1074     }
1075   }
1076 
1077   static const size_t min_capacity = (size_t) (ShenandoahHeapRegion::region_size_bytes() * (1.0 - 1.0 / ShenandoahEvacWaste));
1078   size_t ac = alloc_capacity(r);
1079 
1080   if (((result == nullptr) && (ac < min_capacity)) || (alloc_capacity(r) < PLAB::min_size() * HeapWordSize)) {
1081     // Regardless of whether this allocation succeeded, if the remaining memory is less than PLAB::min_size(), retire this region.
1082     // Note that retire_from_partition() increases used to account for waste.
1083 
1084     // Also, if this allocation request failed and the consumed within this region * ShenandoahEvacWaste > region size,
1085     // then retire the region so that subsequent searches can find available memory more quickly.
1086 
1087     size_t idx = r->index();
1088     ShenandoahFreeSetPartitionId orig_partition;
1089     if (req.is_mutator_alloc()) {
1090       orig_partition = ShenandoahFreeSetPartitionId::Mutator;
1091     } else if (req.type() == ShenandoahAllocRequest::_alloc_gclab) {
1092       orig_partition = ShenandoahFreeSetPartitionId::Collector;
1093     } else if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
1094       orig_partition = ShenandoahFreeSetPartitionId::OldCollector;
1095     } else {
1096       assert(req.type() == ShenandoahAllocRequest::_alloc_shared_gc, "Unexpected allocation type");
1097       if (req.is_old()) {
1098         orig_partition = ShenandoahFreeSetPartitionId::OldCollector;
1099       } else {
1100         orig_partition = ShenandoahFreeSetPartitionId::Collector;
1101       }
1102     }
1103     _partitions.retire_from_partition(orig_partition, idx, r->used());
1104     _partitions.assert_bounds();
1105   }
1106   return result;
1107 }
1108 
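     // Humongous allocation: find a run of contiguous, completely empty regions in the Mutator partition large enough
     // to hold req, make the first region a humongous start and the rest humongous continuations, account the trailing
     // remainder as waste, and retire the whole range from the Mutator partition.  Returns nullptr when no sufficiently
     // large run of empty regions exists.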
1109 HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
1110   assert(req.is_mutator_alloc(), "All humongous allocations are performed by mutator");
1111   shenandoah_assert_heaplocked();
1112 
1113   size_t words_size = req.size();
1114   idx_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
1115 
1116   assert(req.is_young(), "Humongous regions always allocated in YOUNG");
1117   ShenandoahGeneration* generation = _heap->generation_for(req.affiliation());
1118 
1119   // Check if there are enough regions left to satisfy allocation.
1120   if (num > (idx_t) _partitions.count(ShenandoahFreeSetPartitionId::Mutator)) {
1121     return nullptr;
1122   }
1123 
1124   idx_t start_range = _partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator);
1125   idx_t end_range = _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator) + 1;
1126   idx_t last_possible_start = end_range - num;
1127 
1128   // Find the continuous interval of $num regions, starting from $beg and ending in $end,
1129   // inclusive. Contiguous allocations are biased to the beginning.
1130   idx_t beg = _partitions.find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId::Mutator,
1131                                                                           start_range, num);
1132   if (beg > last_possible_start) {
1133     // Hit the end, goodbye
1134     return nullptr;
1135   }
1136   idx_t end = beg;
1137 
1138   while (true) {
1139     // We've confirmed num contiguous regions belonging to Mutator partition, so no need to confirm membership.
1140     // If region is not completely free, the current [beg; end] is useless, and we may fast-forward.  If we can extend
1141     // the existing range, we can exploit that certain regions are already known to be in the Mutator free set.
1142     while (!can_allocate_from(_heap->get_region(end))) {
1143       // region[end] is not empty, so we restart our search after region[end]
1144       idx_t slide_delta = end + 1 - beg;
1145       if (beg + slide_delta > last_possible_start) {
1146         // no room to slide
1147         return nullptr;
1148       }
1149       for (idx_t span_end = beg + num; slide_delta > 0; slide_delta--) {
1150         if (!_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, span_end)) {
1151           beg = _partitions.find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId::Mutator,
1152                                                                             span_end + 1, num);
1153           break;
1154         } else {
1155           beg++;
1156           span_end++;
1157         }
1158       }
1159       // Here, either beg identifies a range of num regions all of which are in the Mutator free set, or beg > last_possible_start
1160       if (beg > last_possible_start) {
1161         // Hit the end, goodbye
1162         return nullptr;
1163       }
1164       end = beg;
1165     }
1166 
1167     if ((end - beg + 1) == num) {
1168       // found the match
1169       break;
1170     }
1171 
1172     end++;
1173   }
1174 
1175   size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();

1176   // Initialize regions:
1177   for (idx_t i = beg; i <= end; i++) {
1178     ShenandoahHeapRegion* r = _heap->get_region(i);
1179     try_recycle_trashed(r);
1180 
1181     assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous");
1182     assert(r->is_empty(), "Should be empty");
1183 
1184     if (i == beg) {
1185       r->make_humongous_start();
1186     } else {
1187       r->make_humongous_cont();
1188     }
1189 
1190     // Trailing region may be non-full, record the remainder there
1191     size_t used_words;
1192     if ((i == end) && (remainder != 0)) {
1193       used_words = remainder;
1194     } else {
1195       used_words = ShenandoahHeapRegion::region_size_words();
1196     }
1197 
1198     r->set_affiliation(req.affiliation());
1199     r->set_update_watermark(r->bottom());
1200     r->set_top(r->bottom() + used_words);
1201   }
1202   generation->increase_affiliated_region_count(num);
1203   if (remainder != 0) {
1204     // Record this remainder as allocation waste
1205     _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);
1206   }
1207 
1208   // retire_range_from_partition() will adjust bounds on Mutator free set if appropriate
1209   _partitions.retire_range_from_partition(ShenandoahFreeSetPartitionId::Mutator, beg, end);
1210 
1211   size_t total_humongous_size = ShenandoahHeapRegion::region_size_bytes() * num;
1212   _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, total_humongous_size);
1213   _partitions.assert_bounds();
1214   req.set_actual_size(words_size);
1215   if (remainder != 0) {
1216     req.set_waste(ShenandoahHeapRegion::region_size_words() - remainder);
1217   }
1218   return _heap->get_region(beg)->bottom();
1219 }
1220 
1221 void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion* r) {
1222   if (r->is_trash()) {
1223     r->recycle();
1224   }
1225 }
1226 
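     // Recycles all trashed regions.  Regions are gathered without the heap lock and then recycled in small batches
     // under the lock, yielding between batches so that allocating threads are not starved of the lock.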
1227 void ShenandoahFreeSet::recycle_trash() {
1228   // The heap lock is not reentrant; check that we do not already hold it.
1229   shenandoah_assert_not_heaplocked();
1230   size_t count = 0;
1231   for (size_t i = 0; i < _heap->num_regions(); i++) {
1232     ShenandoahHeapRegion* r = _heap->get_region(i);
1233     if (r->is_trash()) {
1234       _trash_regions[count++] = r;

1235     }
1236   }
1237 
1238   size_t total_batches = 0;
1239   jlong batch_start_time = 0;
1240   jlong recycle_trash_start_time = os::javaTimeNanos();    // This value will be treated as the initial batch_start_time
1241   jlong batch_end_time = recycle_trash_start_time;
1242   // Process as many batches as can be processed within 10 us.
1243   static constexpr jlong deadline_ns = 10000;               // 10 us
1244   size_t idx = 0;
1245   jlong predicted_next_batch_end_time;
1246   jlong batch_process_time_estimate = 0;
1247   while (idx < count) {
1248     if (idx > 0) {
1249       os::naked_yield(); // Yield to allow allocators to take the lock, except on the first iteration
1250     }
1251     // Avoid another call to javaTimeNanos() if we already know time at which last batch ended
1252     batch_start_time = batch_end_time;
1253     const jlong deadline = batch_start_time + deadline_ns;
1254 
1255     ShenandoahHeapLocker locker(_heap->lock());
1256     do {
1257       // Measurements on typical 2024 hardware suggest it typically requires between 1400 and 2000 ns to process a batch of
1258       // 32 regions, assuming low contention with other threads.  Sometimes this goes higher, when mutator threads
1259       // are contending for CPU cores and/or the heap lock.  On this hardware with a 10 us deadline, we expect 3-6 batches
1260       // to be processed between yields most of the time.
1261       //
1262       // Note that deadline is enforced since the end of previous batch.  In the case that yield() or acquisition of heap lock
1263       // takes a "long time", we will have less time to process regions, but we will always process at least one batch between
1264       // yields.  Yielding more frequently when there is heavy contention for the heap lock or for CPU cores is considered the
1265       // right thing to do.
1266       const size_t REGIONS_PER_BATCH = 32;
1267       size_t max_idx = MIN2(count, idx + REGIONS_PER_BATCH);
1268       while (idx < max_idx) {
1269         try_recycle_trashed(_trash_regions[idx++]);
1270       }
1271       total_batches++;
1272       batch_end_time = os::javaTimeNanos();
1273       // Estimate includes historic combination of yield times and heap lock acquisition times.
1274       batch_process_time_estimate = (batch_end_time - recycle_trash_start_time) / total_batches;
1275       predicted_next_batch_end_time = batch_end_time + batch_process_time_estimate;
1276     } while ((idx < count) && (predicted_next_batch_end_time < deadline));
1277   }
1278 }
1279 
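     // Moves an empty (possibly still trash) region from the Mutator partition to the OldCollector partition so it can
     // serve old-gen allocations, augmenting the old evacuation reserve and transferring one region of capacity from
     // young to old.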
1280 void ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) {
1281   size_t idx = r->index();
1282 
1283   assert(_partitions.partition_id_matches(idx, ShenandoahFreeSetPartitionId::Mutator), "Should be in mutator view");
1284   assert(can_allocate_from(r), "Should not be allocated");
1285 
1286   ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
1287   size_t region_capacity = alloc_capacity(r);
1288   _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
1289                                                ShenandoahFreeSetPartitionId::OldCollector, region_capacity);
1290   _partitions.assert_bounds();
1291   _heap->old_generation()->augment_evacuation_reserve(region_capacity);
1292   bool transferred = gen_heap->generation_sizer()->transfer_to_old(1);
1293   if (!transferred) {
1294     log_warning(gc, free)("Forcing transfer of region " SIZE_FORMAT " to old reserve.", idx);
1295     gen_heap->generation_sizer()->force_transfer_to_old(1);
1296   }
1297   // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
1298   // to recycle trash before attempting to allocate anything in the region.
1299 }
1300 
1301 void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
1302   size_t idx = r->index();
1303 
1304   assert(_partitions.partition_id_matches(idx, ShenandoahFreeSetPartitionId::Mutator), "Should be in mutator view");
1305   assert(can_allocate_from(r), "Should not be allocated");
1306 
1307   size_t ac = alloc_capacity(r);
1308   _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
1309                                                ShenandoahFreeSetPartitionId::Collector, ac);
1310   _partitions.assert_bounds();
1311 
1312   // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
1313   // to recycle trash before attempting to allocate anything in the region.
1314 }
1315 
1316 void ShenandoahFreeSet::clear() {
1317   shenandoah_assert_heaplocked();
1318   clear_internal();
1319 }
1320 
1321 void ShenandoahFreeSet::clear_internal() {
1322   _partitions.make_all_regions_unavailable();
1323 
1324   _alloc_bias_weight = 0;
1325   _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::Mutator, true);
1326   _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::Collector, false);
1327   _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::OldCollector, false);
1328 }
1329 
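     // Rebuilds partition membership from the current state of the heap: tallies trashed (cset) regions per generation,
     // records the index range and count of old regions, places regions with sufficient allocation capacity into the
     // Mutator partition (young and trashed regions) or the OldCollector partition (untrashed old regions), and then
     // re-establishes the interval bounds for both partitions.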
1330 void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regions, size_t &old_cset_regions,
1331                                                          size_t &first_old_region, size_t &last_old_region,
1332                                                          size_t &old_region_count) {
1333   clear_internal();
1334 
1335   first_old_region = _heap->num_regions();
1336   last_old_region = 0;
1337   old_region_count = 0;
1338   old_cset_regions = 0;
1339   young_cset_regions = 0;
1340 
1341   size_t region_size_bytes = _partitions.region_size_bytes();
1342   size_t max_regions = _partitions.max_regions();
1343 
1344   size_t mutator_leftmost = max_regions;
1345   size_t mutator_rightmost = 0;
1346   size_t mutator_leftmost_empty = max_regions;
1347   size_t mutator_rightmost_empty = 0;
1348   size_t mutator_regions = 0;
1349   size_t mutator_used = 0;
1350 
1351   size_t old_collector_leftmost = max_regions;
1352   size_t old_collector_rightmost = 0;
1353   size_t old_collector_leftmost_empty = max_regions;
1354   size_t old_collector_rightmost_empty = 0;
1355   size_t old_collector_regions = 0;
1356   size_t old_collector_used = 0;
1357 
1358   size_t num_regions = _heap->num_regions();
1359   for (size_t idx = 0; idx < num_regions; idx++) {
1360     ShenandoahHeapRegion* region = _heap->get_region(idx);
1361     if (region->is_trash()) {
1362       // Trashed regions represent regions that had been in the collection partition but have not yet been "cleaned up".
1363       // The cset regions are not "trashed" until we have finished update refs.
1364       if (region->is_old()) {
1365         old_cset_regions++;
1366       } else {
1367         assert(region->is_young(), "Trashed region should be old or young");
1368         young_cset_regions++;
1369       }
1370     } else if (region->is_old()) {
1371       // count both humongous and regular regions, but don't count trash (cset) regions.
1372       old_region_count++;
1373       if (first_old_region > idx) {
1374         first_old_region = idx;
1375       }
1376       last_old_region = idx;
1377     }
1378     if (region->is_alloc_allowed() || region->is_trash()) {
1379       assert(!region->is_cset(), "Shouldn't be adding cset regions to the free set");
1380 
1381       // Do not add regions that would almost surely fail allocation
1382       size_t ac = alloc_capacity(region);
1383       if (ac > PLAB::min_size() * HeapWordSize) {
1384         if (region->is_trash() || !region->is_old()) {
1385           // Both young and old collected regions (trashed) are placed into the Mutator set
1386           _partitions.raw_assign_membership(idx, ShenandoahFreeSetPartitionId::Mutator);
1387           if (idx < mutator_leftmost) {
1388             mutator_leftmost = idx;
1389           }
1390           if (idx > mutator_rightmost) {
1391             mutator_rightmost = idx;
1392           }
1393           if (ac == region_size_bytes) {
1394             if (idx < mutator_leftmost_empty) {
1395               mutator_leftmost_empty = idx;
1396             }
1397             if (idx > mutator_rightmost_empty) {
1398               mutator_rightmost_empty = idx;
1399             }
1400           }
1401           mutator_regions++;
1402           mutator_used += (region_size_bytes - ac);
1403         } else {
1404           // !region->is_trash() && region is_old()
1405           _partitions.raw_assign_membership(idx, ShenandoahFreeSetPartitionId::OldCollector);
1406           if (idx < old_collector_leftmost) {
1407             old_collector_leftmost = idx;
1408           }
1409           if (idx > old_collector_rightmost) {
1410             old_collector_rightmost = idx;
1411           }
1412           if (ac == region_size_bytes) {
1413             if (idx < old_collector_leftmost_empty) {
1414               old_collector_leftmost_empty = idx;
1415             }
1416             if (idx > old_collector_rightmost_empty) {
1417               old_collector_rightmost_empty = idx;
1418             }
1419           }
1420           old_collector_regions++;
1421           old_collector_used += (region_size_bytes - ac);
1422         }
1423       }
1424     }
1425   }
1426   log_debug(gc)("  At end of prep_to_rebuild, mutator_leftmost: " SIZE_FORMAT
1427                 ", mutator_rightmost: " SIZE_FORMAT
1428                 ", mutator_leftmost_empty: " SIZE_FORMAT
1429                 ", mutator_rightmost_empty: " SIZE_FORMAT
1430                 ", mutator_regions: " SIZE_FORMAT
1431                 ", mutator_used: " SIZE_FORMAT,
1432                 mutator_leftmost, mutator_rightmost, mutator_leftmost_empty, mutator_rightmost_empty,
1433                 mutator_regions, mutator_used);
1434 
1435   log_debug(gc)("  old_collector_leftmost: " SIZE_FORMAT
1436                 ", old_collector_rightmost: " SIZE_FORMAT
1437                 ", old_collector_leftmost_empty: " SIZE_FORMAT
1438                 ", old_collector_rightmost_empty: " SIZE_FORMAT
1439                 ", old_collector_regions: " SIZE_FORMAT
1440                 ", old_collector_used: " SIZE_FORMAT,
1441                 old_collector_leftmost, old_collector_rightmost, old_collector_leftmost_empty, old_collector_rightmost_empty,
1442                 old_collector_regions, old_collector_used);
1443 
1444   idx_t rightmost_idx = (mutator_leftmost == max_regions)? -1: (idx_t) mutator_rightmost;
1445   idx_t rightmost_empty_idx = (mutator_leftmost_empty == max_regions)? -1: (idx_t) mutator_rightmost_empty;
1446   _partitions.establish_mutator_intervals(mutator_leftmost, rightmost_idx, mutator_leftmost_empty, rightmost_empty_idx,
1447                                           mutator_regions, mutator_used);
1448   rightmost_idx = (old_collector_leftmost == max_regions)? -1: (idx_t) old_collector_rightmost;
1449   rightmost_empty_idx = (old_collector_leftmost_empty == max_regions)? -1: (idx_t) old_collector_rightmost_empty;
1450   _partitions.establish_old_collector_intervals(old_collector_leftmost, rightmost_idx, old_collector_leftmost_empty,
1451                                                 rightmost_empty_idx, old_collector_regions, old_collector_used);
1452   log_debug(gc)("  After find_regions_with_alloc_capacity(), Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "],"
1453                 "  Old Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
1454                 _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
1455                 _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
1456                 _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector),
1457                 _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector));
1458 }
1459 
1460 // Returns the number of regions transferred; adds the transferred bytes to the reference argument bytes_transferred.
1461 size_t ShenandoahFreeSet::transfer_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId which_collector,
1462                                                                                    size_t max_xfer_regions,
1463                                                                                    size_t& bytes_transferred) {
1464   shenandoah_assert_heaplocked();
1465   size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1466   size_t transferred_regions = 0;
1467   idx_t rightmost = _partitions.rightmost_empty(which_collector);
1468   for (idx_t idx = _partitions.leftmost_empty(which_collector); (transferred_regions < max_xfer_regions) && (idx <= rightmost); ) {
1469     assert(_partitions.in_free_set(which_collector, idx), "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, idx);
1470     // Note: can_allocate_from() denotes that region is entirely empty
1471     if (can_allocate_from(idx)) {
1472       _partitions.move_from_partition_to_partition(idx, which_collector, ShenandoahFreeSetPartitionId::Mutator, region_size_bytes);
1473       transferred_regions++;
1474       bytes_transferred += region_size_bytes;
1475     }
1476     idx = _partitions.find_index_of_next_available_region(which_collector, idx + 1);
1477   }
1478   return transferred_regions;
1479 }
1480 
1481 // Returns the number of regions transferred; adds the transferred bytes to the reference argument bytes_transferred.
1482 size_t ShenandoahFreeSet::transfer_non_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId collector_id,
1483                                                                                        size_t max_xfer_regions,
1484                                                                                        size_t& bytes_transferred) {
1485   shenandoah_assert_heaplocked();
1486   size_t transferred_regions = 0;
1487   idx_t rightmost = _partitions.rightmost(collector_id);
1488   for (idx_t idx = _partitions.leftmost(collector_id); (transferred_regions < max_xfer_regions) && (idx <= rightmost); ) {
1489     assert(_partitions.in_free_set(collector_id, idx), "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, idx);
1490     size_t ac = alloc_capacity(idx);
1491     if (ac > 0) {
1492       _partitions.move_from_partition_to_partition(idx, collector_id, ShenandoahFreeSetPartitionId::Mutator, ac);
1493       transferred_regions++;
1494       bytes_transferred += ac;
1495     }
1496     idx = _partitions.find_index_of_next_available_region(collector_id, idx + 1);
1497   }
1498   return transferred_regions;
1499 }
1500 
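     // Moves up to max_xfer_regions regions from the Collector and OldCollector reserves back to the Mutator partition,
     // preferring completely empty regions and falling back to non-empty Collector regions.  Empty regions taken from
     // the OldCollector partition also transfer capacity from the old generation back to young.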
1501 void ShenandoahFreeSet::move_regions_from_collector_to_mutator(size_t max_xfer_regions) {
1502   size_t collector_xfer = 0;
1503   size_t old_collector_xfer = 0;
1504 
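       // Transfers are attempted in three passes, preferring fully empty regions: first empty Collector regions,
       // then empty OldCollector regions (which also shifts generation capacity to young), and finally partially
       // used Collector regions, stopping once max_xfer_regions have been moved.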
1505   // Process empty regions within the Collector free partition
1506   if ((max_xfer_regions > 0) &&
1507       (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Collector)
1508        <= _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Collector))) {
1509     ShenandoahHeapLocker locker(_heap->lock());
1510     max_xfer_regions -=
1511       transfer_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId::Collector, max_xfer_regions,
1512                                                                collector_xfer);
1513   }
1514 
1515   // Process empty regions within the OldCollector free partition
1516   if ((max_xfer_regions > 0) &&
1517       (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::OldCollector)
1518        <= _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::OldCollector))) {
1519     ShenandoahHeapLocker locker(_heap->lock());
1520     size_t old_collector_regions =
1521       transfer_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId::OldCollector, max_xfer_regions,
1522                                                                old_collector_xfer);
1523     max_xfer_regions -= old_collector_regions;
1524     if (old_collector_regions > 0) {
1525       ShenandoahGenerationalHeap::cast(_heap)->generation_sizer()->transfer_to_young(old_collector_regions);
1526     }
1527   }
1528 
1529   // If there are any non-empty regions within the Collector partition, we can also move them to the Mutator free partition
1530   if ((max_xfer_regions > 0) && (_partitions.leftmost(ShenandoahFreeSetPartitionId::Collector)
1531                                  <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector))) {
1532     ShenandoahHeapLocker locker(_heap->lock());
1533     max_xfer_regions -=
1534       transfer_non_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId::Collector, max_xfer_regions,
1535                                                                    collector_xfer);
1536   }
1537 
1538   size_t total_xfer = collector_xfer + old_collector_xfer;
1539   log_info(gc, ergo)("At start of update refs, moving " SIZE_FORMAT "%s to Mutator free set from Collector Reserve ("
1540                      SIZE_FORMAT "%s) and from Old Collector Reserve (" SIZE_FORMAT "%s)",
1541                      byte_size_in_proper_unit(total_xfer), proper_unit_for_byte_size(total_xfer),
1542                      byte_size_in_proper_unit(collector_xfer), proper_unit_for_byte_size(collector_xfer),
1543                      byte_size_in_proper_unit(old_collector_xfer), proper_unit_for_byte_size(old_collector_xfer));
1544 }
1545 
1546 
1547 // Overwrite arguments to represent the amount of memory in each generation that is about to be recycled
1548 void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_cset_regions, size_t &old_cset_regions,
1549                                            size_t &first_old_region, size_t &last_old_region, size_t &old_region_count) {
1550   shenandoah_assert_heaplocked();
1551   // This resets all state information, removing all regions from all sets.
1552   clear();
1553   log_debug(gc, free)("Rebuilding FreeSet");
1554 
1555   // This places regions that have alloc_capacity into the old_collector set if they identify as is_old(), or into
1556   // the mutator set otherwise.  All trashed (cset) regions are affiliated young and placed in the mutator set.
1557   find_regions_with_alloc_capacity(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
1558 }
1559 
1560 void ShenandoahFreeSet::establish_generation_sizes(size_t young_region_count, size_t old_region_count) {
1561   assert(young_region_count + old_region_count == ShenandoahHeap::heap()->num_regions(), "Sanity");
1562   if (ShenandoahHeap::heap()->mode()->is_generational()) {
1563     ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
1564     ShenandoahOldGeneration* old_gen = heap->old_generation();
1565     ShenandoahYoungGeneration* young_gen = heap->young_generation();
1566     size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1567 
1568     size_t original_old_capacity = old_gen->max_capacity();
1569     size_t new_old_capacity = old_region_count * region_size_bytes;
1570     size_t new_young_capacity = young_region_count * region_size_bytes;
1571     old_gen->set_capacity(new_old_capacity);
1572     young_gen->set_capacity(new_young_capacity);
1573 
1574     if (new_old_capacity > original_old_capacity) {
1575       size_t region_count = (new_old_capacity - original_old_capacity) / region_size_bytes;
1576       log_info(gc)("Transfer " SIZE_FORMAT " region(s) from %s to %s, yielding increased size: " PROPERFMT,
1577                    region_count, young_gen->name(), old_gen->name(), PROPERFMTARGS(new_old_capacity));
1578     } else if (new_old_capacity < original_old_capacity) {
1579       size_t region_count = (original_old_capacity - new_old_capacity) / region_size_bytes;
1580       log_info(gc)("Transfer " SIZE_FORMAT " region(s) from %s to %s, yielding increased size: " PROPERFMT,
1581                    region_count, old_gen->name(), young_gen->name(), PROPERFMTARGS(new_young_capacity));
1582     }
1583     // This balances generations, so clear any pending request to balance.
1584     old_gen->set_region_balance(0);
1585   }
1586 }
1587 
1588 void ShenandoahFreeSet::finish_rebuild(size_t young_cset_regions, size_t old_cset_regions, size_t old_region_count,
1589                                        bool have_evacuation_reserves) {
1590   shenandoah_assert_heaplocked();
1591   size_t young_reserve(0), old_reserve(0);
1592 
1593   if (_heap->mode()->is_generational()) {
1594     compute_young_and_old_reserves(young_cset_regions, old_cset_regions, have_evacuation_reserves,
1595                                    young_reserve, old_reserve);
1596   } else {
1597     young_reserve = (_heap->max_capacity() / 100) * ShenandoahEvacReserve;
1598     old_reserve = 0;
1599   }
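       // Illustrative numbers only: with a 1024 MB maximum capacity and ShenandoahEvacReserve at its default of 5,
       // the non-generational path above reserves (1024 MB / 100) * 5, roughly 51 MB, for evacuation.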
1600 
1601   // Move some of the mutator regions in the Collector and OldCollector partitions in order to satisfy
1602   // young_reserve and old_reserve.
1603   reserve_regions(young_reserve, old_reserve, old_region_count);
1604   size_t young_region_count = _heap->num_regions() - old_region_count;
1605   establish_generation_sizes(young_region_count, old_region_count);
1606   establish_old_collector_alloc_bias();
1607   _partitions.assert_bounds();
1608   log_status();
1609 }
1610 
1611 void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions, size_t old_cset_regions,
1612                                                        bool have_evacuation_reserves,
1613                                                        size_t& young_reserve_result, size_t& old_reserve_result) const {
1614   shenandoah_assert_generational();
1615   const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1616 
1617   ShenandoahOldGeneration* const old_generation = _heap->old_generation();
1618   size_t old_available = old_generation->available();
1619   size_t old_unaffiliated_regions = old_generation->free_unaffiliated_regions();
1620   ShenandoahYoungGeneration* const young_generation = _heap->young_generation();
1621   size_t young_capacity = young_generation->max_capacity();
1622   size_t young_unaffiliated_regions = young_generation->free_unaffiliated_regions();
1623 
1624   // Add in the regions we anticipate to be freed by evacuation of the collection set
1625   old_unaffiliated_regions += old_cset_regions;
1626   young_unaffiliated_regions += young_cset_regions;
1627 
1628   // Consult old-region balance to make adjustments to current generation capacities and availability.
1629   // The generation region transfers take place after we rebuild.
1630   const ssize_t old_region_balance = old_generation->get_region_balance();
1631   if (old_region_balance != 0) {
1632 #ifdef ASSERT
1633     if (old_region_balance > 0) {
1634       assert(old_region_balance <= checked_cast<ssize_t>(old_unaffiliated_regions), "Cannot transfer regions that are affiliated");
1635     } else {
1636       assert(0 - old_region_balance <= checked_cast<ssize_t>(young_unaffiliated_regions), "Cannot transfer regions that are affiliated");
1637     }
1638 #endif
1639 
1640     ssize_t xfer_bytes = old_region_balance * checked_cast<ssize_t>(region_size_bytes);
1641     old_available -= xfer_bytes;
1642     old_unaffiliated_regions -= old_region_balance;
1643     young_capacity += xfer_bytes;
1644     young_unaffiliated_regions += old_region_balance;
1645   }
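       // Illustrative example (assuming, for illustration, 4 MB regions): a region balance of +2 shifts 8 MB of
       // capacity and two unaffiliated regions from the old accounting to the young accounting above, before the
       // reserves are computed.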
1646 
1647   // All allocations taken from the old collector set are performed by GC, generally using PLABs for both
1648   // promotions and evacuations.  The partition between which old memory is reserved for evacuation and
1649   // which is reserved for promotion is enforced using thread-local variables that prescribe intentions for
1650   // each PLAB's available memory.
1651   if (have_evacuation_reserves) {
1652     // We are rebuilding at the end of final mark, having already established evacuation budgets for this GC pass.
1653     const size_t promoted_reserve = old_generation->get_promoted_reserve();
1654     const size_t old_evac_reserve = old_generation->get_evacuation_reserve();
1655     young_reserve_result = young_generation->get_evacuation_reserve();
1656     old_reserve_result = promoted_reserve + old_evac_reserve;
1657     assert(old_reserve_result <= old_available,
1658            "Cannot reserve (" SIZE_FORMAT " + " SIZE_FORMAT") more OLD than is available: " SIZE_FORMAT,
1659            promoted_reserve, old_evac_reserve, old_available);
1660   } else {
1661     // We are rebuilding at end of GC, so we set aside budgets specified on command line (or defaults)
1662     young_reserve_result = (young_capacity * ShenandoahEvacReserve) / 100;
1663     // The auto-sizer has already made old-gen large enough to hold all anticipated evacuations and promotions.
1664     // Affiliated old-gen regions are already in the OldCollector free set.  Add in the relevant number of
1665     // unaffiliated regions.
1666     old_reserve_result = old_available;
1667   }
1668 
1669   // Old regions with less than PLAB::min_size() of available memory are not placed into the OldCollector free set.
1670   // Because of this, the OldCollector free set may hold less memory than old_available suggests, so adjust the
1671   // reserve downward to account for this possibility.  This loss is part of the reason why the original budget was
1672   // adjusted with the ShenandoahOldEvacWaste and ShenandoahOldPromoWaste multipliers.
1673   if (old_reserve_result >
1674       _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes) {
1675     old_reserve_result =
1676       _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes;
1677   }
1678 
1679   if (young_reserve_result > young_unaffiliated_regions * region_size_bytes) {
1680     young_reserve_result = young_unaffiliated_regions * region_size_bytes;
1681   }
1682 }
1683 
1684 // Having placed all regions that have allocation capacity into the mutator set if they identify as is_young()
1685 // or into the old collector set if they identify as is_old(), move some of these regions from the mutator set
1686 // into the collector set or old collector set in order to assure that the memory available for allocations within
1687 // the collector set is at least to_reserve and the memory available for allocations within the old collector set
1688 // is at least to_reserve_old.
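     // Regions are examined from the highest index downward, so the reserved regions tend to congregate at the high
     // end of the heap.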
1689 void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old, size_t &old_region_count) {
1690   for (size_t i = _heap->num_regions(); i > 0; i--) {
1691     size_t idx = i - 1;
1692     ShenandoahHeapRegion* r = _heap->get_region(idx);
1693     if (!_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx)) {
1694       continue;
1695     }
1696 
1697     size_t ac = alloc_capacity(r);
1698     assert (ac > 0, "Membership in free set implies has capacity");
1699     assert (!r->is_old() || r->is_trash(), "Except for trash, mutator_is_free regions should not be affiliated OLD");
1700 
1701     bool move_to_old_collector = _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector) < to_reserve_old;
1702     bool move_to_collector = _partitions.available_in(ShenandoahFreeSetPartitionId::Collector) < to_reserve;
1703 
1704     if (!move_to_collector && !move_to_old_collector) {
1705       // We've satisfied both to_reserve and to_reserve_old
1706       break;
1707     }
1708 
1709     if (move_to_old_collector) {
1710       // We give priority to OldCollector partition because we desire to pack OldCollector regions into higher
1711       // addresses than Collector regions.  Presumably, OldCollector regions are more "stable" and less likely to
1712       // be collected in the near future.
1713       if (r->is_trash() || !r->is_affiliated()) {
1714         // OLD regions that have available memory are already in the old_collector free set.
1715         _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
1716                                                      ShenandoahFreeSetPartitionId::OldCollector, ac);
1717         log_debug(gc)("  Shifting region " SIZE_FORMAT " from mutator_free to old_collector_free", idx);
1718         log_debug(gc)("  Shifted Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "],"
1719                       "  Old Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
1720                       _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
1721                       _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
1722                       _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector),
1723                       _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector));
1724         old_region_count++;
1725         continue;
1726       }
1727     }
1728 
1729     if (move_to_collector) {
1730       // Note: In a previous implementation, regions were only placed into the survivor space (collector_is_free) if
1731       // they were entirely empty.  This has the effect of causing new Mutator allocation to reside next to objects
1732       // that have already survived at least one GC, mixing ephemeral with longer-lived objects in the same region.
1733       // Any objects that have survived a GC are less likely to immediately become garbage, so a region that contains
1734       // survivor objects is less likely to be selected for the collection set.  This alternative implementation allows
1735       // survivor regions to continue accumulating other survivor objects, and makes it more likely that ephemeral objects
1736       // occupy regions comprised entirely of ephemeral objects.  These regions are highly likely to be included in the next
1737       // collection set, and they are easily evacuated because they have low density of live objects.
1738       _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
1739                                                    ShenandoahFreeSetPartitionId::Collector, ac);
1740       log_debug(gc)("  Shifting region " SIZE_FORMAT " from mutator_free to collector_free", idx);
1741       log_debug(gc)("  Shifted Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "],"
1742                     "  Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
1743                     _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
1744                     _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
1745                     _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector),
1746                     _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector));
1747     }
1748   }
1749 
1750   if (LogTarget(Info, gc, free)::is_enabled()) {
1751     size_t old_reserve = _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector);
1752     if (old_reserve < to_reserve_old) {
1753       log_info(gc, free)("Wanted " PROPERFMT " for old reserve, but only reserved: " PROPERFMT,
1754                          PROPERFMTARGS(to_reserve_old), PROPERFMTARGS(old_reserve));
1755     }
1756     size_t reserve = _partitions.available_in(ShenandoahFreeSetPartitionId::Collector);
1757     if (reserve < to_reserve) {
1758       log_info(gc, free)("Wanted " PROPERFMT " for young reserve, but only reserved: " PROPERFMT,
1759                          PROPERFMTARGS(to_reserve), PROPERFMTARGS(reserve));
1760     }
1761   }
1762 }
1763 
1764 void ShenandoahFreeSet::establish_old_collector_alloc_bias() {
1765   ShenandoahHeap* heap = ShenandoahHeap::heap();
1766   shenandoah_assert_heaplocked();
1767 
1768   idx_t left_idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector);
1769   idx_t right_idx = _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector);
1770   idx_t middle = (left_idx + right_idx) / 2;
1771   size_t available_in_first_half = 0;
1772   size_t available_in_second_half = 0;
1773 
1774   for (idx_t index = left_idx; index < middle; index++) {
1775     if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, index)) {
1776       ShenandoahHeapRegion* r = heap->get_region((size_t) index);
1777       available_in_first_half += r->free();
1778     }
1779   }
1780   for (idx_t index = middle; index <= right_idx; index++) {
1781     if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, index)) {
1782       ShenandoahHeapRegion* r = heap->get_region(index);
1783       available_in_second_half += r->free();
1784     }
1785   }
1786 
1787   // We desire to first consume the sparsely distributed regions in order that the remaining regions are densely packed.
1788   // Densely packing regions reduces the effort to search for a region that has sufficient memory to satisfy a new allocation
1789   // request.  Regions become sparsely distributed following a Full GC, which tends to slide all regions to the front of the
1790   // heap rather than allowing survivor regions to remain at the high end of the heap where we intend for them to congregate.
1791   _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::OldCollector,
1792                                           (available_in_second_half > available_in_first_half));
1793 }
1794 
1795 void ShenandoahFreeSet::log_status_under_lock() {
1796   // Must not be heap locked; this acquires the heap lock only when logging is enabled
1797   shenandoah_assert_not_heaplocked();
1798   if (LogTarget(Info, gc, free)::is_enabled()
1799       DEBUG_ONLY(|| LogTarget(Debug, gc, free)::is_enabled())) {
1800     ShenandoahHeapLocker locker(_heap->lock());
1801     log_status();
1802   }
1803 }
1804 
1805 void ShenandoahFreeSet::log_status() {
1806   shenandoah_assert_heaplocked();
1807 
1808 #ifdef ASSERT
1809   // Dump of the FreeSet details is only enabled if assertions are enabled
1810   if (LogTarget(Debug, gc, free)::is_enabled()) {
1811 #define BUFFER_SIZE 80
1812     size_t retired_old = 0;
1813     size_t retired_old_humongous = 0;
1814     size_t retired_young = 0;
1815     size_t retired_young_humongous = 0;
1816     size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1817     size_t retired_young_waste = 0;
1818     size_t retired_old_waste = 0;
1819     size_t consumed_collector = 0;
1820     size_t consumed_old_collector = 0;
1821     size_t consumed_mutator = 0;
1822     size_t available_old = 0;
1823     size_t available_young = 0;
1824     size_t available_mutator = 0;
1825     size_t available_collector = 0;
1826     size_t available_old_collector = 0;
1827 
1828     char buffer[BUFFER_SIZE];
1829     for (uint i = 0; i < BUFFER_SIZE; i++) {
1830       buffer[i] = '\0';
1831     }
1832 
1833     log_debug(gc)("FreeSet map legend:"
1834                        " M:mutator_free C:collector_free O:old_collector_free (lowercase: partially used)"
1835                        " H:old humongous h:young humongous ~:retired old _:retired young");
1836     log_debug(gc)(" mutator free range [" SIZE_FORMAT ".." SIZE_FORMAT "] allocates from %s, "
1837                   " collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "], "
1838                   "old collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "] allocates from %s",
1839                   _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
1840                   _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
1841                   _partitions.alloc_from_left_bias(ShenandoahFreeSetPartitionId::Mutator)? "left to right": "right to left",
1842                   _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector),
1843                   _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector),
1844                   _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector),
1845                   _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector),
1846                   _partitions.alloc_from_left_bias(ShenandoahFreeSetPartitionId::OldCollector)? "left to right": "right to left");
1847 
1848     for (uint i = 0; i < _heap->num_regions(); i++) {
1849       ShenandoahHeapRegion *r = _heap->get_region(i);
1850       uint idx = i % 64;
1851       if ((i != 0) && (idx == 0)) {
1852         log_debug(gc)(" %6u: %s", i-64, buffer);
1853       }
1854       if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, i)) {
1855         size_t capacity = alloc_capacity(r);
1856         assert(!r->is_old() || r->is_trash(), "Old regions except trash regions should not be in mutator_free set");
1857         available_mutator += capacity;
1858         consumed_mutator += region_size_bytes - capacity;
1859         buffer[idx] = (capacity == region_size_bytes)? 'M': 'm';
1860       } else if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, i)) {
1861         size_t capacity = alloc_capacity(r);
1862         assert(!r->is_old() || r->is_trash(), "Old regions except trash regions should not be in collector_free set");
1863         available_collector += capacity;
1864         consumed_collector += region_size_bytes - capacity;
1865         buffer[idx] = (capacity == region_size_bytes)? 'C': 'c';
1866       } else if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, i)) {
1867         size_t capacity = alloc_capacity(r);
1868         available_old_collector += capacity;
1869         consumed_old_collector += region_size_bytes - capacity;
1870         buffer[idx] = (capacity == region_size_bytes)? 'O': 'o';
1871       } else if (r->is_humongous()) {
1872         if (r->is_old()) {
1873           buffer[idx] = 'H';
1874           retired_old_humongous += region_size_bytes;
1875         } else {
1876           buffer[idx] = 'h';
1877           retired_young_humongous += region_size_bytes;
1878         }
1879       } else {
1880         if (r->is_old()) {
1881           buffer[idx] = '~';
1882           retired_old_waste += alloc_capacity(r);
1883           retired_old += region_size_bytes;
1884         } else {
1885           buffer[idx] = '_';
1886           retired_young_waste += alloc_capacity(r);
1887           retired_young += region_size_bytes;
1888         }
1889       }
1890     }
1891     uint remnant = _heap->num_regions() % 64;
1892     if (remnant > 0) {
1893       buffer[remnant] = '\0';
1894     } else {
1895       remnant = 64;
1896     }
1897     log_debug(gc)(" %6u: %s", (uint) (_heap->num_regions() - remnant), buffer);
1898   }
1899 #endif
1900 
1901   LogTarget(Info, gc, free) lt;
1902   if (lt.is_enabled()) {
1903     ResourceMark rm;
1904     LogStream ls(lt);
1905 
1906     {
1907       idx_t last_idx = 0;
1908       size_t max = 0;
1909       size_t max_contig = 0;
1910       size_t empty_contig = 0;
1911 
1912       size_t total_used = 0;
1913       size_t total_free = 0;
1914       size_t total_free_ext = 0;
1915 
1916       for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator);
1917            idx <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator); idx++) {
1918         if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx)) {
1919           ShenandoahHeapRegion *r = _heap->get_region(idx);
1920           size_t free = alloc_capacity(r);
1921           max = MAX2(max, free);
1922           if (r->is_empty()) {
1923             total_free_ext += free;
1924             if (last_idx + 1 == idx) {
1925               empty_contig++;
1926             } else {
1927               empty_contig = 1;
1928             }
1929           } else {
1930             empty_contig = 0;
1931           }
1932           total_used += r->used();
1933           total_free += free;
1934           max_contig = MAX2(max_contig, empty_contig);
1935           last_idx = idx;
1936         }
1937       }
1938 
1939       size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
1940       size_t free = capacity() - used();
1941 
1942       // Since certain regions that belonged to the Mutator free partition at the time of most recent rebuild may have been
1943       // retired, the sum of used and capacities within regions that are still in the Mutator free partition may not match
1944       // my internally tracked values of used() and free().
1945       assert(free == total_free, "Free memory should match");
1946       ls.print("Free: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s regular, " SIZE_FORMAT "%s humongous, ",
1947                byte_size_in_proper_unit(total_free),    proper_unit_for_byte_size(total_free),
1948                byte_size_in_proper_unit(max),           proper_unit_for_byte_size(max),
1949                byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous)
1950       );
1951 
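           // The percentages below cover only the Mutator partition: external fragmentation is
           // 100 * (1 - largest_contiguous_empty / total_empty_free), and internal fragmentation is the average
           // used fraction of regions still in the partition, i.e. 100 * (total_used / region_count) / region_size.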
1952       ls.print("Frag: ");
1953       size_t frag_ext;
1954       if (total_free_ext > 0) {
1955         frag_ext = 100 - (100 * max_humongous / total_free_ext);
1956       } else {
1957         frag_ext = 0;
1958       }
1959       ls.print(SIZE_FORMAT "%% external, ", frag_ext);
1960 
1961       size_t frag_int;
1962       if (_partitions.count(ShenandoahFreeSetPartitionId::Mutator) > 0) {
1963         frag_int = (100 * (total_used / _partitions.count(ShenandoahFreeSetPartitionId::Mutator))
1964                     / ShenandoahHeapRegion::region_size_bytes());
1965       } else {
1966         frag_int = 0;
1967       }
1968       ls.print(SIZE_FORMAT "%% internal; ", frag_int);
1969       ls.print("Used: " SIZE_FORMAT "%s, Mutator Free: " SIZE_FORMAT,
1970                byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used),
1971                _partitions.count(ShenandoahFreeSetPartitionId::Mutator));
1972     }
1973 
1974     {
1975       size_t max = 0;
1976       size_t total_free = 0;
1977       size_t total_used = 0;
1978 
1979       for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector);
1980            idx <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector); idx++) {
1981         if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, idx)) {
1982           ShenandoahHeapRegion *r = _heap->get_region(idx);
1983           size_t free = alloc_capacity(r);
1984           max = MAX2(max, free);
1985           total_free += free;
1986           total_used += r->used();
1987         }
1988       }
1989       ls.print(" Collector Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s; Used: " SIZE_FORMAT "%s",
1990                byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
1991                byte_size_in_proper_unit(max),        proper_unit_for_byte_size(max),
1992                byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used));
1993     }
1994 
1995     if (_heap->mode()->is_generational()) {
1996       size_t max = 0;
1997       size_t total_free = 0;
1998       size_t total_used = 0;
1999 
2000       for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector);
2001            idx <= _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector); idx++) {
2002         if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, idx)) {
2003           ShenandoahHeapRegion *r = _heap->get_region(idx);
2004           size_t free = alloc_capacity(r);
2005           max = MAX2(max, free);
2006           total_free += free;
2007           total_used += r->used();
2008         }
2009       }
2010       ls.print_cr(" Old Collector Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s; Used: " SIZE_FORMAT "%s",
2011                   byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
2012                   byte_size_in_proper_unit(max),        proper_unit_for_byte_size(max),
2013                   byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used));
2014     }
2015   }
2016 }
2017 
2018 HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
2019   shenandoah_assert_heaplocked();
2020   if (ShenandoahHeapRegion::requires_humongous(req.size())) {
2021     switch (req.type()) {
2022       case ShenandoahAllocRequest::_alloc_shared:
2023       case ShenandoahAllocRequest::_alloc_shared_gc:
2024         in_new_region = true;
2025         return allocate_contiguous(req);
2026       case ShenandoahAllocRequest::_alloc_plab:
2027       case ShenandoahAllocRequest::_alloc_gclab:
2028       case ShenandoahAllocRequest::_alloc_tlab:
2029         in_new_region = false;
2030         assert(false, "Trying to allocate TLAB in humongous region: " SIZE_FORMAT, req.size());
2031         return nullptr;
2032       default:
2033         ShouldNotReachHere();
2034         return nullptr;
2035     }
2036   } else {
2037     return allocate_single(req, in_new_region);
2038   }
2039 }
2040 
2041 void ShenandoahFreeSet::print_on(outputStream* out) const {
2042   out->print_cr("Mutator Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::Mutator));
2043   idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
2044   for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); index <= rightmost; ) {
2045     assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, index),
2046            "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
2047     _heap->get_region(index)->print_on(out);
2048     index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, index + 1);
2049   }
2050   out->print_cr("Collector Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::Collector));
2051   rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector);
2052   for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector); index <= rightmost; ) {
2053     assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, index),
2054            "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
2055     _heap->get_region(index)->print_on(out);
2056     index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Collector, index + 1);
2057   }
2058   if (_heap->mode()->is_generational()) {
2059     out->print_cr("Old Collector Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::OldCollector));
2060     for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector);
2061          index <= _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector); index++) {
2062       if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, index)) {
2063         _heap->get_region(index)->print_on(out);
2064       }
2065     }
2066   }
2067 }
2068 
2069 double ShenandoahFreeSet::internal_fragmentation() {
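       // The metric computed here is 1 - (sum of used_i^2) / (region_size * sum of used_i) over regions in the
       // Mutator partition: one minus the used-byte-weighted average fill.  Illustrative example: two regions, one
       // 100% used and one 10% used, give 1 - (1.0 + 0.01) / (1.0 + 0.1), roughly 8% internal fragmentation.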
2070   double squared = 0;
2071   double linear = 0;
2072 
2073   idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
2074   for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); index <= rightmost; ) {
2075     assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, index),
2076            "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
2077     ShenandoahHeapRegion* r = _heap->get_region(index);
2078     size_t used = r->used();
2079     squared += used * used;
2080     linear += used;
2081     index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, index + 1);
2082   }
2083 
2084   if (linear > 0) {
2085     double s = squared / (ShenandoahHeapRegion::region_size_bytes() * linear);
2086     return 1 - s;
2087   } else {
2088     return 0;
2089   }
2090 }
2091 
2092 double ShenandoahFreeSet::external_fragmentation() {
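       // The metric computed here is 1 - (longest run of contiguous empty regions) / (total empty regions) within
       // the Mutator partition.  Illustrative example: ten empty regions whose longest contiguous run is four yield
       // 1 - 4/10 = 0.6.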
2093   idx_t last_idx = 0;
2094   size_t max_contig = 0;
2095   size_t empty_contig = 0;
2096 
2097   size_t free = 0;
2098 
2099   idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
2100   for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); index <= rightmost; ) {
2101     assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, index),
2102            "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
2103     ShenandoahHeapRegion* r = _heap->get_region(index);
2104     if (r->is_empty()) {
2105       free += ShenandoahHeapRegion::region_size_bytes();
2106       if (last_idx + 1 == index) {
2107         empty_contig++;
2108       } else {
2109         empty_contig = 1;
2110       }
2111     } else {
2112       empty_contig = 0;
2113     }
2114     max_contig = MAX2(max_contig, empty_contig);
2115     last_idx = index;
2116     index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, index + 1);
2117   }
2118 
2119   if (free > 0) {
2120     return 1 - (1.0 * max_contig * ShenandoahHeapRegion::region_size_bytes() / free);
2121   } else {
2122     return 0;
2123   }
2124 }
2125 