src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp

   1 /*
   2  * Copyright (c) 2016, 2021, Red Hat, Inc. All rights reserved.

   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/tlab_globals.hpp"


  27 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  28 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  29 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  30 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"



  31 #include "logging/logStream.hpp"
  32 #include "memory/resourceArea.hpp"
  33 #include "runtime/orderAccess.hpp"
  34 
  35 ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
  36   _heap(heap),
  37   _mutator_free_bitmap(max_regions, mtGC),
  38   _collector_free_bitmap(max_regions, mtGC),
  39   _max(max_regions)
  40 {
  41   clear_internal();
  42 }
  43 
  44 void ShenandoahFreeSet::increase_used(size_t num_bytes) {
  45   shenandoah_assert_heaplocked();
  46   _used += num_bytes;
  47 
  48   assert(_used <= _capacity, "must not use more than we have: used: " SIZE_FORMAT
  49          ", capacity: " SIZE_FORMAT ", num_bytes: " SIZE_FORMAT, _used, _capacity, num_bytes);
  50 }
  51 
  52 bool ShenandoahFreeSet::is_mutator_free(size_t idx) const {
  53   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
  54           idx, _max, _mutator_leftmost, _mutator_rightmost);
  55   return _mutator_free_bitmap.at(idx);
  56 }
  57 
  58 bool ShenandoahFreeSet::is_collector_free(size_t idx) const {
  59   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
  60           idx, _max, _collector_leftmost, _collector_rightmost);
  61   return _collector_free_bitmap.at(idx);
  62 }
  63 
  64 HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {


  65   // Scan the bitmap looking for a first fit.
  66   //
  67   // Leftmost and rightmost bounds provide enough caching to walk bitmap efficiently. Normally,
  68   // we would find the region to allocate at right away.
  69   //
  70   // Allocations are biased: new application allocs go to beginning of the heap, and GC allocs
  71   // go to the end. This makes application allocation faster, because we would clear lots
  72   // of regions from the beginning most of the time.
  73   //
  74   // Free set maintains mutator and collector views, and normally they allocate in their views only,
  75   // unless we special-case stealing and mixed allocations.
  76 
  77   switch (req.type()) {
  78     case ShenandoahAllocRequest::_alloc_tlab:
  79     case ShenandoahAllocRequest::_alloc_shared: {
  80 
  81       // Try to allocate in the mutator view
  82       for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
  83         if (is_mutator_free(idx)) {
  84           HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
  85           if (result != nullptr) {
  86             return result;
  87           }
  88         }
  89       }
  90 
  91       // There is no recovery. Mutator does not touch collector view at all.
  92       break;
  93     }
  94     case ShenandoahAllocRequest::_alloc_gclab:
  95     case ShenandoahAllocRequest::_alloc_shared_gc: {
  96       // size_t is unsigned, need to dodge underflow when _leftmost = 0



  97 
  98       // Fast-path: try to allocate in the collector view first
  99       for (size_t c = _collector_rightmost + 1; c > _collector_leftmost; c--) {
 100         size_t idx = c - 1;
 101         if (is_collector_free(idx)) {
 102           HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
 103           if (result != nullptr) {
 104             return result;
 105           }
 106         }
 107       }
 108 
 109       // No dice. Can we borrow space from mutator view?
 110       if (!ShenandoahEvacReserveOverflow) {
 111         return nullptr;
 112       }
 113 
 114       // Try to steal the empty region from the mutator view
 115       for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
 116         size_t idx = c - 1;
 117         if (is_mutator_free(idx)) {
 118           ShenandoahHeapRegion* r = _heap->get_region(idx);
 119           if (can_allocate_from(r)) {
 120             flip_to_gc(r);
 121             HeapWord *result = try_allocate_in(r, req, in_new_region);
 122             if (result != nullptr) {
 123               return result;
 124             }
 125           }
 126         }
 127       }
 128 
 129       // No dice. Do not try to mix mutator and GC allocations, because
 130       // URWM moves due to GC allocations would expose unparsable mutator
 131       // allocations.
 132 
 133       break;
 134     }
 135     default:
 136       ShouldNotReachHere();
 137   }
 138 
 139   return nullptr;
 140 }
 141 
 142 HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
 143   assert (!has_no_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());
 144 
 145   if (_heap->is_concurrent_weak_root_in_progress() &&
 146       r->is_trash()) {
 147     return nullptr;
 148   }
 149 
 150   try_recycle_trashed(r);
 151 
 152   in_new_region = r->is_empty();
 153 

 154   HeapWord* result = nullptr;
 155   size_t size = req.size();
 156 
 157   if (ShenandoahElasticTLAB && req.is_lab_alloc()) {
 158     size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
 159     if (size > free) {
 160       size = free;
 161     }
 162     if (size >= req.min_size()) {
 163       result = r->allocate(size, req.type());
 164       assert (result != nullptr, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size);
 165     }
 166   } else {
 167     result = r->allocate(size, req.type());
 168   }
 169 

 170   if (result != nullptr) {
 171     // Allocation successful, bump stats:
 172     if (req.is_mutator_alloc()) {
 173       increase_used(size * HeapWordSize);
 174     }
 175 
 176     // Record actual allocation size
 177     req.set_actual_size(size);
 178 
 179     if (req.is_gc_alloc()) {
 180       r->set_update_watermark(r->top());
 181     }
 182   }
 183 
 184   if (result == nullptr || has_no_alloc_capacity(r)) {
 185     // Region cannot afford this or future allocations. Retire it.
 186     //
 187     // While this seems a bit harsh, especially in the case when this large allocation does not
 188     // fit, but the next small one would, we risk inflating scan times when lots of
 189     // almost-full regions precede the fully-empty region where we want to allocate the entire TLAB.
 190     // TODO: Record first fully-empty region, and use that for large allocations
 191 
 192     // Record the remainder as allocation waste

 193     if (req.is_mutator_alloc()) {
 194       size_t waste = r->free();
 195       if (waste > 0) {
 196         increase_used(waste);
 197         _heap->notify_mutator_alloc_words(waste >> LogHeapWordSize, true);

 198       }
 199     }
 200 
 201     size_t num = r->index();
 202     _collector_free_bitmap.clear_bit(num);
 203     _mutator_free_bitmap.clear_bit(num);
 204     // Touched the bounds? Need to update:
 205     if (touches_bounds(num)) {
 206       adjust_bounds();
 207     }
 208     assert_bounds();
 209   }
 210   return result;
 211 }
 212 
 213 bool ShenandoahFreeSet::touches_bounds(size_t num) const {
 214   return num == _collector_leftmost || num == _collector_rightmost || num == _mutator_leftmost || num == _mutator_rightmost;
 215 }
 216 
 217 void ShenandoahFreeSet::recompute_bounds() {
 218   // Reset to the most pessimistic case:
 219   _mutator_rightmost = _max - 1;
 220   _mutator_leftmost = 0;
 221   _collector_rightmost = _max - 1;
 222   _collector_leftmost = 0;
 223 
 224   // ...and adjust from there
 225   adjust_bounds();
 226 }
 227 
 228 void ShenandoahFreeSet::adjust_bounds() {
 229   // Rewind both mutator bounds until the next bit.
 230   while (_mutator_leftmost < _max && !is_mutator_free(_mutator_leftmost)) {
 231     _mutator_leftmost++;
 232   }
 233   while (_mutator_rightmost > 0 && !is_mutator_free(_mutator_rightmost)) {
 234     _mutator_rightmost--;
 235   }
 236   // Rewind both collector bounds until the next bit.
 237   while (_collector_leftmost < _max && !is_collector_free(_collector_leftmost)) {
 238     _collector_leftmost++;
 239   }
 240   while (_collector_rightmost > 0 && !is_collector_free(_collector_rightmost)) {
 241     _collector_rightmost--;
 242   }
 243 }
 244 
 245 HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
 246   shenandoah_assert_heaplocked();
 247 
 248   size_t words_size = req.size();
 249   size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
 250 
 251   // No regions left to satisfy allocation, bye.
 252   if (num > mutator_count()) {
 253     return nullptr;
 254   }
 255 
 256   // Find the continuous interval of $num regions, starting from $beg and ending in $end,
 257   // inclusive. Contiguous allocations are biased to the beginning.
 258 
 259   size_t beg = _mutator_leftmost;
 260   size_t end = beg;
 261 
 262   while (true) {
 263     if (end >= _max) {
 264       // Hit the end, goodbye
 265       return nullptr;
 266     }
 267 
 268     // If regions are not adjacent, then the current [beg; end] is useless, and we may fast-forward.
 269     // If region is not completely free, the current [beg; end] is useless, and we may fast-forward.
 270     if (!is_mutator_free(end) || !can_allocate_from(_heap->get_region(end))) {
 271       end++;
 272       beg = end;
 273       continue;
 274     }
 275 
 276     if ((end - beg + 1) == num) {
 277       // found the match
 278       break;
 279     }
 280 
 281     end++;
 282   }
 283 
 284   size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();

 285 
 286   // Initialize regions:
 287   for (size_t i = beg; i <= end; i++) {
 288     ShenandoahHeapRegion* r = _heap->get_region(i);
 289     try_recycle_trashed(r);
 290 
 291     assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous");
 292     assert(r->is_empty(), "Should be empty");
 293 
 294     if (i == beg) {
 295       r->make_humongous_start();
 296     } else {
 297       r->make_humongous_cont();
 298     }
 299 
 300     // Trailing region may be non-full, record the remainder there
 301     size_t used_words;
 302     if ((i == end) && (remainder != 0)) {
 303       used_words = remainder;
 304     } else {
 305       used_words = ShenandoahHeapRegion::region_size_words();
 306     }
 307 


 308     r->set_top(r->bottom() + used_words);
 309 
 310     _mutator_free_bitmap.clear_bit(r->index());

 311   }

 312 
 313   // While individual regions report their true use, all humongous regions are
 314   // marked used in the free set.
 315   increase_used(ShenandoahHeapRegion::region_size_bytes() * num);
 316 
 317   if (remainder != 0) {
 318     // Record this remainder as allocation waste
 319     _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);
 320   }
 321 
 322   // Allocated at left/rightmost? Move the bounds appropriately.
 323   if (beg == _mutator_leftmost || end == _mutator_rightmost) {
 324     adjust_bounds();
 325   }
 326   assert_bounds();
 327 
 328   req.set_actual_size(words_size);
 329   return _heap->get_region(beg)->bottom();
 330 }
 331 
 332 bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) {



 333   return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
 334 }
 335 
 336 size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) {
 337   if (r->is_trash()) {
 338     // This would be recycled on allocation path
 339     return ShenandoahHeapRegion::region_size_bytes();
 340   } else {
 341     return r->free();
 342   }
 343 }
 344 
 345 bool ShenandoahFreeSet::has_no_alloc_capacity(ShenandoahHeapRegion *r) {
 346   return alloc_capacity(r) == 0;
 347 }
 348 
 349 void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
 350   if (r->is_trash()) {
 351     _heap->decrease_used(r->used());
 352     r->recycle();
 353   }
 354 }
 355 
 356 void ShenandoahFreeSet::recycle_trash() {
 357   // lock is not reentrant, check we don't have it
 358   shenandoah_assert_not_heaplocked();
 359 
 360   for (size_t i = 0; i < _heap->num_regions(); i++) {
 361     ShenandoahHeapRegion* r = _heap->get_region(i);
 362     if (r->is_trash()) {
 363       ShenandoahHeapLocker locker(_heap->lock());
 364       try_recycle_trashed(r);
 365     }
 366     SpinPause(); // allow allocators to take the lock
 367   }
 368 }
 369 
 370 void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
 371   size_t idx = r->index();
 372 
 373   assert(_mutator_free_bitmap.at(idx), "Should be in mutator view");

 374   assert(can_allocate_from(r), "Should not be allocated");
 375 
 376   _mutator_free_bitmap.clear_bit(idx);
 377   _collector_free_bitmap.set_bit(idx);
 378   _collector_leftmost = MIN2(idx, _collector_leftmost);
 379   _collector_rightmost = MAX2(idx, _collector_rightmost);
 380 
 381   _capacity -= alloc_capacity(r);


 382 
 383   if (touches_bounds(idx)) {
 384     adjust_bounds();
 385   }
 386   assert_bounds();
 387 }
 388 
 389 void ShenandoahFreeSet::clear() {
 390   shenandoah_assert_heaplocked();
 391   clear_internal();
 392 }
 393 
 394 void ShenandoahFreeSet::clear_internal() {
 395   _mutator_free_bitmap.clear();
 396   _collector_free_bitmap.clear();
 397   _mutator_leftmost = _max;
 398   _mutator_rightmost = 0;
 399   _collector_leftmost = _max;
 400   _collector_rightmost = 0;
 401   _capacity = 0;
 402   _used = 0;
 403 }
 404 
 405 void ShenandoahFreeSet::rebuild() {
 406   shenandoah_assert_heaplocked();
 407   clear();
 408 
 409   for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
 410     ShenandoahHeapRegion* region = _heap->get_region(idx);
 411     if (region->is_alloc_allowed() || region->is_trash()) {
 412       assert(!region->is_cset(), "Shouldn't be adding those to the free set");
 413 
 414       // Do not add regions that would surely fail allocation
 415       if (has_no_alloc_capacity(region)) continue;
 416 
 417       _capacity += alloc_capacity(region);
 418       assert(_used <= _capacity, "must not use more than we have");
 419 
 420       assert(!is_mutator_free(idx), "We are about to add it, it shouldn't be there already");
 421       _mutator_free_bitmap.set_bit(idx);
 422     }
 423   }
 424 
 425   // Evac reserve: reserve trailing space for evacuations
 426   size_t to_reserve = _heap->max_capacity() / 100 * ShenandoahEvacReserve;
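      // Illustrative arithmetic only (not part of this change): ShenandoahEvacReserve is a percentage,
      // so assuming its default value of 5 and a 1 GB max heap, to_reserve = 1 GB / 100 * 5, i.e. about 51 MB.
      // Roughly that many bytes of trailing free regions are flipped from the Mutator to the Collector view below.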
 427   size_t reserved = 0;
 428 
 429   for (size_t idx = _heap->num_regions() - 1; idx > 0; idx--) {
 430     if (reserved >= to_reserve) break;
 431 
 432     ShenandoahHeapRegion* region = _heap->get_region(idx);
 433     if (_mutator_free_bitmap.at(idx) && can_allocate_from(region)) {
 434       _mutator_free_bitmap.clear_bit(idx);
 435       _collector_free_bitmap.set_bit(idx);
 436       size_t ac = alloc_capacity(region);
 437       _capacity -= ac;
 438       reserved += ac;
 439     }
 440   }
 441 
 442   recompute_bounds();
 443   assert_bounds();
 444 }
 445 
 446 void ShenandoahFreeSet::log_status() {
 447   shenandoah_assert_heaplocked();
 448 
 449   LogTarget(Info, gc, ergo) lt;
 450   if (lt.is_enabled()) {
 451     ResourceMark rm;
 452     LogStream ls(lt);
 453 
 454     {
 455       size_t last_idx = 0;
 456       size_t max = 0;
 457       size_t max_contig = 0;
 458       size_t empty_contig = 0;
 459 
 460       size_t total_used = 0;
 461       size_t total_free = 0;
 462       size_t total_free_ext = 0;
 463 
 464       for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
 465         if (is_mutator_free(idx)) {
 466           ShenandoahHeapRegion *r = _heap->get_region(idx);
 467           size_t free = alloc_capacity(r);
 468 
 469           max = MAX2(max, free);
 470 
 471           if (r->is_empty()) {
 472             total_free_ext += free;
 473             if (last_idx + 1 == idx) {
 474               empty_contig++;
 475             } else {
 476               empty_contig = 1;
 477             }
 478           } else {
 479             empty_contig = 0;
 480           }
 481 
 482           total_used += r->used();
 483           total_free += free;
 484 
 485           max_contig = MAX2(max_contig, empty_contig);
 486           last_idx = idx;
 487         }
 488       }
 489 
 490       size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
 491       size_t free = capacity() - used();
 492 
 493       ls.print("Free: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s regular, " SIZE_FORMAT "%s humongous, ",
 494                byte_size_in_proper_unit(total_free),    proper_unit_for_byte_size(total_free),
 495                byte_size_in_proper_unit(max),           proper_unit_for_byte_size(max),
 496                byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous)
 497       );
 498 
 499       ls.print("Frag: ");
 500       size_t frag_ext;
 501       if (total_free_ext > 0) {
 502         frag_ext = 100 - (100 * max_humongous / total_free_ext);
 503       } else {
 504         frag_ext = 0;
 505       }
 506       ls.print(SIZE_FORMAT "%% external, ", frag_ext);
 507 
 508       size_t frag_int;
 509       if (mutator_count() > 0) {
 510         frag_int = (100 * (total_used / mutator_count()) / ShenandoahHeapRegion::region_size_bytes());
 511       } else {
 512         frag_int = 0;
 513       }
 514       ls.print(SIZE_FORMAT "%% internal; ", frag_int);


 515     }
 516 
 517     {
 518       size_t max = 0;
 519       size_t total_free = 0;

 520 
 521       for (size_t idx = _collector_leftmost; idx <= _collector_rightmost; idx++) {
 522         if (is_collector_free(idx)) {
 523           ShenandoahHeapRegion *r = _heap->get_region(idx);
 524           size_t free = alloc_capacity(r);
 525           max = MAX2(max, free);
 526           total_free += free;

 527         }
 528       }
 529 
 530       ls.print_cr("Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s",
 531                   byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
 532                   byte_size_in_proper_unit(max),        proper_unit_for_byte_size(max));

 533     }
 534   }
 535 }
 536 
 537 HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
 538   shenandoah_assert_heaplocked();
 539   assert_bounds();
 540 

 541   if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
 542     switch (req.type()) {
 543       case ShenandoahAllocRequest::_alloc_shared:
 544       case ShenandoahAllocRequest::_alloc_shared_gc:
 545         in_new_region = true;
 546         return allocate_contiguous(req);

 547       case ShenandoahAllocRequest::_alloc_gclab:
 548       case ShenandoahAllocRequest::_alloc_tlab:
 549         in_new_region = false;
 550         assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
 551                req.size(), ShenandoahHeapRegion::humongous_threshold_words());
 552         return nullptr;
 553       default:
 554         ShouldNotReachHere();
 555         return nullptr;
 556     }
 557   } else {
 558     return allocate_single(req, in_new_region);
 559   }
 560 }
 561 
 562 size_t ShenandoahFreeSet::unsafe_peek_free() const {
 563   // Deliberately not locked, this method is unsafe when free set is modified.
 564 
 565   for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
 566     if (index < _max && is_mutator_free(index)) {
 567       ShenandoahHeapRegion* r = _heap->get_region(index);
 568       if (r->free() >= MinTLABSize) {
 569         return r->free();
 570       }
 571     }
 572   }
 573 
 574   // It appears that no regions are left
 575   return 0;
 576 }
 577 
 578 void ShenandoahFreeSet::print_on(outputStream* out) const {
 579   out->print_cr("Mutator Free Set: " SIZE_FORMAT "", mutator_count());
 580   for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
 581     if (is_mutator_free(index)) {
 582       _heap->get_region(index)->print_on(out);
 583     }
 584   }
 585   out->print_cr("Collector Free Set: " SIZE_FORMAT "", collector_count());
 586   for (size_t index = _collector_leftmost; index <= _collector_rightmost; index++) {
 587     if (is_collector_free(index)) {
 588       _heap->get_region(index)->print_on(out);
 589     }
 590   }
 591 }
 592 
 593 /*
 594  * Internal fragmentation metric: describes how fragmented the heap regions are.
 595  *
 596  * It is derived as:
 597  *
 598  *               sum(used[i]^2, i=0..k)
 599  *   IF = 1 - ------------------------------
 600  *              C * sum(used[i], i=0..k)
 601  *
 602  * ...where k is the number of regions in computation, C is the region capacity, and
 603  * used[i] is the used space in the region.
 604  *
 605  * The non-linearity causes IF to be lower for the cases where the same total heap
 606  * used is densely packed. For example:
 607  *   a) Heap is completely full  => IF = 0
 608  *   b) Heap is half full, first 50% regions are completely full => IF = 0
 609  *   c) Heap is half full, each region is 50% full => IF = 1/2
 610  *   d) Heap is quarter full, first 50% regions are completely full => IF = 0
 611  *   e) Heap is quarter full, each region is 25% full => IF = 3/4
 612  *   f) Heap has one small object per each region => IF =~ 1
 613  */
 614 double ShenandoahFreeSet::internal_fragmentation() {
 615   double squared = 0;
 616   double linear = 0;
 617   int count = 0;
 618 
 619   for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
 620     if (is_mutator_free(index)) {
 621       ShenandoahHeapRegion* r = _heap->get_region(index);
 622       size_t used = r->used();
 623       squared += used * used;
 624       linear += used;
 625       count++;
 626     }
 627   }
 628 
 629   if (count > 0) {
 630     double s = squared / (ShenandoahHeapRegion::region_size_bytes() * linear);
 631     return 1 - s;
 632   } else {
 633     return 0;
 634   }
 635 }
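
As a quick sanity check of the formula above, here is a minimal standalone sketch (illustrative only, not part of this change; the region capacity value is made up) that reproduces example cases (c) and (e) from the comment:

#include <cstdio>

// Evaluate the internal fragmentation metric from the comment above for a given
// set of per-region used sizes and a common region capacity.
static double internal_fragmentation(const double* used, int k, double capacity) {
  double squared = 0.0;
  double linear  = 0.0;
  for (int i = 0; i < k; i++) {
    squared += used[i] * used[i];
    linear  += used[i];
  }
  return (linear == 0.0) ? 0.0 : 1.0 - squared / (capacity * linear);
}

int main() {
  const double C = 1024.0;                                 // made-up region capacity, in bytes
  const double half[4]    = { C/2, C/2, C/2, C/2 };        // case (c): every region 50% full
  const double quarter[4] = { C/4, C/4, C/4, C/4 };        // case (e): every region 25% full
  printf("case (c): IF = %.2f\n", internal_fragmentation(half, 4, C));     // prints 0.50
  printf("case (e): IF = %.2f\n", internal_fragmentation(quarter, 4, C));  // prints 0.75
  return 0;
}

Packing the same used bytes into fewer regions drives the squared term up relative to the linear term, which is why densely packed heaps report a lower IF.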
 636 
 637 /*
 638  * External fragmentation metric: describes how fragmented the heap is.
 639  *
 640  * It is derived as:
 641  *
 642  *   EF = 1 - largest_contiguous_free / total_free
 643  *
 644  * For example:
 645  *   a) Heap is completely empty => EF = 0
 646  *   b) Heap is completely full => EF = 0
 647  *   c) Heap is first-half full => EF = 1/2
 648  *   d) Heap is half full, full and empty regions interleave => EF =~ 1
 649  */
 650 double ShenandoahFreeSet::external_fragmentation() {
 651   size_t last_idx = 0;
 652   size_t max_contig = 0;
 653   size_t empty_contig = 0;
 654 
 655   size_t free = 0;
 656 
 657   for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
 658     if (is_mutator_free(index)) {
 659       ShenandoahHeapRegion* r = _heap->get_region(index);
 660       if (r->is_empty()) {
 661         free += ShenandoahHeapRegion::region_size_bytes();
 662         if (last_idx + 1 == index) {
 663           empty_contig++;
 664         } else {
 665           empty_contig = 1;
 666         }
 667       } else {
 668         empty_contig = 0;
 669       }
 670 
 671       max_contig = MAX2(max_contig, empty_contig);
 672       last_idx = index;
 673     }
 674   }
 675 
 676   if (free > 0) {
 677     return 1 - (1.0 * max_contig * ShenandoahHeapRegion::region_size_bytes() / free);
 678   } else {
 679     return 0;
 680   }
 681 }
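
Similarly, a minimal standalone sketch (illustrative only, not part of this change; the region layout is made up) reproduces example case (d) on a small 8-region heap, where EF approaches 1 as the heap grows:

#include <cstdio>

// Evaluate the external fragmentation metric from the comment above for a made-up
// region layout: true means the region is completely empty, false means it is used.
static double external_fragmentation(const bool* is_empty, int n, double region_bytes) {
  double free_bytes = 0.0;
  int max_contig = 0;
  int empty_contig = 0;
  for (int i = 0; i < n; i++) {
    if (is_empty[i]) {
      free_bytes += region_bytes;
      empty_contig++;
      if (empty_contig > max_contig) max_contig = empty_contig;
    } else {
      empty_contig = 0;
    }
  }
  return (free_bytes == 0.0) ? 0.0 : 1.0 - (max_contig * region_bytes) / free_bytes;
}

int main() {
  const double R = 1024.0;  // made-up region size, in bytes
  // Case (d): half full, full and empty regions interleave => EF tends to 1 on larger heaps.
  const bool interleaved[8] = { false, true, false, true, false, true, false, true };
  printf("case (d): EF = %.2f\n", external_fragmentation(interleaved, 8, R));  // prints 0.75
  return 0;
}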
 682 
 683 #ifdef ASSERT
 684 void ShenandoahFreeSet::assert_bounds() const {
 685   // Performance invariants. Failing these would not break the free set, but performance
 686   // would suffer.
 687   assert (_mutator_leftmost <= _max, "leftmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, _mutator_leftmost,  _max);
 688   assert (_mutator_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _mutator_rightmost, _max);
 689 
 690   assert (_mutator_leftmost == _max || is_mutator_free(_mutator_leftmost),  "leftmost region should be free: " SIZE_FORMAT,  _mutator_leftmost);
 691   assert (_mutator_rightmost == 0   || is_mutator_free(_mutator_rightmost), "rightmost region should be free: " SIZE_FORMAT, _mutator_rightmost);
 692 
 693   size_t beg_off = _mutator_free_bitmap.find_first_set_bit(0);
 694   size_t end_off = _mutator_free_bitmap.find_first_set_bit(_mutator_rightmost + 1);
 695   assert (beg_off >= _mutator_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _mutator_leftmost);
 696   assert (end_off == _max,      "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, _mutator_rightmost);
 697 
 698   assert (_collector_leftmost <= _max, "leftmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, _collector_leftmost,  _max);
 699   assert (_collector_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _collector_rightmost, _max);
 700 
 701   assert (_collector_leftmost == _max || is_collector_free(_collector_leftmost),  "leftmost region should be free: " SIZE_FORMAT,  _collector_leftmost);
 702   assert (_collector_rightmost == 0   || is_collector_free(_collector_rightmost), "rightmost region should be free: " SIZE_FORMAT, _collector_rightmost);
 703 
 704   beg_off = _collector_free_bitmap.find_first_set_bit(0);
 705   end_off = _collector_free_bitmap.find_first_set_bit(_collector_rightmost + 1);
 706   assert (beg_off >= _collector_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _collector_leftmost);
 707   assert (end_off == _max,      "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, _collector_rightmost);
 708 }
 709 #endif

   1 /*
   2  * Copyright (c) 2016, 2021, Red Hat, Inc. All rights reserved.
   3  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "gc/shared/tlab_globals.hpp"
  28 #include "gc/shenandoah/shenandoahAffiliation.hpp"
  29 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  30 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  31 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  32 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  33 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  34 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  35 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
  36 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  37 #include "logging/logStream.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "runtime/orderAccess.hpp"
  40 
  41 ShenandoahSetsOfFree::ShenandoahSetsOfFree(size_t max_regions, ShenandoahFreeSet* free_set) :
  42     _max(max_regions),
  43     _free_set(free_set),
  44     _region_size_bytes(ShenandoahHeapRegion::region_size_bytes())
  45 {
  46   _membership = NEW_C_HEAP_ARRAY(ShenandoahFreeMemoryType, max_regions, mtGC);
  47   clear_internal();
  48 }
  49 
  50 ShenandoahSetsOfFree::~ShenandoahSetsOfFree() {
  51   FREE_C_HEAP_ARRAY(ShenandoahFreeMemoryType, _membership);
  52 }
  53 
  54 
  55 void ShenandoahSetsOfFree::clear_internal() {
  56   for (size_t idx = 0; idx < _max; idx++) {
  57     _membership[idx] = NotFree;
  58   }
  59 
  60   for (size_t idx = 0; idx < NumFreeSets; idx++) {
  61     _leftmosts[idx] = _max;
  62     _rightmosts[idx] = 0;
  63     _leftmosts_empty[idx] = _max;
  64     _rightmosts_empty[idx] = 0;
  65     _capacity_of[idx] = 0;
  66     _used_by[idx] = 0;
  67   }
  68 
  69   _left_to_right_bias[Mutator] = true;
  70   _left_to_right_bias[Collector] = false;
  71   _left_to_right_bias[OldCollector] = false;
  72 
  73   _region_counts[Mutator] = 0;
  74   _region_counts[Collector] = 0;
  75   _region_counts[OldCollector] = 0;
  76   _region_counts[NotFree] = _max;
  77 }
  78 
  79 void ShenandoahSetsOfFree::clear_all() {
  80   clear_internal();
  81 }
  82 
  83 void ShenandoahSetsOfFree::increase_used(ShenandoahFreeMemoryType which_set, size_t bytes) {
  84   assert (which_set > NotFree && which_set < NumFreeSets, "Set must correspond to a valid freeset");
  85   _used_by[which_set] += bytes;
  86   assert (_used_by[which_set] <= _capacity_of[which_set],
  87           "Must not use (" SIZE_FORMAT ") more than capacity (" SIZE_FORMAT ") after increase by " SIZE_FORMAT,
  88           _used_by[which_set], _capacity_of[which_set], bytes);
  89 }
  90 
  91 inline void ShenandoahSetsOfFree::shrink_bounds_if_touched(ShenandoahFreeMemoryType set, size_t idx) {
  92   if (idx == _leftmosts[set]) {
  93     while ((_leftmosts[set] < _max) && !in_free_set(_leftmosts[set], set)) {
  94       _leftmosts[set]++;
  95     }
  96     if (_leftmosts_empty[set] < _leftmosts[set]) {
  97       // This gets us closer to where we need to be; we'll scan further when leftmosts_empty is requested.
  98       _leftmosts_empty[set] = _leftmosts[set];
  99     }
 100   }
 101   if (idx == _rightmosts[set]) {
 102     while (_rightmosts[set] > 0 && !in_free_set(_rightmosts[set], set)) {
 103       _rightmosts[set]--;
 104     }
 105     if (_rightmosts_empty[set] > _rightmosts[set]) {
 106       // This gets us closer to where we need to be; we'll scan further when rightmosts_empty is requested.
 107       _rightmosts_empty[set] = _rightmosts[set];
 108     }
 109   }
 110 }
 111 
 112 inline void ShenandoahSetsOfFree::expand_bounds_maybe(ShenandoahFreeMemoryType set, size_t idx, size_t region_capacity) {
 113   if (region_capacity == _region_size_bytes) {
 114     if (_leftmosts_empty[set] > idx) {
 115       _leftmosts_empty[set] = idx;
 116     }
 117     if (_rightmosts_empty[set] < idx) {
 118       _rightmosts_empty[set] = idx;
 119     }
 120   }
 121   if (_leftmosts[set] > idx) {
 122     _leftmosts[set] = idx;
 123   }
 124   if (_rightmosts[set] < idx) {
 125     _rightmosts[set] = idx;
 126   }
 127 }
 128 
 129 void ShenandoahSetsOfFree::remove_from_free_sets(size_t idx) {
 130   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 131   ShenandoahFreeMemoryType orig_set = membership(idx);
 132   assert (orig_set > NotFree && orig_set < NumFreeSets, "Cannot remove from free sets if not already free");
 133   _membership[idx] = NotFree;
 134   shrink_bounds_if_touched(orig_set, idx);
 135 
 136   _region_counts[orig_set]--;
 137   _region_counts[NotFree]++;
 138 }
 139 
 140 
 141 void ShenandoahSetsOfFree::make_free(size_t idx, ShenandoahFreeMemoryType which_set, size_t region_capacity) {
 142   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 143   assert (_membership[idx] == NotFree, "Cannot make free if already free");
 144   assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
 145   _membership[idx] = which_set;
 146   _capacity_of[which_set] += region_capacity;
 147   expand_bounds_maybe(which_set, idx, region_capacity);
 148 
 149   _region_counts[NotFree]--;
 150   _region_counts[which_set]++;
 151 }
 152 
 153 void ShenandoahSetsOfFree::move_to_set(size_t idx, ShenandoahFreeMemoryType new_set, size_t region_capacity) {
 154   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 155   assert ((new_set > NotFree) && (new_set < NumFreeSets), "New set must be valid");
 156   ShenandoahFreeMemoryType orig_set = _membership[idx];
 157   assert ((orig_set > NotFree) && (orig_set < NumFreeSets), "Cannot move free unless already free");
 158   // Expected transitions:
 159   //  During rebuild: Mutator => Collector
 160   //                  Mutator empty => Collector
 161   //  During flip_to_gc:
 162   //                  Mutator empty => Collector
 163   //                  Mutator empty => Old Collector
 164   // At start of update refs:
 165   //                  Collector => Mutator
 166   //                  OldCollector Empty => Mutator
 167   assert (((region_capacity <= _region_size_bytes) &&
 168            ((orig_set == Mutator) && (new_set == Collector)) ||
 169            ((orig_set == Collector) && (new_set == Mutator))) ||
 170           ((region_capacity == _region_size_bytes) &&
 171            ((orig_set == Mutator) && (new_set == Collector)) ||
 172            ((orig_set == OldCollector) && (new_set == Mutator)) ||
 173            (new_set == OldCollector)), "Unexpected movement between sets");
 174 
 175   _membership[idx] = new_set;
 176   _capacity_of[orig_set] -= region_capacity;
 177   shrink_bounds_if_touched(orig_set, idx);
 178 
 179   _capacity_of[new_set] += region_capacity;
 180   expand_bounds_maybe(new_set, idx, region_capacity);
 181 
 182   _region_counts[orig_set]--;
 183   _region_counts[new_set]++;
 184 }
 185 
 186 inline ShenandoahFreeMemoryType ShenandoahSetsOfFree::membership(size_t idx) const {
 187   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 188   return _membership[idx];
 189 }
 190 
 191   // Returns true iff region idx is in the test_set free set.  Before returning true, asserts that the
 192   // region has allocation capacity.  Requires that test_set is neither NotFree nor NumFreeSets.
 193 inline bool ShenandoahSetsOfFree::in_free_set(size_t idx, ShenandoahFreeMemoryType test_set) const {
 194   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 195   if (_membership[idx] == test_set) {
 196     assert (test_set == NotFree || _free_set->alloc_capacity(idx) > 0, "Free regions must have alloc capacity");
 197     return true;
 198   } else {
 199     return false;
 200   }
 201 }
 202 
 203 inline size_t ShenandoahSetsOfFree::leftmost(ShenandoahFreeMemoryType which_set) const {
 204   assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
 205   size_t idx = _leftmosts[which_set];
 206   if (idx >= _max) {
 207     return _max;
 208   } else {
 209     assert (in_free_set(idx, which_set), "left-most region must be free");
 210     return idx;
 211   }
 212 }
 213 
 214 inline size_t ShenandoahSetsOfFree::rightmost(ShenandoahFreeMemoryType which_set) const {
 215   assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
 216   size_t idx = _rightmosts[which_set];
 217   assert ((_leftmosts[which_set] == _max) || in_free_set(idx, which_set), "right-most region must be free");
 218   return idx;
 219 }
 220 
 221 inline bool ShenandoahSetsOfFree::is_empty(ShenandoahFreeMemoryType which_set) const {
 222   assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
 223   return (leftmost(which_set) > rightmost(which_set));
 224 }
 225 
 226 size_t ShenandoahSetsOfFree::leftmost_empty(ShenandoahFreeMemoryType which_set) {
 227   assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
 228   for (size_t idx = _leftmosts_empty[which_set]; idx < _max; idx++) {
 229     if ((membership(idx) == which_set) && (_free_set->alloc_capacity(idx) == _region_size_bytes)) {
 230       _leftmosts_empty[which_set] = idx;
 231       return idx;
 232     }
 233   }
 234   _leftmosts_empty[which_set] = _max;
 235   _rightmosts_empty[which_set] = 0;
 236   return _max;
 237 }
 238 
 239 inline size_t ShenandoahSetsOfFree::rightmost_empty(ShenandoahFreeMemoryType which_set) {
 240   assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
 241   for (intptr_t idx = _rightmosts_empty[which_set]; idx >= 0; idx--) {
 242     if ((membership(idx) == which_set) && (_free_set->alloc_capacity(idx) == _region_size_bytes)) {
 243       _rightmosts_empty[which_set] = idx;
 244       return idx;
 245     }
 246   }
 247   _leftmosts_empty[which_set] = _max;
 248   _rightmosts_empty[which_set] = 0;
 249   return 0;
 250 }
 251 
 252 inline bool ShenandoahSetsOfFree::alloc_from_left_bias(ShenandoahFreeMemoryType which_set) {
 253   assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
 254   return _left_to_right_bias[which_set];
 255 }
 256 
 257 void ShenandoahSetsOfFree::establish_alloc_bias(ShenandoahFreeMemoryType which_set) {
 258   ShenandoahHeap* heap = ShenandoahHeap::heap();
 259   shenandoah_assert_heaplocked();
 260   assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
 261 
 262   size_t middle = (_leftmosts[which_set] + _rightmosts[which_set]) / 2;
 263   size_t available_in_first_half = 0;
 264   size_t available_in_second_half = 0;
 265 
 266   for (size_t index = _leftmosts[which_set]; index < middle; index++) {
 267     if (in_free_set(index, which_set)) {
 268       ShenandoahHeapRegion* r = heap->get_region(index);
 269       available_in_first_half += r->free();
 270     }
 271   }
 272   for (size_t index = middle; index <= _rightmosts[which_set]; index++) {
 273     if (in_free_set(index, which_set)) {
 274       ShenandoahHeapRegion* r = heap->get_region(index);
 275       available_in_second_half += r->free();
 276     }
 277   }
 278 
 279   // We desire to first consume the sparsely distributed regions in order that the remaining regions are densely packed.
 280   // Densely packing regions reduces the effort to search for a region that has sufficient memory to satisfy a new allocation
 281   // request.  Regions become sparsely distributed following a Full GC, which tends to slide all regions to the front of the
 282   // heap rather than allowing survivor regions to remain at the high end of the heap where we intend for them to congregate.
 283 
 284   // TODO: In the future, we may modify Full GC so that it slides old objects to the end of the heap and young objects to the
 285   // front of the heap. If this is done, we can always search survivor Collector and OldCollector regions right to left.
 286   _left_to_right_bias[which_set] = (available_in_second_half > available_in_first_half);
 287 }
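
To make the heuristic concrete, here is a minimal standalone sketch (illustrative only, not part of this change; all values are made up) of the same decision establish_alloc_bias() makes:

#include <cstdio>

int main() {
  // Free bytes per region of a hypothetical 8-region set after a full GC has slid live
  // data towards low addresses: low-index regions are mostly full (sparse free space),
  // high-index regions are completely empty.
  const long free_bytes[8] = { 0, 1024, 0, 2048, 65536, 65536, 65536, 65536 };
  long first_half = 0, second_half = 0;
  for (int i = 0; i < 8; i++) {
    if (i < 4) first_half += free_bytes[i]; else second_half += free_bytes[i];
  }
  // Same decision as above: if most free memory sits in the second half, scan left to
  // right so the sparse low-index regions are consumed first and the densely free
  // high-index regions stay packed.
  const bool left_to_right = (second_half > first_half);
  printf("left_to_right bias = %s\n", left_to_right ? "true" : "false");  // prints true
  return 0;
}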
 288 
 289 #ifdef ASSERT
 290 void ShenandoahSetsOfFree::assert_bounds() {
 291 
 292   size_t leftmosts[NumFreeSets];
 293   size_t rightmosts[NumFreeSets];
 294   size_t empty_leftmosts[NumFreeSets];
 295   size_t empty_rightmosts[NumFreeSets];
 296 
 297   for (int i = 0; i < NumFreeSets; i++) {
 298     leftmosts[i] = _max;
 299     empty_leftmosts[i] = _max;
 300     rightmosts[i] = 0;
 301     empty_rightmosts[i] = 0;
 302   }
 303 
 304   for (size_t i = 0; i < _max; i++) {
 305     ShenandoahFreeMemoryType set = membership(i);
 306     switch (set) {
 307       case NotFree:
 308         break;
 309 
 310       case Mutator:
 311       case Collector:
 312       case OldCollector:
 313       {
 314         size_t capacity = _free_set->alloc_capacity(i);
 315         bool is_empty = (capacity == _region_size_bytes);
 316         assert(capacity > 0, "free regions must have allocation capacity");
 317         if (i < leftmosts[set]) {
 318           leftmosts[set] = i;
 319         }
 320         if (is_empty && (i < empty_leftmosts[set])) {
 321           empty_leftmosts[set] = i;
 322         }
 323         if (i > rightmosts[set]) {
 324           rightmosts[set] = i;
 325         }
 326         if (is_empty && (i > empty_rightmosts[set])) {
 327           empty_rightmosts[set] = i;
 328         }
 329         break;
 330       }
 331 
 332       case NumFreeSets:
 333       default:
 334         ShouldNotReachHere();
 335     }
 336   }
 337 
 338   // Performance invariants. Failing these would not break the free set, but performance would suffer.
 339   assert (leftmost(Mutator) <= _max, "leftmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, leftmost(Mutator),  _max);
 340   assert (rightmost(Mutator) < _max, "rightmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, rightmost(Mutator),  _max);
 341 
 342   assert (leftmost(Mutator) == _max || in_free_set(leftmost(Mutator), Mutator),
 343           "leftmost region should be free: " SIZE_FORMAT,  leftmost(Mutator));
 344   assert (leftmost(Mutator) == _max || in_free_set(rightmost(Mutator), Mutator),
 345           "rightmost region should be free: " SIZE_FORMAT, rightmost(Mutator));
 346 
 347   // If Mutator set is empty, leftmosts will both equal max, rightmosts will both equal zero.  Likewise for empty region sets.
 348   size_t beg_off = leftmosts[Mutator];
 349   size_t end_off = rightmosts[Mutator];
 350   assert (beg_off >= leftmost(Mutator),
 351           "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost(Mutator));
 352   assert (end_off <= rightmost(Mutator),
 353           "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, rightmost(Mutator));
 354 
 355   beg_off = empty_leftmosts[Mutator];
 356   end_off = empty_rightmosts[Mutator];
 357   assert (beg_off >= leftmost_empty(Mutator),
 358           "free empty regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost_empty(Mutator));
 359   assert (end_off <= rightmost_empty(Mutator),
 360           "free empty regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, rightmost_empty(Mutator));
 361 
 362   // Performance invariants. Failing these would not break the free set, but performance would suffer.
 363   assert (leftmost(Collector) <= _max, "leftmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, leftmost(Collector),  _max);
 364   assert (rightmost(Collector) < _max, "rightmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, rightmost(Collector),  _max);
 365 
 366   assert (leftmost(Collector) == _max || in_free_set(leftmost(Collector), Collector),
 367           "leftmost region should be free: " SIZE_FORMAT,  leftmost(Collector));
 368   assert (leftmost(Collector) == _max || in_free_set(rightmost(Collector), Collector),
 369           "rightmost region should be free: " SIZE_FORMAT, rightmost(Collector));
 370 
 371   // If Collector set is empty, leftmosts will both equal max, rightmosts will both equal zero.  Likewise for empty region sets.
 372   beg_off = leftmosts[Collector];
 373   end_off = rightmosts[Collector];
 374   assert (beg_off >= leftmost(Collector),
 375           "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost(Collector));
 376   assert (end_off <= rightmost(Collector),
 377           "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, rightmost(Collector));
 378 
 379   beg_off = empty_leftmosts[Collector];
 380   end_off = empty_rightmosts[Collector];
 381   assert (beg_off >= leftmost_empty(Collector),
 382           "free empty regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost_empty(Collector));
 383   assert (end_off <= rightmost_empty(Collector),
 384           "free empty regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, rightmost_empty(Collector));
 385 
 386   // Performance invariants. Failing these would not break the free set, but performance would suffer.
 387   assert (leftmost(OldCollector) <= _max, "leftmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, leftmost(OldCollector),  _max);
 388   assert (rightmost(OldCollector) < _max, "rightmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, rightmost(OldCollector),  _max);
 389 
 390   assert (leftmost(OldCollector) == _max || in_free_set(leftmost(OldCollector), OldCollector),
 391           "leftmost region should be free: " SIZE_FORMAT,  leftmost(OldCollector));
 392   assert (leftmost(OldCollector) == _max || in_free_set(rightmost(OldCollector), OldCollector),
 393           "rightmost region should be free: " SIZE_FORMAT, rightmost(OldCollector));
 394 
 395   // If OldCollector set is empty, leftmosts will both equal max, rightmosts will both equal zero.  Likewise for empty region sets.
 396   beg_off = leftmosts[OldCollector];
 397   end_off = rightmosts[OldCollector];
 398   assert (beg_off >= leftmost(OldCollector),
 399           "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost(OldCollector));
 400   assert (end_off <= rightmost(OldCollector),
 401           "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, rightmost(OldCollector));
 402 
 403   beg_off = empty_leftmosts[OldCollector];
 404   end_off = empty_rightmosts[OldCollector];
 405   assert (beg_off >= leftmost_empty(OldCollector),
 406           "free empty regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost_empty(OldCollector));
 407   assert (end_off <= rightmost_empty(OldCollector),
 408           "free empty regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, rightmost_empty(OldCollector));
 409 }
 410 #endif
 411 
 412 ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
 413   _heap(heap),
 414   _free_sets(max_regions, this)


 415 {
 416   clear_internal();
 417 }
 418 
 419 // This allocates from a region within the old_collector_set.  If affiliation equals OLD, the allocation must be taken
 420 // from a region that is_old().  Otherwise, affiliation should be FREE, in which case this will put a previously unaffiliated
 421 // region into service.
 422 HeapWord* ShenandoahFreeSet::allocate_old_with_affiliation(ShenandoahAffiliation affiliation,
 423                                                            ShenandoahAllocRequest& req, bool& in_new_region) {
 424   shenandoah_assert_heaplocked();

 425 
 426   size_t rightmost =
 427     (affiliation == ShenandoahAffiliation::FREE)? _free_sets.rightmost_empty(OldCollector): _free_sets.rightmost(OldCollector);
 428   size_t leftmost =
 429     (affiliation == ShenandoahAffiliation::FREE)? _free_sets.leftmost_empty(OldCollector): _free_sets.leftmost(OldCollector);
 430   if (_free_sets.alloc_from_left_bias(OldCollector)) {
 431     // This mode picks up stragglers left by a full GC
 432     for (size_t idx = leftmost; idx <= rightmost; idx++) {
 433       if (_free_sets.in_free_set(idx, OldCollector)) {
 434         ShenandoahHeapRegion* r = _heap->get_region(idx);
 435         assert(r->is_trash() || !r->is_affiliated() || r->is_old(), "old_collector_set region has bad affiliation");
 436         if (r->affiliation() == affiliation) {
 437           HeapWord* result = try_allocate_in(r, req, in_new_region);
 438           if (result != nullptr) {
 439             return result;
 440           }
 441         }
 442       }
 443     }
 444   } else {
 445     // This mode picks up stragglers left by a previous concurrent GC
 446     for (size_t count = rightmost + 1; count > leftmost; count--) {
 447       // size_t is unsigned, need to dodge underflow when _leftmost = 0
 448       size_t idx = count - 1;
 449       if (_free_sets.in_free_set(idx, OldCollector)) {
 450         ShenandoahHeapRegion* r = _heap->get_region(idx);
 451         assert(r->is_trash() || !r->is_affiliated() || r->is_old(), "old_collector_set region has bad affiliation");
 452         if (r->affiliation() == affiliation) {
 453           HeapWord* result = try_allocate_in(r, req, in_new_region);
 454           if (result != nullptr) {
 455             return result;
 456           }
 457         }
 458       }
 459     }
 460   }
 461   return nullptr;
 462 }
 463 
 464 void ShenandoahFreeSet::add_old_collector_free_region(ShenandoahHeapRegion* region) {
 465   shenandoah_assert_heaplocked();
 466   size_t idx = region->index();
 467   size_t capacity = alloc_capacity(region);
 468   assert(_free_sets.membership(idx) == NotFree, "Regions promoted in place should not be in any free set");
 469   if (capacity >= PLAB::min_size() * HeapWordSize) {
 470     _free_sets.make_free(idx, OldCollector, capacity);
 471     _heap->augment_promo_reserve(capacity);
 472   }
 473 }
 474 
 475 HeapWord* ShenandoahFreeSet::allocate_with_affiliation(ShenandoahAffiliation affiliation,
 476                                                        ShenandoahAllocRequest& req, bool& in_new_region) {
 477   shenandoah_assert_heaplocked();
 478   size_t rightmost =
 479     (affiliation == ShenandoahAffiliation::FREE)? _free_sets.rightmost_empty(Collector): _free_sets.rightmost(Collector);
 480   size_t leftmost =
 481     (affiliation == ShenandoahAffiliation::FREE)? _free_sets.leftmost_empty(Collector): _free_sets.leftmost(Collector);
 482   for (size_t c = rightmost + 1; c > leftmost; c--) {
 483     // size_t is unsigned, need to dodge underflow when _leftmost = 0
 484     size_t idx = c - 1;
 485     if (_free_sets.in_free_set(idx, Collector)) {
 486       ShenandoahHeapRegion* r = _heap->get_region(idx);
 487       if (r->affiliation() == affiliation) {
 488         HeapWord* result = try_allocate_in(r, req, in_new_region);
 489         if (result != nullptr) {
 490           return result;
 491         }
 492       }
 493     }
 494   }
 495   log_debug(gc, free)("Could not allocate collector region with affiliation: %s for request " PTR_FORMAT,
 496                       shenandoah_affiliation_name(affiliation), p2i(&req));
 497   return nullptr;
 498 }
 499 
 500 HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
 501   shenandoah_assert_heaplocked();
 502 
 503   // Scan the bitmap looking for a first fit.
 504   //
 505   // Leftmost and rightmost bounds provide enough caching to walk bitmap efficiently. Normally,
 506   // we would find the region to allocate at right away.
 507   //
 508   // Allocations are biased: new application allocs go to beginning of the heap, and GC allocs
 509   // go to the end. This makes application allocation faster, because we would clear lots
 510   // of regions from the beginning most of the time.
 511   //
 512   // Free set maintains mutator and collector views, and normally they allocate in their views only,
 513   // unless we special-case stealing and mixed allocations.
 514 
 515   // Overwrite with non-zero (non-NULL) values only if necessary for allocation bookkeeping.
 516 
 517   bool allow_new_region = true;
 518   if (_heap->mode()->is_generational()) {
 519     switch (req.affiliation()) {
 520       case ShenandoahAffiliation::OLD_GENERATION:
 521         // Note: unsigned result from free_unaffiliated_regions() will never be less than zero, but it may equal zero.
 522         if (_heap->old_generation()->free_unaffiliated_regions() <= 0) {
 523           allow_new_region = false;
 524         }
 525         break;
 526 
 527       case ShenandoahAffiliation::YOUNG_GENERATION:
 528         // Note: unsigned result from free_unaffiliated_regions() will never be less than zero, but it may equal zero.
 529         if (_heap->young_generation()->free_unaffiliated_regions() <= 0) {
 530           allow_new_region = false;
 531         }
 532         break;
 533 
 534       case ShenandoahAffiliation::FREE:
 535         fatal("Should request affiliation");
 536 
 537       default:
 538         ShouldNotReachHere();
 539         break;
 540     }
 541   }
 542   switch (req.type()) {
 543     case ShenandoahAllocRequest::_alloc_tlab:
 544     case ShenandoahAllocRequest::_alloc_shared: {

 545       // Try to allocate in the mutator view
 546       // Allocate within mutator free from high memory to low so as to preserve low memory for humongous allocations
 547       if (!_free_sets.is_empty(Mutator)) {
 548         // Use signed idx.  Otherwise, loop will never terminate.
 549         int leftmost = (int) _free_sets.leftmost(Mutator);
 550         for (int idx = (int) _free_sets.rightmost(Mutator); idx >= leftmost; idx--) {
 551           ShenandoahHeapRegion* r = _heap->get_region(idx);
 552           if (_free_sets.in_free_set(idx, Mutator) && (allow_new_region || r->is_affiliated())) {
 553             // try_allocate_in() increases used if the allocation is successful.
 554             HeapWord* result;
 555             size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab)? req.min_size(): req.size();
 556             if ((alloc_capacity(r) >= min_size) && ((result = try_allocate_in(r, req, in_new_region)) != nullptr)) {
 557               return result;
 558             }
 559           }
 560         }
 561       }

 562       // There is no recovery. Mutator does not touch collector view at all.
 563       break;
 564     }
 565     case ShenandoahAllocRequest::_alloc_gclab:
 566       // GCLABs are for evacuation so we must be in evacuation phase.  If this allocation is successful, increment
 567       // the relevant evac_expended rather than used value.
 568 
 569     case ShenandoahAllocRequest::_alloc_plab:
 570       // PLABs always reside in old-gen and are only allocated during evacuation phase.
 571 
 572     case ShenandoahAllocRequest::_alloc_shared_gc: {
 573       if (!_heap->mode()->is_generational()) {
 574         // size_t is unsigned, need to dodge underflow when _leftmost = 0
 575         // Fast-path: try to allocate in the collector view first
 576         for (size_t c = _free_sets.rightmost(Collector) + 1; c > _free_sets.leftmost(Collector); c--) {
 577           size_t idx = c - 1;
 578           if (_free_sets.in_free_set(idx, Collector)) {
 579             HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
 580             if (result != nullptr) {
 581               return result;
 582             }
 583           }
 584         }
 585       } else {
 586         // First try to fit into a region that is already in use in the same generation.
 587         HeapWord* result;
 588         if (req.is_old()) {
 589           result = allocate_old_with_affiliation(req.affiliation(), req, in_new_region);
 590         } else {
 591           result = allocate_with_affiliation(req.affiliation(), req, in_new_region);
 592         }
 593         if (result != nullptr) {
 594           return result;
 595         }
 596         if (allow_new_region) {
 597           // Then try a free region that is dedicated to GC allocations.
 598           if (req.is_old()) {
 599             result = allocate_old_with_affiliation(FREE, req, in_new_region);
 600           } else {
 601             result = allocate_with_affiliation(FREE, req, in_new_region);
 602           }
 603           if (result != nullptr) {
 604             return result;
 605           }
 606         }
 607       }

 608       // No dice. Can we borrow space from mutator view?
 609       if (!ShenandoahEvacReserveOverflow) {
 610         return nullptr;
 611       }
 612 
 613       if (!allow_new_region && req.is_old() && (_heap->young_generation()->free_unaffiliated_regions() > 0)) {
 614         // This allows us to flip a mutator region to old_collector
 615         allow_new_region = true;
 616       }
 617 
 618       // We should expand old-gen if this can prevent an old-gen evacuation failure.  We don't care so much about
 619       // promotion failures since they can be mitigated in a subsequent GC pass.  It would be nice to know whether this
 620       // allocation request is for evacuation or promotion.  Individual threads limit their use of PLAB memory for
 621       // promotions, so we already have an assurance that any additional memory set aside for old-gen will be used
 622       // only for old-gen evacuations.
 623 
 624       // Also TODO:
 625       // If GC is idle (out of cycle), mutator allocation fails, and there is memory reserved in the Collector
 626       // or OldCollector sets, transfer a region of memory so that we can satisfy the allocation request, and
 627       // immediately trigger the start of GC.  It is better to satisfy the allocation than to trigger an out-of-cycle
 628       // allocation failure (even if this means we have a little less memory to handle evacuations during the
 629       // subsequent GC pass).
 630 
 631       if (allow_new_region) {
 632         // Try to steal an empty region from the mutator view.
 633         for (size_t c = _free_sets.rightmost_empty(Mutator) + 1; c > _free_sets.leftmost_empty(Mutator); c--) {
 634           size_t idx = c - 1;
 635           if (_free_sets.in_free_set(idx, Mutator)) {
 636             ShenandoahHeapRegion* r = _heap->get_region(idx);
 637             if (can_allocate_from(r)) {
 638               if (req.is_old()) {
 639                 flip_to_old_gc(r);
 640               } else {
 641                 flip_to_gc(r);
 642               }
 643               HeapWord *result = try_allocate_in(r, req, in_new_region);
 644               if (result != nullptr) {
 645                 log_debug(gc, free)("Flipped region " SIZE_FORMAT " to gc for request: " PTR_FORMAT, idx, p2i(&req));
 646                 return result;
 647               }
 648             }
 649           }
 650         }
 651       }
 652 
 653       // No dice. Do not try to mix mutator and GC allocations, because
 654       // update-watermark (URWM) moves due to GC allocations would expose unparsable mutator
 655       // allocations.

 656       break;
 657     }
 658     default:
 659       ShouldNotReachHere();
 660   }

 661   return nullptr;
 662 }
 663 
 664 // This work method takes an argument corresponding to the number of bytes
 665 // free in a region, and returns the largest amount in heapwords that can be allocated
 666 // such that both of the following conditions are satisfied:
 667 //
 668 // 1. it is a multiple of card size
 669 // 2. any remaining shard may be filled with a filler object
 670 //
 671 // The idea is that the allocation starts and ends at card boundaries. Because
 672 // a region's end is card-aligned, the remainder shard that must be filled is
 673 // at the start of the free space.
 674 //
 675 // This is merely a helper method to use for the purpose of such a calculation.
 676 size_t get_usable_free_words(size_t free_bytes) {
 677   // e.g. card_size is 512, card_shift is 9, min_fill_size() is 8
 678   //      free is 514
 679   //      usable_free is 512, which is decreased to 0
 680   size_t usable_free = (free_bytes / CardTable::card_size()) << CardTable::card_shift();
 681   assert(usable_free <= free_bytes, "Sanity check");
 682   if ((free_bytes != usable_free) && (free_bytes - usable_free < ShenandoahHeap::min_fill_size() * HeapWordSize)) {
 683     // After aligning to card multiples, the remainder would be smaller than
 684     // the minimum filler object, so we'll need to take away another card's
 685     // worth to construct a filler object.
 686     if (usable_free >= CardTable::card_size()) {
 687       usable_free -= CardTable::card_size();
 688     } else {
 689       assert(usable_free == 0, "usable_free is a multiple of card_size and card_size > min_fill_size");
 690     }
 691   }
 692 
 693   return usable_free / HeapWordSize;
 694 }
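
     // For illustration only: a minimal, standalone sketch (not part of this file) of the same
     // card-alignment arithmetic, assuming a 512-byte card, 8-byte heap words and an 8-word minimum
     // filler object.  With 514 bytes free, rounding down to card multiples yields 512 bytes; the
     // 2-byte remainder cannot hold a filler object, so one more card is surrendered and the result
     // is 0 usable words.
     static size_t sketch_usable_free_words(size_t free_bytes) {
       const size_t card_size      = 512;                      // assumed CardTable::card_size()
       const size_t heap_word_size = 8;                        // assumed HeapWordSize
       const size_t min_fill_bytes = 8 * heap_word_size;       // assumed ShenandoahHeap::min_fill_size() in bytes
       size_t usable = (free_bytes / card_size) * card_size;   // round down to a card multiple
       if ((free_bytes != usable) && (free_bytes - usable < min_fill_bytes)) {
         // Remainder is too small for a filler object: give up another card, or everything.
         usable = (usable >= card_size) ? usable - card_size : 0;
       }
       return usable / heap_word_size;                         // e.g. sketch_usable_free_words(514) == 0
     }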
 695 
 696 // Given a size argument, which is a multiple of card size, a request struct
 697 // for a PLAB, and an old region, return a pointer to the allocated space for
 698 // a PLAB which is card-aligned and where any remaining shard in the region
 699 // has been suitably filled by a filler object.
 700 // It is assumed (and assertion-checked) that such an allocation is always possible.
 701 HeapWord* ShenandoahFreeSet::allocate_aligned_plab(size_t size, ShenandoahAllocRequest& req, ShenandoahHeapRegion* r) {
 702   assert(_heap->mode()->is_generational(), "PLABs are only for generational mode");
 703   assert(r->is_old(), "All PLABs reside in old-gen");
 704   assert(!req.is_mutator_alloc(), "PLABs should not be allocated by mutators.");
 705   assert(size % CardTable::card_size_in_words() == 0, "size must be multiple of card table size, was " SIZE_FORMAT, size);
 706 
 707   HeapWord* result = r->allocate_aligned(size, req, CardTable::card_size());
 708   assert(result != nullptr, "Allocation cannot fail");
 709   assert(r->top() <= r->end(), "Allocation cannot span end of region");
 710   assert(req.actual_size() == size, "Should not have needed to adjust size for PLAB.");
 711   assert(((uintptr_t) result) % CardTable::card_size() == 0, "PLAB start must align with card boundary");
 712 
 713   return result;
 714 }
 715 
 716 HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
 717   assert (has_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());
 718   if (_heap->is_concurrent_weak_root_in_progress() && r->is_trash()) {
 719     return nullptr;
 720   }
 721 
 722   try_recycle_trashed(r);
 723   if (!r->is_affiliated()) {
 724     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
 725     r->set_affiliation(req.affiliation());
 726     if (r->is_old()) {
 727       // Any OLD region allocated during concurrent coalesce-and-fill does not need to be coalesced and filled because
 728       // all objects allocated within this region are above TAMS (and thus are implicitly marked).  In case this is an
 729       // OLD region and concurrent preparation for mixed evacuations visits this region before the start of the next
 730       // old-gen concurrent mark (i.e. this region is allocated following the start of old-gen concurrent mark but before
 731       // concurrent preparations for mixed evacuations are completed), we mark this region as not requiring any
 732       // coalesce-and-fill processing.
 733       r->end_preemptible_coalesce_and_fill();
 734       _heap->clear_cards_for(r);
 735       _heap->old_generation()->increment_affiliated_region_count();
 736     } else {
 737       _heap->young_generation()->increment_affiliated_region_count();
 738     }
 739 
 740     assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom");
 741     assert(ctx->is_bitmap_clear_range(ctx->top_bitmap(r), r->end()), "Bitmap above top_bitmap() must be clear");
 742   } else if (r->affiliation() != req.affiliation()) {
 743     assert(_heap->mode()->is_generational(), "Request for %s from %s region should only happen in generational mode.",
 744            req.affiliation_name(), r->affiliation_name());
 745     return nullptr;
 746   }
 747 
 748   in_new_region = r->is_empty();
 749   HeapWord* result = nullptr;

 750 
 751   if (in_new_region) {
 752     log_debug(gc, free)("Using new region (" SIZE_FORMAT ") for %s (" PTR_FORMAT ").",
 753                        r->index(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(&req));
 754   }
 755 
 756   // req.size() is in words, r->free() is in bytes.
 757   if (ShenandoahElasticTLAB && req.is_lab_alloc()) {
 758     if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
 759       assert(_heap->mode()->is_generational(), "PLABs are only for generational mode");
 760       assert(_free_sets.in_free_set(r->index(), OldCollector), "PLABS must be allocated in old_collector_free regions");
 761       // Need to assure that PLABs are aligned on a multiple of the card size.
 762       // Since we have Elastic TLABs, align sizes up. They may be decreased to fit in the usable
 763       // memory remaining in the region (which will also be aligned to cards).
 764       size_t adjusted_size = align_up(req.size(), CardTable::card_size_in_words());
 765       size_t adjusted_min_size = align_up(req.min_size(), CardTable::card_size_in_words());
 766       size_t usable_free = get_usable_free_words(r->free());
 767 
 768       if (adjusted_size > usable_free) {
 769         adjusted_size = usable_free;
 770       }
 771 
 772       if (adjusted_size >= adjusted_min_size) {
 773         result = allocate_aligned_plab(adjusted_size, req, r);
 774       }
 775       // Otherwise, leave result == nullptr because the adjusted size is smaller than min size.
 776     } else {
 777       // This is a GCLAB or a TLAB allocation
 778       size_t adjusted_size = req.size();
 779       size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
 780       if (adjusted_size > free) {
 781         adjusted_size = free;
 782       }
 783       if (adjusted_size >= req.min_size()) {
 784         result = r->allocate(adjusted_size, req);
 785         assert (result != nullptr, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, adjusted_size);
 786         req.set_actual_size(adjusted_size);
 787       } else {
 788         log_trace(gc, free)("Failed to shrink TLAB or GCLAB request (" SIZE_FORMAT ") in region " SIZE_FORMAT " to " SIZE_FORMAT
 789                            " because min_size() is " SIZE_FORMAT, req.size(), r->index(), adjusted_size, req.min_size());
 790       }
 791     }
 792   } else if (req.is_lab_alloc() && req.type() == ShenandoahAllocRequest::_alloc_plab) {
 793 
 794     // inelastic PLAB
 795     size_t size = req.size();
 796     size_t usable_free = get_usable_free_words(r->free());
 797     if (size <= usable_free) {
 798       result = allocate_aligned_plab(size, req, r);
 799     }
 800   } else {
 801     size_t size = req.size();
 802     result = r->allocate(size, req);
 803     if (result != nullptr) {
 804       // Record actual allocation size
 805       req.set_actual_size(size);
 806     }
 807   }
 808 
 809   ShenandoahGeneration* generation = _heap->generation_for(req.affiliation());
 810   if (result != nullptr) {
 811     // Allocation successful, bump stats:
 812     if (req.is_mutator_alloc()) {
 813       assert(req.is_young(), "Mutator allocations always come from young generation.");
 814       _free_sets.increase_used(Mutator, req.actual_size() * HeapWordSize);
 815     } else {
 816       assert(req.is_gc_alloc(), "Should be gc_alloc since req wasn't mutator alloc");
 817 
 818       // For GC allocations, we advance update_watermark because the objects relocated into this memory during
 819       // evacuation are not updated during evacuation.  For both young and old regions r, it is essential that all
 820       // PLABs be made parsable at the end of evacuation.  This is enabled by retiring all plabs at end of evacuation.
 821       // TODO: Making a PLAB parsable involves placing a filler object in its remnant memory but does not require
 822       // that the PLAB be disabled for all future purposes.  We may want to introduce a new service to make the
 823       // PLABs parsable while still allowing the PLAB to serve future allocation requests that arise during the
 824       // next evacuation pass.
 825       r->set_update_watermark(r->top());
 826       if (r->is_old()) {
 827         assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "old-gen allocations use PLAB or shared allocation");
 828         // for plabs, we'll sort the difference between evac and promotion usage when we retire the plab
 829       }
 830     }
 831   }
 832 
 833   if (result == nullptr || alloc_capacity(r) < PLAB::min_size() * HeapWordSize) {
 834     // Region cannot afford this and is likely to not afford future allocations. Retire it.
 835     //
 836     // While this seems a bit harsh, especially in the case when this large allocation does not
 837     // fit but the next small one would, we risk inflating scan times when lots of
 838     // almost-full regions precede the fully-empty region where we want to allocate the entire TLAB.

 839 
 840     // Record the remainder as allocation waste
 841     size_t idx = r->index();
 842     if (req.is_mutator_alloc()) {
 843       size_t waste = r->free();
 844       if (waste > 0) {
 845         _free_sets.increase_used(Mutator, waste);
 846         // This one request could cause several regions to be "retired", so we must accumulate the waste
 847         req.set_waste((waste >> LogHeapWordSize) + req.waste());
 848       }
 849       assert(_free_sets.membership(idx) == Mutator, "Must be mutator free: " SIZE_FORMAT, idx);
 850     } else {
 851       assert(_free_sets.membership(idx) == Collector || _free_sets.membership(idx) == OldCollector,
 852              "Must be collector or old-collector free: " SIZE_FORMAT, idx);
 853     }
 854     // This region is no longer considered free (in any set)
 855     _free_sets.remove_from_free_sets(idx);
 856     _free_sets.assert_bounds();






 857   }
 858   return result;
 859 }
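
     // For illustration only: a standalone sketch (not part of this file) of the elastic LAB sizing
     // decision made above for TLABs and GCLABs, with all sizes expressed in heap words.  The desired
     // size is shrunk to the space the region can actually provide, and the request is refused when
     // even the shrunken size falls below the request's minimum size.
     static size_t sketch_elastic_lab_words(size_t desired_words, size_t min_words, size_t free_words) {
       size_t adjusted = desired_words;
       if (adjusted > free_words) {
         adjusted = free_words;                        // shrink the LAB to fit the region
       }
       return (adjusted >= min_words) ? adjusted : 0;  // 0 means: do not allocate from this region
     }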
 860 
































 861 HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
 862   shenandoah_assert_heaplocked();
 863 
 864   size_t words_size = req.size();
 865   size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
 866 
 867   assert(req.is_young(), "Humongous regions always allocated in YOUNG");
 868   ShenandoahGeneration* generation = _heap->generation_for(req.affiliation());
 869 
 870   // Check if there are enough regions left to satisfy allocation.
 871   if (_heap->mode()->is_generational()) {
 872     size_t avail_young_regions = generation->free_unaffiliated_regions();
 873     if (num > _free_sets.count(Mutator) || (num > avail_young_regions)) {
 874       return nullptr;
 875     }
 876   } else {
 877     if (num > _free_sets.count(Mutator)) {
 878       return nullptr;
 879     }
 880   }
 881 
 882   // Find the contiguous interval of $num regions, starting from $beg and ending in $end,
 883   // inclusive. Contiguous allocations are biased to the beginning.
 884 
 885   size_t beg = _free_sets.leftmost(Mutator);
 886   size_t end = beg;
 887 
 888   while (true) {
 889     if (end >= _free_sets.max()) {
 890       // Hit the end, goodbye
 891       return nullptr;
 892     }
 893 
 894     // If the region at 'end' is not in the free set, the candidate run of adjacent free regions is broken,
 895     // and if it is not completely free it cannot be used either. In both cases [beg; end] is useless; fast-forward.
 896     if (!_free_sets.in_free_set(end, Mutator) || !can_allocate_from(_heap->get_region(end))) {
 897       end++;
 898       beg = end;
 899       continue;
 900     }
 901 
 902     if ((end - beg + 1) == num) {
 903       // found the match
 904       break;
 905     }
 906 
 907     end++;
 908   }
 909 
 910   size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
 911   ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
 912 
 913   // Initialize regions:
 914   for (size_t i = beg; i <= end; i++) {
 915     ShenandoahHeapRegion* r = _heap->get_region(i);
 916     try_recycle_trashed(r);
 917 
 918     assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous");
 919     assert(r->is_empty(), "Should be empty");
 920 
 921     if (i == beg) {
 922       r->make_humongous_start();
 923     } else {
 924       r->make_humongous_cont();
 925     }
 926 
 927     // Trailing region may be non-full, record the remainder there
 928     size_t used_words;
 929     if ((i == end) && (remainder != 0)) {
 930       used_words = remainder;
 931     } else {
 932       used_words = ShenandoahHeapRegion::region_size_words();
 933     }
 934 
 935     r->set_affiliation(req.affiliation());
 936     r->set_update_watermark(r->bottom());
 937     r->set_top(r->bottom() + used_words);
 938 
 939     // While individual regions report their true use, all humongous regions are marked used in the free set.
 940     _free_sets.remove_from_free_sets(r->index());
 941   }
 942   _heap->young_generation()->increase_affiliated_region_count(num);
 943 
 944   size_t total_humongous_size = ShenandoahHeapRegion::region_size_bytes() * num;
 945   _free_sets.increase_used(Mutator, total_humongous_size);
 946   _free_sets.assert_bounds();
 947   req.set_actual_size(words_size);
 948   if (remainder != 0) {
 949     req.set_waste(ShenandoahHeapRegion::region_size_words() - remainder);

 950   }








 951   return _heap->get_region(beg)->bottom();
 952 }
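
     // For illustration only: a standalone sketch (not part of this file) of the contiguous-interval
     // search used above, over a plain array where usable[i] is true iff region i is in the Mutator
     // set and entirely allocatable.  It returns the first index of a run of 'num' usable regions,
     // or 'count' when no such run exists; the real code additionally recycles trash and retires the
     // chosen regions from the free set.
     static size_t sketch_find_contiguous_run(const bool* usable, size_t count, size_t num) {
       size_t beg = 0;
       size_t end = 0;
       while (true) {
         if (end >= count) {
           return count;              // hit the end without finding a long-enough run
         }
         if (!usable[end]) {
           end++;
           beg = end;                 // restart the candidate run beyond the unusable region
           continue;
         }
         if (end - beg + 1 == num) {
           return beg;                // found a run of exactly 'num' usable regions
         }
         end++;
       }
     }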
 953 
 954 // Returns true iff this region is entirely available, either because it is empty() or because it has been found to represent
 955 // immediate trash and we'll be able to immediately recycle it.  Note that we cannot recycle immediate trash if
 956 // concurrent weak root processing is in progress.
 957 bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) const {
 958   return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
 959 }
 960 
 961 bool ShenandoahFreeSet::can_allocate_from(size_t idx) const {
 962   ShenandoahHeapRegion* r = _heap->get_region(idx);
 963   return can_allocate_from(r);
 964 }
 965 
 966 size_t ShenandoahFreeSet::alloc_capacity(size_t idx) const {
 967   ShenandoahHeapRegion* r = _heap->get_region(idx);
 968   return alloc_capacity(r);
 969 }
 970 
 971 size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) const {
 972   if (r->is_trash()) {
 973     // This would be recycled on allocation path
 974     return ShenandoahHeapRegion::region_size_bytes();
 975   } else {
 976     return r->free();
 977   }
 978 }
 979 
 980 bool ShenandoahFreeSet::has_alloc_capacity(ShenandoahHeapRegion *r) const {
 981   return alloc_capacity(r) > 0;
 982 }
 983 
 984 void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
 985   if (r->is_trash()) {

 986     r->recycle();
 987   }
 988 }
 989 
 990 void ShenandoahFreeSet::recycle_trash() {
 991   // lock is not reentrant, check we don't have it
 992   shenandoah_assert_not_heaplocked();
 993 
 994   for (size_t i = 0; i < _heap->num_regions(); i++) {
 995     ShenandoahHeapRegion* r = _heap->get_region(i);
 996     if (r->is_trash()) {
 997       ShenandoahHeapLocker locker(_heap->lock());
 998       try_recycle_trashed(r);
 999     }
1000     SpinPause(); // allow allocators to take the lock
1001   }
1002 }
1003 
1004 void ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) {
1005   size_t idx = r->index();
1006 
1007   assert(_free_sets.in_free_set(idx, Mutator), "Should be in mutator view");
1008   // Note: can_allocate_from(r) means r is entirely empty or is trash that can be recycled immediately
1009   assert(can_allocate_from(r), "Should not be allocated");
1010 
1011   size_t region_capacity = alloc_capacity(r);
1012   _free_sets.move_to_set(idx, OldCollector, region_capacity);
1013   _free_sets.assert_bounds();
1014   _heap->augment_old_evac_reserve(region_capacity);
1015   bool transferred = _heap->generation_sizer()->transfer_to_old(1);
1016   if (!transferred) {
1017     log_warning(gc, free)("Forcing transfer of region " SIZE_FORMAT " to old reserve.", idx);
1018     _heap->generation_sizer()->force_transfer_to_old(1);
1019   }
1020   // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
1021   // to recycle trash before attempting to allocate anything in the region.
1022 }
1023 
1024 void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
1025   size_t idx = r->index();
1026 
1027   assert(_free_sets.in_free_set(idx, Mutator), "Should be in mutator view");
1028   assert(can_allocate_from(r), "Should not be allocated");
1029 
1030   size_t region_capacity = alloc_capacity(r);
1031   _free_sets.move_to_set(idx, Collector, region_capacity);
1032   _free_sets.assert_bounds();
1033 
1034   // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
1035   // to recycle trash before attempting to allocate anything in the region.


1036 }
1037 
1038 void ShenandoahFreeSet::clear() {
1039   shenandoah_assert_heaplocked();
1040   clear_internal();
1041 }
1042 
1043 void ShenandoahFreeSet::clear_internal() {
1044   _free_sets.clear_all();







1045 }
1046 
1047 // This function places all is_old() regions that have allocation capacity into the old_collector set.  It places
1048 // all other regions (not is_old()) that have allocation capacity into the mutator_set.  Subsequently, we will
1049 // move some of the mutator regions into the collector set or old_collector set with the intent of packing
1050 // old_collector memory into the highest (rightmost) addresses of the heap and the collector memory into the
1051 // next highest addresses of the heap, with mutator memory consuming the lowest addresses of the heap.
1052 void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regions, size_t &old_cset_regions,
1053                                                          size_t &first_old_region, size_t &last_old_region,
1054                                                          size_t &old_region_count) {
1055   first_old_region = _heap->num_regions();
1056   last_old_region = 0;
1057   old_region_count = 0;
1058   old_cset_regions = 0;
1059   young_cset_regions = 0;
1060   for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
1061     ShenandoahHeapRegion* region = _heap->get_region(idx);
1062     if (region->is_trash()) {
1063       // Trashed regions represent regions that had been in the collection set but have not yet been "cleaned up".
1064       if (region->is_old()) {
1065         old_cset_regions++;
1066       } else {
1067         assert(region->is_young(), "Trashed region should be old or young");
1068         young_cset_regions++;
1069       }
1070     } else if (region->is_old() && region->is_regular()) {
1071       old_region_count++;
1072       if (first_old_region > idx) {
1073         first_old_region = idx;
1074       }
1075       last_old_region = idx;
1076     }
1077     if (region->is_alloc_allowed() || region->is_trash()) {
1078       assert(!region->is_cset(), "Shouldn't be adding cset regions to the free set");
1079       assert(_free_sets.in_free_set(idx, NotFree), "We are about to make region free; it should not be free already");
1080 
1081       // Do not add regions that would almost surely fail allocation
1082       if (alloc_capacity(region) < PLAB::min_size() * HeapWordSize) continue;
1083 
1084       if (region->is_old()) {
1085         _free_sets.make_free(idx, OldCollector, alloc_capacity(region));
1086         log_debug(gc, free)(
1087           "  Adding Region " SIZE_FORMAT  " (Free: " SIZE_FORMAT "%s, Used: " SIZE_FORMAT "%s) to old collector set",
1088           idx, byte_size_in_proper_unit(region->free()), proper_unit_for_byte_size(region->free()),
1089           byte_size_in_proper_unit(region->used()), proper_unit_for_byte_size(region->used()));
1090       } else {
1091         _free_sets.make_free(idx, Mutator, alloc_capacity(region));
1092         log_debug(gc, free)(
1093           "  Adding Region " SIZE_FORMAT " (Free: " SIZE_FORMAT "%s, Used: " SIZE_FORMAT "%s) to mutator set",
1094           idx, byte_size_in_proper_unit(region->free()), proper_unit_for_byte_size(region->free()),
1095           byte_size_in_proper_unit(region->used()), proper_unit_for_byte_size(region->used()));
1096       }
1097     }
1098   }
1099 }
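
     // For illustration only: a standalone sketch (not part of this file) of the per-region
     // classification applied above.  The enum values merely stand in for the real free-set
     // membership values, and 'min_useful_bytes' stands in for PLAB::min_size() * HeapWordSize.
     enum SketchMembership { SketchNotFree, SketchMutator, SketchOldCollector };
     static SketchMembership sketch_initial_membership(bool alloc_allowed, bool is_trash, bool is_old,
                                                       size_t capacity_bytes, size_t min_useful_bytes) {
       if (!alloc_allowed && !is_trash)        return SketchNotFree;   // region cannot serve allocations
       if (capacity_bytes < min_useful_bytes)  return SketchNotFree;   // would almost surely fail allocation
       return is_old ? SketchOldCollector : SketchMutator;             // old regions seed the OldCollector set
     }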
1100 
1101 // Move no more than cset_regions from the existing Collector and OldCollector free sets to the Mutator free set.
1102 // This is called from outside the heap lock.
1103 void ShenandoahFreeSet::move_collector_sets_to_mutator(size_t max_xfer_regions) {
1104   size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1105   size_t collector_empty_xfer = 0;
1106   size_t collector_not_empty_xfer = 0;
1107   size_t old_collector_empty_xfer = 0;
1108 
1109   // Process empty regions within the Collector free set
1110   if ((max_xfer_regions > 0) && (_free_sets.leftmost_empty(Collector) <= _free_sets.rightmost_empty(Collector))) {
1111     ShenandoahHeapLocker locker(_heap->lock());
1112     for (size_t idx = _free_sets.leftmost_empty(Collector);
1113          (max_xfer_regions > 0) && (idx <= _free_sets.rightmost_empty(Collector)); idx++) {
1114       if (_free_sets.in_free_set(idx, Collector) && can_allocate_from(idx)) {
1115         _free_sets.move_to_set(idx, Mutator, region_size_bytes);
1116         max_xfer_regions--;
1117         collector_empty_xfer += region_size_bytes;
1118       }
1119     }
1120   }
1121 
1122   // Process empty regions within the OldCollector free set
1123   size_t old_collector_regions = 0;
1124   if ((max_xfer_regions > 0) && (_free_sets.leftmost_empty(OldCollector) <= _free_sets.rightmost_empty(OldCollector))) {
1125     ShenandoahHeapLocker locker(_heap->lock());
1126     for (size_t idx = _free_sets.leftmost_empty(OldCollector);
1127          (max_xfer_regions > 0) && (idx <= _free_sets.rightmost_empty(OldCollector)); idx++) {
1128       if (_free_sets.in_free_set(idx, OldCollector) && can_allocate_from(idx)) {
1129         _free_sets.move_to_set(idx, Mutator, region_size_bytes);
1130         max_xfer_regions--;
1131         old_collector_empty_xfer += region_size_bytes;
1132         old_collector_regions++;
1133       }
1134     }
1135     if (old_collector_regions > 0) {
1136       _heap->generation_sizer()->transfer_to_young(old_collector_regions);
1137     }
1138   }
1139 
1140   // If there are any non-empty regions within Collector set, we can also move them to the Mutator free set
1141   if ((max_xfer_regions > 0) && (_free_sets.leftmost(Collector) <= _free_sets.rightmost(Collector))) {
1142     ShenandoahHeapLocker locker(_heap->lock());
1143     for (size_t idx = _free_sets.leftmost(Collector); (max_xfer_regions > 0) && (idx <= _free_sets.rightmost(Collector)); idx++) {
1144       size_t alloc_capacity = this->alloc_capacity(idx);
1145       if (_free_sets.in_free_set(idx, Collector) && (alloc_capacity > 0)) {
1146         _free_sets.move_to_set(idx, Mutator, alloc_capacity);
1147         max_xfer_regions--;
1148         collector_not_empty_xfer += alloc_capacity;
1149       }
1150     }
1151   }
1152 
1153   size_t collector_xfer = collector_empty_xfer + collector_not_empty_xfer;
1154   size_t total_xfer = collector_xfer + old_collector_empty_xfer;
1155   log_info(gc, free)("At start of update refs, moving " SIZE_FORMAT "%s to Mutator free set from Collector Reserve ("
1156                      SIZE_FORMAT "%s) and from Old Collector Reserve (" SIZE_FORMAT "%s)",
1157                      byte_size_in_proper_unit(total_xfer), proper_unit_for_byte_size(total_xfer),
1158                      byte_size_in_proper_unit(collector_xfer), proper_unit_for_byte_size(collector_xfer),
1159                      byte_size_in_proper_unit(old_collector_empty_xfer), proper_unit_for_byte_size(old_collector_empty_xfer));
1160 }
1161 


1162 
1163 // Overwrite arguments to represent the amount of memory in each generation that is about to be recycled
1164 void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_cset_regions, size_t &old_cset_regions,
1165                                            size_t &first_old_region, size_t &last_old_region, size_t &old_region_count) {
1166   shenandoah_assert_heaplocked();
1167   // This resets all state information, removing all regions from all sets.
1168   clear();
1169   log_debug(gc, free)("Rebuilding FreeSet");
1170 
1171   // This places regions that have alloc_capacity into the old_collector set if they identify as is_old() or the
1172   // mutator set otherwise.
1173   find_regions_with_alloc_capacity(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
1174 }
1175 
1176 void ShenandoahFreeSet::rebuild(size_t young_cset_regions, size_t old_cset_regions) {
1177   shenandoah_assert_heaplocked();
1178   size_t young_reserve, old_reserve;
1179   size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1180 
1181   size_t old_capacity = _heap->old_generation()->max_capacity();
1182   size_t old_available = _heap->old_generation()->available();
1183   size_t old_unaffiliated_regions = _heap->old_generation()->free_unaffiliated_regions();
1184   size_t young_capacity = _heap->young_generation()->max_capacity();
1185   size_t young_available = _heap->young_generation()->available();
1186   size_t young_unaffiliated_regions = _heap->young_generation()->free_unaffiliated_regions();
1187 
1188   old_unaffiliated_regions += old_cset_regions;
1189   old_available += old_cset_regions * region_size_bytes;
1190   young_unaffiliated_regions += young_cset_regions;
1191   young_available += young_cset_regions * region_size_bytes;
1192 
1193   // Consult old-region surplus and deficit to make adjustments to current generation capacities and availability.
1194   // The generation region transfers take place after we rebuild.
1195   size_t old_region_surplus = _heap->get_old_region_surplus();
1196   size_t old_region_deficit = _heap->get_old_region_deficit();
1197 
1198   if (old_region_surplus > 0) {
1199     size_t xfer_bytes = old_region_surplus * region_size_bytes;
1200     assert(old_region_surplus <= old_unaffiliated_regions, "Cannot transfer regions that are affiliated");
1201     old_capacity -= xfer_bytes;
1202     old_available -= xfer_bytes;
1203     old_unaffiliated_regions -= old_region_surplus;
1204     young_capacity += xfer_bytes;
1205     young_available += xfer_bytes;
1206     young_unaffiliated_regions += old_region_surplus;
1207   } else if (old_region_deficit > 0) {
1208     size_t xfer_bytes = old_region_deficit * region_size_bytes;
1209     assert(old_region_deficit <= young_unaffiliated_regions, "Cannot transfer regions that are affiliated");
1210     old_capacity += xfer_bytes;
1211     old_available += xfer_bytes;
1212     old_unaffiliated_regions += old_region_deficit;
1213     young_capacity -= xfer_bytes;
1214     young_available -= xfer_bytes;
1215     young_unaffiliated_regions -= old_region_deficit;
1216   }
1217 
1218   // Evac reserve: reserve trailing space for evacuations, with regions reserved for old evacuations placed to the right
1219   // of regions reserved for young evacuations.
1220   if (!_heap->mode()->is_generational()) {
1221     young_reserve = (_heap->max_capacity() / 100) * ShenandoahEvacReserve;
1222     old_reserve = 0;
1223   } else {
1224     // All allocations taken from the old collector set are performed by GC, generally using PLABs for both
1225     // promotions and evacuations.  The partition between which old memory is reserved for evacuation and
1226     // which is reserved for promotion is enforced using thread-local variables that prescribe intentions for
1227     // each PLAB's available memory.
1228     if (_heap->has_evacuation_reserve_quantities()) {
1229       // We are rebuilding at the end of final mark, having already established evacuation budgets for this GC pass.
1230       young_reserve = _heap->get_young_evac_reserve();
1231       old_reserve = _heap->get_promoted_reserve() + _heap->get_old_evac_reserve();
1232       assert(old_reserve <= old_available,
1233              "Cannot reserve (" SIZE_FORMAT " + " SIZE_FORMAT") more OLD than is available: " SIZE_FORMAT,
1234              _heap->get_promoted_reserve(), _heap->get_old_evac_reserve(), old_available);
1235     } else {
1236       // We are rebuilding at end of GC, so we set aside budgets specified on command line (or defaults)
1237       young_reserve = (young_capacity * ShenandoahEvacReserve) / 100;
1238       // The auto-sizer has already made old-gen large enough to hold all anticipated evacuations and promotions.
1239       // Affiliated old-gen regions are already in the OldCollector free set.  Add in the relevant number of
1240       // unaffiliated regions.
1241       old_reserve = old_available;
1242     }
1243   }
1244 
1245   // Old available regions that have less than PLAB::min_size() of available memory are not placed into the OldCollector
1246   // free set.  Because of this, old_available may not have enough memory to represent the intended reserve.  Adjust
1247   // the reserve downward to account for this possibility. This loss is part of the reason why the original budget
1248   // was adjusted with ShenandoahOldEvacWaste and ShenandoahOldPromoWaste multipliers.
1249   if (old_reserve > _free_sets.capacity_of(OldCollector) + old_unaffiliated_regions * region_size_bytes) {
1250     old_reserve = _free_sets.capacity_of(OldCollector) + old_unaffiliated_regions * region_size_bytes;
1251   }
1252 
1253   if (young_reserve > young_unaffiliated_regions * region_size_bytes) {
1254     young_reserve = young_unaffiliated_regions * region_size_bytes;
1255   }
1256 
1257   reserve_regions(young_reserve, old_reserve);
1258   _free_sets.establish_alloc_bias(OldCollector);
1259   _free_sets.assert_bounds();
1260   log_status();
1261 }
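
     // For illustration only: a worked example (with assumed numbers, not taken from this file) of
     // the end-of-GC young reserve sizing above.  With a 2 GB young generation and an evacuation
     // reserve of 5 percent, the reserve is about 102 MB; if only 3 unaffiliated young regions of
     // 32 MB remain, the reserve is capped at 96 MB.
     static size_t sketch_young_reserve(size_t young_capacity, size_t evac_reserve_percent,
                                        size_t unaffiliated_regions, size_t region_size_bytes) {
       size_t reserve = (young_capacity * evac_reserve_percent) / 100;
       size_t cap     = unaffiliated_regions * region_size_bytes;
       return (reserve > cap) ? cap : reserve;
     }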
1262 
1263 // Having placed all regions that have allocation capacity into the mutator set if they identify as is_young()
1264 // or into the old collector set if they identify as is_old(), move some of these regions from the mutator set
1265 // into the collector set or old collector set in order to assure that the memory available for allocations within
1266 // the collector set is at least to_reserve, and the memory available for allocations within the old collector set
1267 // is at least to_reserve_old.
1268 void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old) {
1269   for (size_t i = _heap->num_regions(); i > 0; i--) {
1270     size_t idx = i - 1;
1271     ShenandoahHeapRegion* r = _heap->get_region(idx);
1272     if (!_free_sets.in_free_set(idx, Mutator)) {
1273       continue;
1274     }
1275 
1276     size_t ac = alloc_capacity(r);
1277     assert (ac > 0, "Membership in free set implies has capacity");
1278     assert (!r->is_old(), "mutator_is_free regions should not be affiliated OLD");
1279 
1280     bool move_to_old = _free_sets.capacity_of(OldCollector) < to_reserve_old;
1281     bool move_to_young = _free_sets.capacity_of(Collector) < to_reserve;
1282 
1283     if (!move_to_old && !move_to_young) {
1284       // We've satisfied both to_reserve and to_reserve_old
1285       break;
1286     }
1287 
1288     if (move_to_old) {
1289       if (r->is_trash() || !r->is_affiliated()) {
1290         // OLD regions that have available memory are already in the old_collector free set
1291         _free_sets.move_to_set(idx, OldCollector, ac);
1292         log_debug(gc, free)("  Shifting region " SIZE_FORMAT " from mutator_free to old_collector_free", idx);
1293         continue;
1294       }
1295     }
1296 
1297     if (move_to_young) {
1298       // Note: In a previous implementation, regions were only placed into the survivor space (collector_is_free) if
1299       // they were entirely empty.  I'm not sure I understand the rationale for that.  That alternative behavior would
1300       // tend to mix survivor objects with ephemeral objects, making it more difficult to reclaim the memory for the
1301       // ephemeral objects.  It also delays aging of regions, causing promotion in place to be delayed.
1302       _free_sets.move_to_set(idx, Collector, ac);
1303       log_debug(gc)("  Shifting region " SIZE_FORMAT " from mutator_free to collector_free", idx);
1304     }
1305   }
1306 
1307   if (LogTarget(Info, gc, free)::is_enabled()) {
1308     size_t old_reserve = _free_sets.capacity_of(OldCollector);
1309     if (old_reserve < to_reserve_old) {
1310       log_info(gc, free)("Wanted " PROPERFMT " for old reserve, but only reserved: " PROPERFMT,
1311                          PROPERFMTARGS(to_reserve_old), PROPERFMTARGS(old_reserve));
1312     }
1313     size_t young_reserve = _free_sets.capacity_of(Collector);
1314     if (young_reserve < to_reserve) {
1315       log_info(gc, free)("Wanted " PROPERFMT " for young reserve, but only reserved: " PROPERFMT,
1316                          PROPERFMTARGS(to_reserve), PROPERFMTARGS(young_reserve));
1317     }
1318   }
1319 }
1320 
1321 void ShenandoahFreeSet::log_status() {
1322   shenandoah_assert_heaplocked();
1323 
1324 #ifdef ASSERT
1325   // Dump of the FreeSet details is only enabled if assertions are enabled
1326   if (LogTarget(Debug, gc, free)::is_enabled()) {
1327 #define BUFFER_SIZE 80
1328     size_t retired_old = 0;
1329     size_t retired_old_humongous = 0;
1330     size_t retired_young = 0;
1331     size_t retired_young_humongous = 0;
1332     size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1333     size_t retired_young_waste = 0;
1334     size_t retired_old_waste = 0;
1335     size_t consumed_collector = 0;
1336     size_t consumed_old_collector = 0;
1337     size_t consumed_mutator = 0;
1338     size_t available_old = 0;
1339     size_t available_young = 0;
1340     size_t available_mutator = 0;
1341     size_t available_collector = 0;
1342     size_t available_old_collector = 0;
1343 
1344     char buffer[BUFFER_SIZE];
1345     for (uint i = 0; i < BUFFER_SIZE; i++) {
1346       buffer[i] = '\0';
1347     }
1348     log_debug(gc, free)("FreeSet map legend:"
1349                        " M:mutator_free C:collector_free O:old_collector_free"
1350                        " H:humongous ~:retired old _:retired young");
1351     log_debug(gc, free)(" mutator free range [" SIZE_FORMAT ".." SIZE_FORMAT "], "
1352                        " collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "], "
1353                        "old collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "] allocates from %s",
1354                        _free_sets.leftmost(Mutator), _free_sets.rightmost(Mutator),
1355                        _free_sets.leftmost(Collector), _free_sets.rightmost(Collector),
1356                        _free_sets.leftmost(OldCollector), _free_sets.rightmost(OldCollector),
1357                        _free_sets.alloc_from_left_bias(OldCollector)? "left to right": "right to left");
1358 
1359     for (uint i = 0; i < _heap->num_regions(); i++) {
1360       ShenandoahHeapRegion *r = _heap->get_region(i);
1361       uint idx = i % 64;
1362       if ((i != 0) && (idx == 0)) {
1363         log_debug(gc, free)(" %6u: %s", i-64, buffer);
1364       }
1365       if (_free_sets.in_free_set(i, Mutator)) {
1366         assert(!r->is_old(), "Old regions should not be in mutator_free set");
1367         size_t capacity = alloc_capacity(r);
1368         available_mutator += capacity;
1369         consumed_mutator += region_size_bytes - capacity;
1370         buffer[idx] = (capacity == region_size_bytes)? 'M': 'm';
1371       } else if (_free_sets.in_free_set(i, Collector)) {
1372         assert(!r->is_old(), "Old regions should not be in collector_free set");
1373         size_t capacity = alloc_capacity(r);
1374         available_collector += capacity;
1375         consumed_collector += region_size_bytes - capacity;
1376         buffer[idx] = (capacity == region_size_bytes)? 'C': 'c';
1377       } else if (_free_sets.in_free_set(i, OldCollector)) {
1378         size_t capacity = alloc_capacity(r);
1379         available_old_collector += capacity;
1380         consumed_old_collector += region_size_bytes - capacity;
1381         buffer[idx] = (capacity == region_size_bytes)? 'O': 'o';
1382       } else if (r->is_humongous()) {
1383         if (r->is_old()) {
1384           buffer[idx] = 'H';
1385           retired_old_humongous += region_size_bytes;
1386         } else {
1387           buffer[idx] = 'h';
1388           retired_young_humongous += region_size_bytes;
1389         }
1390       } else {
1391         if (r->is_old()) {
1392           buffer[idx] = '~';
1393           retired_old_waste += alloc_capacity(r);
1394           retired_old += region_size_bytes;
1395         } else {
1396           buffer[idx] = '_';
1397           retired_young_waste += alloc_capacity(r);
1398           retired_young += region_size_bytes;
1399         }
1400       }
1401     }
1402     uint remnant = _heap->num_regions() % 64;
1403     if (remnant > 0) {
1404       buffer[remnant] = '\0';
1405     } else {
1406       remnant = 64;
1407     }
1408     log_debug(gc, free)(" %6u: %s", (uint) (_heap->num_regions() - remnant), buffer);
1409     size_t total_young = retired_young + retired_young_humongous;
1410     size_t total_old = retired_old + retired_old_humongous;
1411   }
1412 #endif
1413 
1414   LogTarget(Info, gc, free) lt;
1415   if (lt.is_enabled()) {
1416     ResourceMark rm;
1417     LogStream ls(lt);
1418 
1419     {
1420       size_t last_idx = 0;
1421       size_t max = 0;
1422       size_t max_contig = 0;
1423       size_t empty_contig = 0;
1424 
1425       size_t total_used = 0;
1426       size_t total_free = 0;
1427       size_t total_free_ext = 0;
1428 
1429       for (size_t idx = _free_sets.leftmost(Mutator); idx <= _free_sets.rightmost(Mutator); idx++) {
1430         if (_free_sets.in_free_set(idx, Mutator)) {
1431           ShenandoahHeapRegion *r = _heap->get_region(idx);
1432           size_t free = alloc_capacity(r);

1433           max = MAX2(max, free);

1434           if (r->is_empty()) {
1435             total_free_ext += free;
1436             if (last_idx + 1 == idx) {
1437               empty_contig++;
1438             } else {
1439               empty_contig = 1;
1440             }
1441           } else {
1442             empty_contig = 0;
1443           }

1444           total_used += r->used();
1445           total_free += free;

1446           max_contig = MAX2(max_contig, empty_contig);
1447           last_idx = idx;
1448         }
1449       }
1450 
1451       size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
1452       size_t free = capacity() - used();
1453 
1454       assert(free == total_free, "Sum of free within mutator regions (" SIZE_FORMAT
1455              ") should match mutator capacity (" SIZE_FORMAT ") minus mutator used (" SIZE_FORMAT ")",
1456              total_free, capacity(), used());
1457 
1458       ls.print("Free: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s regular, " SIZE_FORMAT "%s humongous, ",
1459                byte_size_in_proper_unit(total_free),    proper_unit_for_byte_size(total_free),
1460                byte_size_in_proper_unit(max),           proper_unit_for_byte_size(max),
1461                byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous)
1462       );
1463 
1464       ls.print("Frag: ");
1465       size_t frag_ext;
1466       if (total_free_ext > 0) {
1467         frag_ext = 100 - (100 * max_humongous / total_free_ext);
1468       } else {
1469         frag_ext = 0;
1470       }
1471       ls.print(SIZE_FORMAT "%% external, ", frag_ext);
1472 
1473       size_t frag_int;
1474       if (_free_sets.count(Mutator) > 0) {
1475         frag_int = (100 * (total_used / _free_sets.count(Mutator)) / ShenandoahHeapRegion::region_size_bytes());
1476       } else {
1477         frag_int = 0;
1478       }
1479       ls.print(SIZE_FORMAT "%% internal; ", frag_int);
1480       ls.print("Used: " SIZE_FORMAT "%s, Mutator Free: " SIZE_FORMAT,
1481                byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used), _free_sets.count(Mutator));
1482     }
1483 
1484     {
1485       size_t max = 0;
1486       size_t total_free = 0;
1487       size_t total_used = 0;
1488 
1489       for (size_t idx = _free_sets.leftmost(Collector); idx <= _free_sets.rightmost(Collector); idx++) {
1490         if (_free_sets.in_free_set(idx, Collector)) {
1491           ShenandoahHeapRegion *r = _heap->get_region(idx);
1492           size_t free = alloc_capacity(r);
1493           max = MAX2(max, free);
1494           total_free += free;
1495           total_used += r->used();
1496         }
1497       }
1498       ls.print(" Collector Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s; Used: " SIZE_FORMAT "%s",
1499                byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
1500                byte_size_in_proper_unit(max),        proper_unit_for_byte_size(max),
1501                byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used));
1502     }
1503 
1504     if (_heap->mode()->is_generational()) {
1505       size_t max = 0;
1506       size_t total_free = 0;
1507       size_t total_used = 0;
1508 
1509       for (size_t idx = _free_sets.leftmost(OldCollector); idx <= _free_sets.rightmost(OldCollector); idx++) {
1510         if (_free_sets.in_free_set(idx, OldCollector)) {
1511           ShenandoahHeapRegion *r = _heap->get_region(idx);
1512           size_t free = alloc_capacity(r);
1513           max = MAX2(max, free);
1514           total_free += free;
1515           total_used += r->used();
1516         }
1517       }
1518       ls.print_cr(" Old Collector Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s; Used: " SIZE_FORMAT "%s",
1519                   byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
1520                   byte_size_in_proper_unit(max),        proper_unit_for_byte_size(max),
1521                   byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used));
1522     }
1523   }
1524 }
1525 
1526 HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
1527   shenandoah_assert_heaplocked();

1528 
1529   // Allocation request is known to satisfy all memory budgeting constraints.
1530   if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
1531     switch (req.type()) {
1532       case ShenandoahAllocRequest::_alloc_shared:
1533       case ShenandoahAllocRequest::_alloc_shared_gc:
1534         in_new_region = true;
1535         return allocate_contiguous(req);
1536       case ShenandoahAllocRequest::_alloc_plab:
1537       case ShenandoahAllocRequest::_alloc_gclab:
1538       case ShenandoahAllocRequest::_alloc_tlab:
1539         in_new_region = false;
1540         assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
1541                req.size(), ShenandoahHeapRegion::humongous_threshold_words());
1542         return nullptr;
1543       default:
1544         ShouldNotReachHere();
1545         return nullptr;
1546     }
1547   } else {
1548     return allocate_single(req, in_new_region);
1549   }
1550 }
1551 
1552 size_t ShenandoahFreeSet::unsafe_peek_free() const {
1553   // Deliberately not locked, this method is unsafe when free set is modified.
1554 
1555   for (size_t index = _free_sets.leftmost(Mutator); index <= _free_sets.rightmost(Mutator); index++) {
1556     if (index < _free_sets.max() && _free_sets.in_free_set(index, Mutator)) {
1557       ShenandoahHeapRegion* r = _heap->get_region(index);
1558       if (r->free() >= MinTLABSize) {
1559         return r->free();
1560       }
1561     }
1562   }
1563 
1564   // It appears that no regions are left
1565   return 0;
1566 }
1567 
1568 void ShenandoahFreeSet::print_on(outputStream* out) const {
1569   out->print_cr("Mutator Free Set: " SIZE_FORMAT "", _free_sets.count(Mutator));
1570   for (size_t index = _free_sets.leftmost(Mutator); index <= _free_sets.rightmost(Mutator); index++) {
1571     if (_free_sets.in_free_set(index, Mutator)) {
1572       _heap->get_region(index)->print_on(out);
1573     }
1574   }
1575   out->print_cr("Collector Free Set: " SIZE_FORMAT "", _free_sets.count(Collector));
1576   for (size_t index = _free_sets.leftmost(Collector); index <= _free_sets.rightmost(Collector); index++) {
1577     if (_free_sets.in_free_set(index, Collector)) {
1578       _heap->get_region(index)->print_on(out);
1579     }
1580   }
1581   if (_heap->mode()->is_generational()) {
1582     out->print_cr("Old Collector Free Set: " SIZE_FORMAT "", _free_sets.count(OldCollector));
1583     for (size_t index = _free_sets.leftmost(OldCollector); index <= _free_sets.rightmost(OldCollector); index++) {
1584       if (_free_sets.in_free_set(index, OldCollector)) {
1585         _heap->get_region(index)->print_on(out);
1586       }
1587     }
1588   }
1589 }
1590 
1591 /*
1592  * Internal fragmentation metric: describes how fragmented the heap regions are.
1593  *
1594  * It is derived as:
1595  *
1596  *               sum(used[i]^2, i=0..k)
1597  *   IF = 1 - ------------------------------
1598  *              C * sum(used[i], i=0..k)
1599  *
1600  * ...where k is the number of regions in computation, C is the region capacity, and
1601  * used[i] is the used space in the region.
1602  *
1603  * The non-linearity causes IF to be lower for the cases where the same total heap
1604  * used is densely packed. For example:
1605  *   a) Heap is completely full  => IF = 0
1606  *   b) Heap is half full, first 50% regions are completely full => IF = 0
1607  *   c) Heap is half full, each region is 50% full => IF = 1/2
1608  *   d) Heap is quarter full, first 50% regions are completely full => IF = 0
1609  *   e) Heap is quarter full, each region is 25% full => IF = 3/4
1610  *   f) Heap has one small object per each region => IF =~ 1
1611  */
1612 double ShenandoahFreeSet::internal_fragmentation() {
1613   double squared = 0;
1614   double linear = 0;
1615   int count = 0;
1616 
1617   for (size_t index = _free_sets.leftmost(Mutator); index <= _free_sets.rightmost(Mutator); index++) {
1618     if (_free_sets.in_free_set(index, Mutator)) {
1619       ShenandoahHeapRegion* r = _heap->get_region(index);
1620       size_t used = r->used();
1621       squared += used * used;
1622       linear += used;
1623       count++;
1624     }
1625   }
1626 
1627   if (count > 0) {
1628     double s = squared / (ShenandoahHeapRegion::region_size_bytes() * linear);
1629     return 1 - s;
1630   } else {
1631     return 0;
1632   }
1633 }
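
     // For illustration only: a standalone sketch (not part of this file) of the same metric over a
     // plain array of per-region used sizes.  Worked case (c) above: with k regions each 50% full,
     // sum(used^2) = k*(C/2)^2 and C*sum(used) = k*C^2/2, so the quotient is 1/2 and IF = 1/2.
     static double sketch_internal_fragmentation(const size_t* used, size_t count, size_t region_capacity) {
       double squared = 0;
       double linear  = 0;
       for (size_t i = 0; i < count; i++) {
         squared += (double) used[i] * (double) used[i];
         linear  += (double) used[i];
       }
       return (linear > 0) ? 1.0 - squared / ((double) region_capacity * linear) : 0;
     }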
1634 
1635 /*
1636  * External fragmentation metric: describes how fragmented the heap is.
1637  *
1638  * It is derived as:
1639  *
1640  *   EF = 1 - largest_contiguous_free / total_free
1641  *
1642  * For example:
1643  *   a) Heap is completely empty => EF = 0
1644  *   b) Heap is completely full => EF = 0
1645  *   c) Heap is first-half full => EF = 1/2
1646  *   d) Heap is half full, full and empty regions interleave => EF =~ 1
1647  */
1648 double ShenandoahFreeSet::external_fragmentation() {
1649   size_t last_idx = 0;
1650   size_t max_contig = 0;
1651   size_t empty_contig = 0;
1652 
1653   size_t free = 0;
1654 
1655   for (size_t index = _free_sets.leftmost(Mutator); index <= _free_sets.rightmost(Mutator); index++) {
1656     if (_free_sets.in_free_set(index, Mutator)) {
1657       ShenandoahHeapRegion* r = _heap->get_region(index);
1658       if (r->is_empty()) {
1659         free += ShenandoahHeapRegion::region_size_bytes();
1660         if (last_idx + 1 == index) {
1661           empty_contig++;
1662         } else {
1663           empty_contig = 1;
1664         }
1665       } else {
1666         empty_contig = 0;
1667       }
1668 
1669       max_contig = MAX2(max_contig, empty_contig);
1670       last_idx = index;
1671     }
1672   }
1673 
1674   if (free > 0) {
1675     return 1 - (1.0 * max_contig * ShenandoahHeapRegion::region_size_bytes() / free);
1676   } else {
1677     return 0;
1678   }
1679 }
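
     // For illustration only: a standalone sketch (not part of this file) of the same metric computed
     // in whole regions, which is equivalent when all regions have the same size.  Worked case (d)
     // above: with full and empty regions interleaved, the largest contiguous empty run is a single
     // region out of k free regions, so EF = 1 - 1/k, which approaches 1 as k grows.
     static double sketch_external_fragmentation(const bool* is_empty, size_t count) {
       size_t max_contig = 0, run = 0, free_regions = 0;
       for (size_t i = 0; i < count; i++) {
         if (is_empty[i]) {
           free_regions++;
           run++;
           max_contig = (run > max_contig) ? run : max_contig;
         } else {
           run = 0;
         }
       }
       return (free_regions > 0) ? 1.0 - (double) max_contig / (double) free_regions : 0;
     }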
1680