/*
 * Copyright (c) 2016, 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/orderAccess.hpp"

ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
  _heap(heap),
  _mutator_free_bitmap(max_regions, mtGC),
  _collector_free_bitmap(max_regions, mtGC),
  _max(max_regions)
{
  clear_internal();
}

void ShenandoahFreeSet::increase_used(size_t num_bytes) {
  shenandoah_assert_heaplocked();
  _used += num_bytes;

  assert(_used <= _capacity, "must not use more than we have: used: " SIZE_FORMAT
         ", capacity: " SIZE_FORMAT ", num_bytes: " SIZE_FORMAT, _used, _capacity, num_bytes);
}

bool ShenandoahFreeSet::is_mutator_free(size_t idx) const {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
          idx, _max, _mutator_leftmost, _mutator_rightmost);
  return _mutator_free_bitmap.at(idx);
}

bool ShenandoahFreeSet::is_collector_free(size_t idx) const {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
          idx, _max, _collector_leftmost, _collector_rightmost);
  return _collector_free_bitmap.at(idx);
}

HeapWord* ShenandoahFreeSet::allocate_with_affiliation(ShenandoahRegionAffiliation affiliation, ShenandoahAllocRequest& req, bool& in_new_region) {
  for (size_t c = _collector_rightmost + 1; c > _collector_leftmost; c--) {
    // size_t is unsigned, need to dodge underflow when _leftmost = 0
    size_t idx = c - 1;
    if (is_collector_free(idx)) {
      ShenandoahHeapRegion* r = _heap->get_region(idx);
      if (r->affiliation() == affiliation) {
        HeapWord* result = try_allocate_in(r, req, in_new_region);
        if (result != NULL) {
          return result;
        }
      }
    }
  }
  return NULL;
}
HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
  // Scan the bitmap looking for a first fit.
  //
  // Leftmost and rightmost bounds provide enough caching to walk the bitmap efficiently. Normally,
  // we would find the region to allocate in right away.
  //
  // Allocations are biased: new application allocs go to the beginning of the heap, and GC allocs
  // go to the end. This makes application allocation faster, because we would clear lots
  // of regions from the beginning most of the time.
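  //
  // For illustration: with 100 regions, a TLAB request scans mutator-free regions upward from
  // _mutator_leftmost (near index 0), while a GCLAB or PLAB request scans collector-free regions
  // downward from _collector_rightmost (near index 99).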
  //
  // The free set maintains mutator and collector views, and normally they allocate in their own
  // views only, unless we have special cases for stealing and mixed allocations.

  // Overwrite with non-zero (non-NULL) values only if necessary for allocation bookkeeping.

  switch (req.type()) {
    case ShenandoahAllocRequest::_alloc_tlab:
    case ShenandoahAllocRequest::_alloc_shared: {
      // Try to allocate in the mutator view
      for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
        if (is_mutator_free(idx)) {
          // try_allocate_in() increases used if the allocation is successful.
          HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
          if (result != NULL) {
            return result;
          }
        }
      }

      // There is no recovery. Mutator does not touch collector view at all.
      break;
    }
    case ShenandoahAllocRequest::_alloc_gclab:
      // GCLABs are for evacuation, so we must be in the evacuation phase.  If this allocation is successful,
      // increment the relevant evac_expended counter rather than the used value.

    case ShenandoahAllocRequest::_alloc_plab:
      // PLABs always reside in old-gen and are only allocated during the evacuation phase.

    case ShenandoahAllocRequest::_alloc_shared_gc: {
      // First try to fit into a region that is already in use in the same generation.
      HeapWord* result = allocate_with_affiliation(req.affiliation(), req, in_new_region);
      if (result != NULL) {
        return result;
      }
      // Then try a free region that is dedicated to GC allocations.
      result = allocate_with_affiliation(FREE, req, in_new_region);
      if (result != NULL) {
        return result;
      }

      // No dice. Can we borrow space from mutator view?
      if (!ShenandoahEvacReserveOverflow) {
        return NULL;
      }

      // Try to steal an empty region from the mutator view.
      for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
        size_t idx = c - 1;
        if (is_mutator_free(idx)) {
          ShenandoahHeapRegion* r = _heap->get_region(idx);
          if (can_allocate_from(r)) {
            flip_to_gc(r);
            HeapWord *result = try_allocate_in(r, req, in_new_region);
            if (result != NULL) {
              return result;
            }
          }
        }
      }

      // No dice. Do not try to mix mutator and GC allocations, because
      // URWM moves due to GC allocations would expose unparsable mutator
      // allocations.
      break;
    }
    default:
      ShouldNotReachHere();
  }
  return NULL;
}

HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
  assert (!has_no_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());

  if (_heap->is_concurrent_weak_root_in_progress() &&
      r->is_trash()) {
    return NULL;
  }

  try_recycle_trashed(r);

  if (r->affiliation() == ShenandoahRegionAffiliation::FREE) {
    ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();

    r->set_affiliation(req.affiliation());

    if (r->is_old()) {
      // Any OLD region allocated during concurrent coalesce-and-fill does not need to be coalesced and filled because
      // all objects allocated within this region are above TAMS (and thus are implicitly marked).  In case this is an
      // OLD region and concurrent preparation for mixed evacuations visits this region before the start of the next
      // old-gen concurrent mark (i.e. this region is allocated following the start of old-gen concurrent mark but before
      // concurrent preparations for mixed evacuations are completed), we mark this region as not requiring any
      // coalesce-and-fill processing.
      r->end_preemptible_coalesce_and_fill();
      _heap->clear_cards_for(r);
    }

    assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom");
    assert(ctx->is_bitmap_clear_range(ctx->top_bitmap(r), r->end()), "Bitmap above top_bitmap() must be clear");

    // Leave top_bitmap alone.  The first time a heap region is put into service, top_bitmap should equal end.
    // Thereafter, it should represent the upper bound on parts of the bitmap that need to be cleared.
    log_debug(gc)("NOT clearing bitmap for region " SIZE_FORMAT ", top_bitmap: "
                  PTR_FORMAT " at transition from FREE to %s",
                  r->index(), p2i(ctx->top_bitmap(r)), affiliation_name(req.affiliation()));
  } else if (r->affiliation() != req.affiliation()) {
    return NULL;
  }

  in_new_region = r->is_empty();

  HeapWord* result = NULL;
  size_t size = req.size();

  // req.size() is in words, r->free() is in bytes.
  if (ShenandoahElasticTLAB && req.is_lab_alloc()) {
    if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
      // Need to ensure that PLABs are aligned on a multiple of the card size.
      size_t free = r->free();
      size_t usable_free = (free / CardTable::card_size()) << CardTable::card_shift();
      if ((free != usable_free) && (free - usable_free < ShenandoahHeap::min_fill_size() * HeapWordSize)) {
        // We'll have to add another card's memory to the padding
        usable_free -= CardTable::card_size();
      }
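      // For illustration (actual values are configuration-dependent): assuming 512-byte cards, a
      // region with free == 5000 bytes yields usable_free == 4608 bytes (9 full cards), leaving
      // 392 bytes of padding.  If that padding were smaller than the minimum fill size, we would
      // retreat by one more card so the remainder can host a valid filler object.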
      free /= HeapWordSize;
      usable_free /= HeapWordSize;
      if (size > usable_free) {
        size = usable_free;
      }
      if (size >= req.min_size()) {
        result = r->allocate_aligned(size, req, CardTable::card_size());
        if (result != nullptr && free > usable_free) {
          // Account for the alignment padding
          size_t padding = (free - usable_free) * HeapWordSize;
          increase_used(padding);
          assert(r->affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION, "All PLABs reside in old-gen");
          _heap->old_generation()->increase_used(padding);
          // For verification consistency, we need to report this padding to _heap
          _heap->increase_used(padding);
        }
      }
    } else {
      // This is a GCLAB or a TLAB allocation
      size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
      if (size > free) {
        size = free;
      }
      if (size >= req.min_size()) {
        result = r->allocate(size, req);
        assert (result != NULL, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size);
      }
    }
  } else if (req.is_lab_alloc() && req.type() == ShenandoahAllocRequest::_alloc_plab) {
    // Need to ensure that the PLAB is aligned on a multiple of the card size (as in the elastic case above),
    // so do the padding adjustment while free and usable_free are still in bytes.
    size_t free = r->free();
    size_t usable_free = (free / CardTable::card_size()) << CardTable::card_shift();
    if ((free != usable_free) && (free - usable_free < ShenandoahHeap::min_fill_size() * HeapWordSize)) {
      // We'll have to add another card's memory to the padding
      usable_free -= CardTable::card_size();
    }
    free /= HeapWordSize;
    usable_free /= HeapWordSize;
    if (size <= usable_free) {
      assert(size % CardTable::card_size_in_words() == 0, "PLAB size must be multiple of remembered set card size");

      result = r->allocate_aligned(size, req, CardTable::card_size());
      if (result != nullptr) {
        // Account for the alignment padding
        size_t padding = (free - usable_free) * HeapWordSize;
        increase_used(padding);
        assert(r->affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION, "All PLABs reside in old-gen");
        _heap->old_generation()->increase_used(padding);
        // For verification consistency, we need to report this padding to _heap
        _heap->increase_used(padding);
      }
    }
  } else {
    result = r->allocate(size, req);
  }

  if (result != NULL) {
    // Record actual allocation size
    req.set_actual_size(size);

    // Allocation successful, bump stats:
    if (req.is_mutator_alloc()) {
      // Mutator allocations always pull from young gen.
      _heap->young_generation()->increase_used(size * HeapWordSize);
      increase_used(size * HeapWordSize);
    } else {
      // assert(req.is_gc_alloc(), "Should be gc_alloc since req wasn't mutator alloc");

      // For GC allocations, we advance update_watermark because the objects relocated into this memory during
      // evacuation are not updated during evacuation.  For both young and old regions r, it is essential that all
      // PLABs be made parsable at the end of evacuation.  This is enabled by retiring all plabs at end of evacuation.
      // TODO: Making a PLAB parsable involves placing a filler object in its remnant memory but does not require
      // that the PLAB be disabled for all future purposes.  We may want to introduce a new service to make the
      // PLABs parsable while still allowing the PLAB to serve future allocation requests that arise during the
      // next evacuation pass.
      r->set_update_watermark(r->top());

      if (r->affiliation() == ShenandoahRegionAffiliation::YOUNG_GENERATION) {
        _heap->young_generation()->increase_used(size * HeapWordSize);
      } else {
        assert(r->affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION, "GC Alloc was not YOUNG so must be OLD");
        assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "old-gen allocations use PLAB or shared allocation");
        _heap->old_generation()->increase_used(size * HeapWordSize);
        // For PLABs, we'll sort out the difference between evac and promotion usage when we retire the PLAB.
      }
    }
  }
  if (result == NULL || has_no_alloc_capacity(r)) {
    // Region cannot afford this or future allocations. Retire it.
    //
    // While this seems a bit harsh, especially when this large allocation does not fit but the
    // next small one would, we risk inflating scan times when lots of almost-full regions precede
    // the fully-empty region where we want to allocate the entire TLAB.
    // TODO: Record first fully-empty region, and use that for large allocations and/or organize
    // available free segments within regions for more efficient searches for "good fit".

    // Record the remainder as allocation waste
    if (req.is_mutator_alloc()) {
      size_t waste = r->free();
      if (waste > 0) {
        increase_used(waste);
        _heap->generation_for(req.affiliation())->increase_allocated(waste);
        _heap->notify_mutator_alloc_words(waste >> LogHeapWordSize, true);
      }
    }

    size_t num = r->index();
    _collector_free_bitmap.clear_bit(num);
    _mutator_free_bitmap.clear_bit(num);
    // Touched the bounds? Need to update:
    if (touches_bounds(num)) {
      adjust_bounds();
    }
    assert_bounds();
  }
  return result;
}

bool ShenandoahFreeSet::touches_bounds(size_t num) const {
  return num == _collector_leftmost || num == _collector_rightmost || num == _mutator_leftmost || num == _mutator_rightmost;
}

void ShenandoahFreeSet::recompute_bounds() {
  // Reset to the most pessimistic case:
  _mutator_rightmost = _max - 1;
  _mutator_leftmost = 0;
  _collector_rightmost = _max - 1;
  _collector_leftmost = 0;

  // ...and adjust from there
  adjust_bounds();
}

void ShenandoahFreeSet::adjust_bounds() {
  // Rewind both mutator bounds until the next bit.
  while (_mutator_leftmost < _max && !is_mutator_free(_mutator_leftmost)) {
    _mutator_leftmost++;
  }
  while (_mutator_rightmost > 0 && !is_mutator_free(_mutator_rightmost)) {
    _mutator_rightmost--;
  }
  // Rewind both collector bounds until the next bit.
  while (_collector_leftmost < _max && !is_collector_free(_collector_leftmost)) {
    _collector_leftmost++;
  }
  while (_collector_rightmost > 0 && !is_collector_free(_collector_rightmost)) {
    _collector_rightmost--;
  }
}

HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
  shenandoah_assert_heaplocked();

  size_t words_size = req.size();
  size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
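  // For illustration (hypothetical numbers; the actual region size is configuration-dependent):
  // if region_size_words() were 1M words, a humongous request of 2.5M words would need 3 regions,
  // with the trailing region only half used.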

  // No regions left to satisfy allocation, bye.
  if (num > mutator_count()) {
    return NULL;
  }

  // Find the contiguous interval of $num regions, starting from $beg and ending in $end,
  // inclusive. Contiguous allocations are biased to the beginning.

  size_t beg = _mutator_leftmost;
  size_t end = beg;

  while (true) {
    if (end >= _max) {
      // Hit the end, goodbye
      return NULL;
    }

    // If the next region is not mutator-free, the candidate run is not contiguous and the current
    // [beg; end] is useless, so we may fast-forward.  Likewise if the region is not completely free.
    if (!is_mutator_free(end) || !can_allocate_from(_heap->get_region(end))) {
      end++;
      beg = end;
      continue;
    }

    if ((end - beg + 1) == num) {
      // found the match
      break;
    }

    end++;
  }

  size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
  ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();

  // Initialize regions:
  for (size_t i = beg; i <= end; i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    try_recycle_trashed(r);

    assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous");
    assert(r->is_empty(), "Should be empty");

    if (i == beg) {
      r->make_humongous_start();
    } else {
      r->make_humongous_cont();
    }

    // Trailing region may be non-full, record the remainder there
    size_t used_words;
    if ((i == end) && (remainder != 0)) {
      used_words = remainder;
    } else {
      used_words = ShenandoahHeapRegion::region_size_words();
    }

    r->set_affiliation(req.affiliation());
    r->set_update_watermark(r->bottom());
    r->set_top(r->bottom());    // Set top to bottom so we can capture TAMS
    ctx->capture_top_at_mark_start(r);
    r->set_top(r->bottom() + used_words); // Then change top to reflect allocation of humongous object.
    assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom");
    assert(ctx->is_bitmap_clear_range(ctx->top_bitmap(r), r->end()), "Bitmap above top_bitmap() must be clear");

    // Leave top_bitmap alone.  The first time a heap region is put into service, top_bitmap should equal end.
    // Thereafter, it should represent the upper bound on parts of the bitmap that need to be cleared.
    // ctx->clear_bitmap(r);
    log_debug(gc)("NOT clearing bitmap for Humongous region [" PTR_FORMAT ", " PTR_FORMAT "], top_bitmap: "
                  PTR_FORMAT " at transition from FREE to %s",
                  p2i(r->bottom()), p2i(r->end()), p2i(ctx->top_bitmap(r)), affiliation_name(req.affiliation()));

    _mutator_free_bitmap.clear_bit(r->index());
  }

  // While individual regions report their true use, all humongous regions are
  // marked used in the free set.
  increase_used(ShenandoahHeapRegion::region_size_bytes() * num);
  if (req.affiliation() == ShenandoahRegionAffiliation::YOUNG_GENERATION) {
    _heap->young_generation()->increase_used(words_size * HeapWordSize);
  } else if (req.affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION) {
    _heap->old_generation()->increase_used(words_size * HeapWordSize);
  }

  if (remainder != 0) {
    // Record this remainder as allocation waste
    size_t waste = ShenandoahHeapRegion::region_size_words() - remainder;
    _heap->notify_mutator_alloc_words(waste, true);
    _heap->generation_for(req.affiliation())->increase_allocated(waste * HeapWordSize);
  }

  // Allocated at left/rightmost? Move the bounds appropriately.
  if (beg == _mutator_leftmost || end == _mutator_rightmost) {
    adjust_bounds();
  }
  assert_bounds();
  req.set_actual_size(words_size);
  return _heap->get_region(beg)->bottom();
}

bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) {
  return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
}

size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) {
  if (r->is_trash()) {
    // This would be recycled on allocation path
    return ShenandoahHeapRegion::region_size_bytes();
  } else {
    return r->free();
  }
}

bool ShenandoahFreeSet::has_no_alloc_capacity(ShenandoahHeapRegion *r) {
  return alloc_capacity(r) == 0;
}

void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
  if (r->is_trash()) {
    _heap->decrease_used(r->used());
    r->recycle();
  }
}

void ShenandoahFreeSet::recycle_trash() {
  // The lock is not reentrant; check that we don't already hold it.
  shenandoah_assert_not_heaplocked();
  for (size_t i = 0; i < _heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    if (r->is_trash()) {
      ShenandoahHeapLocker locker(_heap->lock());
      try_recycle_trashed(r);
    }
    SpinPause(); // allow allocators to take the lock
  }
}

void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
  size_t idx = r->index();

  assert(_mutator_free_bitmap.at(idx), "Should be in mutator view");
  assert(can_allocate_from(r), "Should not be allocated");

  _mutator_free_bitmap.clear_bit(idx);
  _collector_free_bitmap.set_bit(idx);
  _collector_leftmost = MIN2(idx, _collector_leftmost);
  _collector_rightmost = MAX2(idx, _collector_rightmost);

  _capacity -= alloc_capacity(r);

  if (touches_bounds(idx)) {
    adjust_bounds();
  }
  assert_bounds();

  // We do not ensure that the region is no longer trash,
  // relying on try_allocate_in(), which always comes next,
  // to recycle trash before attempting to allocate anything in the region.
}

void ShenandoahFreeSet::clear() {
  shenandoah_assert_heaplocked();
  clear_internal();
}

void ShenandoahFreeSet::clear_internal() {
  _mutator_free_bitmap.clear();
  _collector_free_bitmap.clear();
  _mutator_leftmost = _max;
  _mutator_rightmost = 0;
  _collector_leftmost = _max;
  _collector_rightmost = 0;
  _capacity = 0;
  _used = 0;
}

void ShenandoahFreeSet::rebuild() {
  shenandoah_assert_heaplocked();
  clear();

  log_debug(gc)("Rebuilding FreeSet");
  for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
    ShenandoahHeapRegion* region = _heap->get_region(idx);
    if (region->is_alloc_allowed() || region->is_trash()) {
      assert(!region->is_cset(), "Shouldn't be adding those to the free set");

      // Do not add regions that would surely fail allocation
      if (has_no_alloc_capacity(region)) continue;

      _capacity += alloc_capacity(region);
      assert(_used <= _capacity, "must not use more than we have");

      assert(!is_mutator_free(idx), "We are about to add it, it shouldn't be there already");
      _mutator_free_bitmap.set_bit(idx);

      log_debug(gc)("  Setting Region " SIZE_FORMAT " _mutator_free_bitmap bit to true", idx);
    }
  }

  // Evac reserve: reserve trailing space for evacuations
  if (!_heap->mode()->is_generational()) {
    size_t to_reserve = (_heap->max_capacity() / 100) * ShenandoahEvacReserve;
    reserve_regions(to_reserve);
  } else {
    size_t young_reserve = (_heap->young_generation()->max_capacity() / 100) * ShenandoahEvacReserve;
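    // For illustration (hypothetical numbers): with ShenandoahEvacReserve at its usual default of 5
    // and a 1024 MB young generation, young_reserve would be (1024 MB / 100) * 5 = ~51 MB, and
    // reserve_regions() below would shift enough trailing free regions from the mutator view into
    // the collector view to cover it.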
    // Note that all allocations performed from old-gen are performed by GC, generally using PLABs for both
    // promotions and evacuations.  The partition between which old memory is reserved for evacuation and
    // which is reserved for promotion is enforced using thread-local variables that prescribe intentions within
    // each PLAB.  We do not reserve any old-gen memory here, in order to facilitate the loaning of old-gen
    // memory for young-gen purposes.
    size_t old_reserve = 0;
    size_t to_reserve = young_reserve + old_reserve;
    reserve_regions(to_reserve);
  }

  recompute_bounds();
  assert_bounds();
}

void ShenandoahFreeSet::reserve_regions(size_t to_reserve) {
  size_t reserved = 0;

  for (size_t idx = _heap->num_regions() - 1; idx > 0; idx--) {
    if (reserved >= to_reserve) break;

    ShenandoahHeapRegion* region = _heap->get_region(idx);
    if (_mutator_free_bitmap.at(idx) && can_allocate_from(region)) {
      _mutator_free_bitmap.clear_bit(idx);
      _collector_free_bitmap.set_bit(idx);
      size_t ac = alloc_capacity(region);
      _capacity -= ac;
      reserved += ac;
      log_debug(gc)("  Shifting region " SIZE_FORMAT " from mutator_free to collector_free", idx);
    }
  }
}

void ShenandoahFreeSet::log_status() {
  shenandoah_assert_heaplocked();

  LogTarget(Info, gc, ergo) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);

    {
      size_t last_idx = 0;
      size_t max = 0;
      size_t max_contig = 0;
      size_t empty_contig = 0;

      size_t total_used = 0;
      size_t total_free = 0;
      size_t total_free_ext = 0;

      for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
        if (is_mutator_free(idx)) {
          ShenandoahHeapRegion *r = _heap->get_region(idx);
          size_t free = alloc_capacity(r);

          max = MAX2(max, free);

          if (r->is_empty()) {
            total_free_ext += free;
            if (last_idx + 1 == idx) {
              empty_contig++;
            } else {
              empty_contig = 1;
            }
          } else {
            empty_contig = 0;
          }

          total_used += r->used();
          total_free += free;

          max_contig = MAX2(max_contig, empty_contig);
          last_idx = idx;
        }
      }

      size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
      size_t free = capacity() - used();

      ls.print("Free: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s regular, " SIZE_FORMAT "%s humongous, ",
               byte_size_in_proper_unit(total_free),    proper_unit_for_byte_size(total_free),
               byte_size_in_proper_unit(max),           proper_unit_for_byte_size(max),
               byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous)
      );

      ls.print("Frag: ");
      size_t frag_ext;
      if (total_free_ext > 0) {
        frag_ext = 100 - (100 * max_humongous / total_free_ext);
      } else {
        frag_ext = 0;
      }
      ls.print(SIZE_FORMAT "%% external, ", frag_ext);

      size_t frag_int;
      if (mutator_count() > 0) {
        frag_int = (100 * (total_used / mutator_count()) / ShenandoahHeapRegion::region_size_bytes());
      } else {
        frag_int = 0;
      }
      ls.print(SIZE_FORMAT "%% internal; ", frag_int);
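      // For illustration (hypothetical numbers): 4 mutator-free regions of 1 MB each with 3 MB used
      // in total average 768 KB per region, so frag_int = 100 * 768 KB / 1 MB = 75.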
    }

    {
      size_t max = 0;
      size_t total_free = 0;

      for (size_t idx = _collector_leftmost; idx <= _collector_rightmost; idx++) {
        if (is_collector_free(idx)) {
          ShenandoahHeapRegion *r = _heap->get_region(idx);
          size_t free = alloc_capacity(r);
          max = MAX2(max, free);
          total_free += free;
        }
      }

      ls.print_cr("Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s",
                  byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
                  byte_size_in_proper_unit(max),        proper_unit_for_byte_size(max));
    }
  }
}

HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
  shenandoah_assert_heaplocked();
  assert_bounds();

  // Allocation request is known to satisfy all memory budgeting constraints.
  if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
    switch (req.type()) {
      case ShenandoahAllocRequest::_alloc_shared:
      case ShenandoahAllocRequest::_alloc_shared_gc:
        in_new_region = true;
        return allocate_contiguous(req);
      case ShenandoahAllocRequest::_alloc_plab:
      case ShenandoahAllocRequest::_alloc_gclab:
      case ShenandoahAllocRequest::_alloc_tlab:
        in_new_region = false;
        assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
               req.size(), ShenandoahHeapRegion::humongous_threshold_words());
        return NULL;
      default:
        ShouldNotReachHere();
        return NULL;
    }
  } else {
    return allocate_single(req, in_new_region);
  }
}

size_t ShenandoahFreeSet::unsafe_peek_free() const {
  // Deliberately not locked, this method is unsafe when the free set is modified.

  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (index < _max && is_mutator_free(index)) {
      ShenandoahHeapRegion* r = _heap->get_region(index);
      if (r->free() >= MinTLABSize) {
        return r->free();
      }
    }
  }

  // It appears that no regions are left
  return 0;
}

void ShenandoahFreeSet::print_on(outputStream* out) const {
  out->print_cr("Mutator Free Set: " SIZE_FORMAT "", mutator_count());
  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (is_mutator_free(index)) {
      _heap->get_region(index)->print_on(out);
    }
  }
  out->print_cr("Collector Free Set: " SIZE_FORMAT "", collector_count());
  for (size_t index = _collector_leftmost; index <= _collector_rightmost; index++) {
    if (is_collector_free(index)) {
      _heap->get_region(index)->print_on(out);
    }
  }
}

/*
 * Internal fragmentation metric: describes how fragmented the heap regions are.
 *
 * It is derived as:
 *
 *               sum(used[i]^2, i=0..k)
 *   IF = 1 - ------------------------------
 *              C * sum(used[i], i=0..k)
 *
 * ...where k is the number of regions in computation, C is the region capacity, and
 * used[i] is the used space in the region.
 *
 * The non-linearity causes IF to be lower for the cases where the same total heap
 * used is densely packed. For example:
 *   a) Heap is completely full  => IF = 0
 *   b) Heap is half full, first 50% regions are completely full => IF = 0
 *   c) Heap is half full, each region is 50% full => IF = 1/2
 *   d) Heap is quarter full, first 50% regions are completely full => IF = 0
 *   e) Heap is quarter full, each region is 25% full => IF = 3/4
 *   f) Heap has one small object per each region => IF =~ 1
 */
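/*
 * Worked example (hypothetical numbers): three regions of capacity C = 100 with used = {100, 50, 0}:
 *   IF = 1 - (100^2 + 50^2 + 0^2) / (100 * (100 + 50 + 0)) = 1 - 12500 / 15000 =~ 0.17
 * Spreading the same 150 units evenly as {50, 50, 50} gives IF = 1 - 7500 / 15000 = 0.5,
 * i.e. more internal fragmentation for the same total usage.
 */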
double ShenandoahFreeSet::internal_fragmentation() {
  double squared = 0;
  double linear = 0;
  int count = 0;

  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (is_mutator_free(index)) {
      ShenandoahHeapRegion* r = _heap->get_region(index);
      size_t used = r->used();
      squared += used * used;
      linear += used;
      count++;
    }
  }

  if (count > 0) {
    double s = squared / (ShenandoahHeapRegion::region_size_bytes() * linear);
    return 1 - s;
  } else {
    return 0;
  }
}

/*
 * External fragmentation metric: describes how fragmented the heap is.
 *
 * It is derived as:
 *
 *   EF = 1 - largest_contiguous_free / total_free
 *
 * For example:
 *   a) Heap is completely empty => EF = 0
 *   b) Heap is completely full => EF = 0
 *   c) Heap is first-half full => EF = 1/2
 *   d) Heap is half full, full and empty regions interleave => EF =~ 1
 */
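/*
 * Worked example (hypothetical numbers): 10 mutator-free regions of size C, of which 4 are empty,
 * but the longest run of adjacent empty regions is 2:
 *   EF = 1 - (2 * C) / (4 * C) = 1/2
 * Note that the computation below counts only completely empty regions as "free", since only those
 * can host a contiguous (humongous) allocation.
 */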
double ShenandoahFreeSet::external_fragmentation() {
  size_t last_idx = 0;
  size_t max_contig = 0;
  size_t empty_contig = 0;

  size_t free = 0;

  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (is_mutator_free(index)) {
      ShenandoahHeapRegion* r = _heap->get_region(index);
      if (r->is_empty()) {
        free += ShenandoahHeapRegion::region_size_bytes();
        if (last_idx + 1 == index) {
          empty_contig++;
        } else {
          empty_contig = 1;
        }
      } else {
        empty_contig = 0;
      }

      max_contig = MAX2(max_contig, empty_contig);
      last_idx = index;
    }
  }

  if (free > 0) {
    return 1 - (1.0 * max_contig * ShenandoahHeapRegion::region_size_bytes() / free);
  } else {
    return 0;
  }
}

#ifdef ASSERT
void ShenandoahFreeSet::assert_bounds() const {
  // Performance invariants. Failing these would not break the free set, but performance
  // would suffer.
  assert (_mutator_leftmost <= _max, "leftmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, _mutator_leftmost,  _max);
  assert (_mutator_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _mutator_rightmost, _max);

  assert (_mutator_leftmost == _max || is_mutator_free(_mutator_leftmost),  "leftmost region should be free: " SIZE_FORMAT,  _mutator_leftmost);
  assert (_mutator_rightmost == 0   || is_mutator_free(_mutator_rightmost), "rightmost region should be free: " SIZE_FORMAT, _mutator_rightmost);

  size_t beg_off = _mutator_free_bitmap.get_next_one_offset(0);
  size_t end_off = _mutator_free_bitmap.get_next_one_offset(_mutator_rightmost + 1);
  assert (beg_off >= _mutator_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _mutator_leftmost);
  assert (end_off == _max,      "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, _mutator_rightmost);

  assert (_collector_leftmost <= _max, "leftmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, _collector_leftmost,  _max);
  assert (_collector_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _collector_rightmost, _max);

  assert (_collector_leftmost == _max || is_collector_free(_collector_leftmost),  "leftmost region should be free: " SIZE_FORMAT,  _collector_leftmost);
  assert (_collector_rightmost == 0   || is_collector_free(_collector_rightmost), "rightmost region should be free: " SIZE_FORMAT, _collector_rightmost);

  beg_off = _collector_free_bitmap.get_next_one_offset(0);
  end_off = _collector_free_bitmap.get_next_one_offset(_collector_rightmost + 1);
  assert (beg_off >= _collector_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _collector_leftmost);
  assert (end_off == _max,      "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, _collector_rightmost);
}
#endif