/*
 * Copyright (c) 2016, 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/orderAccess.hpp"

ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
  _heap(heap),
  _mutator_free_bitmap(max_regions, mtGC),
  _collector_free_bitmap(max_regions, mtGC),
  _max(max_regions)
{
  clear_internal();
}

void ShenandoahFreeSet::increase_used(size_t num_bytes) {
  shenandoah_assert_heaplocked();
  _used += num_bytes;

  assert(_used <= _capacity, "must not use more than we have: used: " SIZE_FORMAT
         ", capacity: " SIZE_FORMAT ", num_bytes: " SIZE_FORMAT, _used, _capacity, num_bytes);
}

bool ShenandoahFreeSet::is_mutator_free(size_t idx) const {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
          idx, _max, _mutator_leftmost, _mutator_rightmost);
  return _mutator_free_bitmap.at(idx);
}

bool ShenandoahFreeSet::is_collector_free(size_t idx) const {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
          idx, _max, _collector_leftmost, _collector_rightmost);
  return _collector_free_bitmap.at(idx);
}

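// Walk the collector view from the rightmost region towards the leftmost and return the first
// successful allocation from a free region whose affiliation matches the request. Returns NULL
// if no region with the requested affiliation can satisfy the allocation.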
HeapWord* ShenandoahFreeSet::allocate_with_affiliation(ShenandoahRegionAffiliation affiliation, ShenandoahAllocRequest& req, bool& in_new_region) {
  for (size_t c = _collector_rightmost + 1; c > _collector_leftmost; c--) {
    // size_t is unsigned, need to dodge underflow when _leftmost = 0
    size_t idx = c - 1;
    if (is_collector_free(idx)) {
      ShenandoahHeapRegion* r = _heap->get_region(idx);
      if (r->affiliation() == affiliation) {
        HeapWord* result = try_allocate_in(r, req, in_new_region);
        if (result != NULL) {
          return result;
        }
      }
    }
  }
  return NULL;
}

HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
  // Scan the bitmap looking for a first fit.
  //
  // Leftmost and rightmost bounds provide enough caching to walk the bitmap efficiently. Normally,
  // we would find the region to allocate in right away.
  //
  // Allocations are biased: new application allocs go to the beginning of the heap, and GC allocs
  // go to the end. This makes application allocation faster, because we would clear lots
  // of regions from the beginning most of the time.
  //
  // The free set maintains mutator and collector views, and normally they allocate from their own
  // views only, unless we special-case stealing and mixed allocations.

  switch (req.type()) {
    case ShenandoahAllocRequest::_alloc_tlab:
    case ShenandoahAllocRequest::_alloc_shared: {

      // Try to allocate in the mutator view
      for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
        if (is_mutator_free(idx)) {
          HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
          if (result != NULL) {
            return result;
          }
        }
      }

      // There is no recovery. Mutator does not touch collector view at all.
      break;
    }
    case ShenandoahAllocRequest::_alloc_gclab:
    case ShenandoahAllocRequest::_alloc_plab:
    case ShenandoahAllocRequest::_alloc_shared_gc: {
      // First try to fit into a region that is already in use in the same generation.
      HeapWord* result = allocate_with_affiliation(req.affiliation(), req, in_new_region);
      if (result != NULL) {
        return result;
      }
      // Then try a free region that is dedicated to GC allocations.
      result = allocate_with_affiliation(FREE, req, in_new_region);
      if (result != NULL) {
        return result;
      }

      // No dice. Can we borrow space from mutator view?
      if (!ShenandoahEvacReserveOverflow) {
        return NULL;
      }

      // Try to steal an empty region from the mutator view.
      for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
        size_t idx = c - 1;
        if (is_mutator_free(idx)) {
          ShenandoahHeapRegion* r = _heap->get_region(idx);
          if (can_allocate_from(r)) {
            flip_to_gc(r);
            HeapWord *result = try_allocate_in(r, req, in_new_region);
            if (result != NULL) {
              return result;
            }
          }
        }
      }

      // No dice. Do not try to mix mutator and GC allocations, because update-watermark (URWM)
      // moves caused by GC allocations would expose unparsable mutator allocations.
      break;
    }
    default:
      ShouldNotReachHere();
  }
  return NULL;
}

HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
  assert (!has_no_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());

  if (_heap->is_concurrent_weak_root_in_progress() &&
      r->is_trash()) {
    return NULL;
  }

  try_recycle_trashed(r);

  if (r->affiliation() == ShenandoahRegionAffiliation::FREE) {
    ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();

    r->set_affiliation(req.affiliation());
    r->set_update_watermark(r->bottom());

    // Any OLD region allocated during concurrent coalesce-and-fill does not need to be coalesced and filled because
    // all objects allocated within this region are above TAMS (and thus are implicitly marked).  In case this is an
    // OLD region and concurrent preparation for mixed evacuations visits this region before the start of the next
    // old-gen concurrent mark (i.e. this region is allocated following the start of old-gen concurrent mark but before
    // concurrent preparations for mixed evacuations are completed), we mark this region as not requiring any
    // coalesce-and-fill processing.  This code is only necessary if req.affiliation() is OLD, but harmless if not.
    r->end_preemptible_coalesce_and_fill();
    ctx->capture_top_at_mark_start(r);

    assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom");
    assert(ctx->is_bitmap_clear_range(ctx->top_bitmap(r), r->end()), "Bitmap above top_bitmap() must be clear");

    // Leave top_bitmap alone.  The first time a heap region is put into service, top_bitmap should equal end.
    // Thereafter, it should represent the upper bound on parts of the bitmap that need to be cleared.
    log_debug(gc)("NOT clearing bitmap for region " SIZE_FORMAT ", top_bitmap: "
                  PTR_FORMAT " at transition from FREE to %s",
                  r->index(), p2i(ctx->top_bitmap(r)), affiliation_name(req.affiliation()));
  } else if (r->affiliation() != req.affiliation()) {
    return NULL;
  }

  in_new_region = r->is_empty();

  HeapWord* result = NULL;
  size_t size = req.size();

  // req.size() is in words, free() is in bytes.
  if (ShenandoahElasticTLAB && req.is_lab_alloc()) {
    if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
      // Need to ensure that PLABs are aligned on a multiple of the card size.
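      // Round the available bytes down to a whole number of cards, then convert both values to
      // words so they are comparable with req.size(). For example, with 512-byte cards and
      // free() == 1000 bytes, usable_free becomes 512 bytes (64 words on a 64-bit VM).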
      size_t free = r->free();
      size_t usable_free = (free / CardTable::card_size) << CardTable::card_shift;
      free /= HeapWordSize;
      usable_free /= HeapWordSize;
      if (size > usable_free) {
        size = usable_free;
      }
      if (size >= req.min_size()) {
        result = r->allocate_aligned(size, req, CardTable::card_size);
        if (result != nullptr && free > usable_free) {
          // Account for the alignment padding
          size_t padding = (free - usable_free) * HeapWordSize;
          increase_used(padding);
          assert(r->affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION, "All PLABs reside in old-gen");
          _heap->old_generation()->increase_used(padding);
          // For verification consistency, we need to report this padding to _heap
          _heap->increase_used(padding);
        }
      }
    } else {
      size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
      if (size > free) {
        size = free;
      }
      if (size >= req.min_size()) {
        result = r->allocate(size, req);
        assert (result != NULL, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size);
      }
    }
  } else if (req.is_lab_alloc() && req.type() == ShenandoahAllocRequest::_alloc_plab) {
    size_t free = r->free();
    size_t usable_free = (free / CardTable::card_size) << CardTable::card_shift;
    free /= HeapWordSize;
    usable_free /= HeapWordSize;
    if (size <= usable_free) {
      assert(size % CardTable::card_size_in_words == 0, "PLAB size must be multiple of remembered set card size");

      result = r->allocate_aligned(size, req, CardTable::card_size);
      if (result != nullptr) {
        // Account for the alignment padding
        size_t padding = (free - usable_free) * HeapWordSize;
        increase_used(padding);
        assert(r->affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION, "All PLABs reside in old-gen");
        _heap->old_generation()->increase_used(padding);
        // For verification consistency, we need to report this padding to _heap
        _heap->increase_used(padding);
      }
    }
  } else {
    result = r->allocate(size, req);
  }

  if (result != NULL) {
    // Record actual allocation size
    req.set_actual_size(size);

    // Allocation successful, bump stats:
    if (req.is_mutator_alloc()) {
      increase_used(size * HeapWordSize);
    } else if (req.is_gc_alloc()) {
      // For GC allocations, we advance update_watermark because the objects relocated into this memory during
      // evacuation are not updated during evacuation.  For both young and old regions r, it is essential that all
      // PLABs be made parsable at the end of evacuation.  This is enabled by retiring all plabs at end of evacuation.
      // TODO: Making a PLAB parsable involves placing a filler object in its remnant memory but does not require
      // that the PLAB be disabled for all future purposes.  We may want to introduce a new service to make the
      // PLABs parsable while still allowing the PLAB to serve future allocation requests that arise during the
      // next evacuation pass.
      r->set_update_watermark(r->top());
    }

    if (r->affiliation() == ShenandoahRegionAffiliation::YOUNG_GENERATION) {
      _heap->young_generation()->increase_used(size * HeapWordSize);
    } else if (r->affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION) {
      _heap->old_generation()->increase_used(size * HeapWordSize);
    }
  }

  if (result == NULL || has_no_alloc_capacity(r)) {
    // Region cannot afford this or future allocations. Retire it.
    //
    // While this seems a bit harsh, especially when this large allocation does not fit but the
    // next small one would, we risk inflating scan times when lots of almost-full regions precede
    // the fully-empty region where we want to allocate the entire TLAB.
    // TODO: Record first fully-empty region, and use that for large allocations

    // Record the remainder as allocation waste
    if (req.is_mutator_alloc()) {
      size_t waste = r->free();
      if (waste > 0) {
        increase_used(waste);
        _heap->generation_for(req.affiliation())->increase_allocated(waste);
        _heap->notify_mutator_alloc_words(waste >> LogHeapWordSize, true);
      }
    }

    size_t num = r->index();
    _collector_free_bitmap.clear_bit(num);
    _mutator_free_bitmap.clear_bit(num);
    // Touched the bounds? Need to update:
    if (touches_bounds(num)) {
      adjust_bounds();
    }
    assert_bounds();
  }
  return result;
}

bool ShenandoahFreeSet::touches_bounds(size_t num) const {
  return num == _collector_leftmost || num == _collector_rightmost || num == _mutator_leftmost || num == _mutator_rightmost;
}

void ShenandoahFreeSet::recompute_bounds() {
  // Reset to the most pessimistic case:
  _mutator_rightmost = _max - 1;
  _mutator_leftmost = 0;
  _collector_rightmost = _max - 1;
  _collector_leftmost = 0;

  // ...and adjust from there
  adjust_bounds();
}

void ShenandoahFreeSet::adjust_bounds() {
  // Rewind both mutator bounds until the next bit.
  while (_mutator_leftmost < _max && !is_mutator_free(_mutator_leftmost)) {
    _mutator_leftmost++;
  }
  while (_mutator_rightmost > 0 && !is_mutator_free(_mutator_rightmost)) {
    _mutator_rightmost--;
  }
  // Rewind both collector bounds until the next bit.
  while (_collector_leftmost < _max && !is_collector_free(_collector_leftmost)) {
    _collector_leftmost++;
  }
  while (_collector_rightmost > 0 && !is_collector_free(_collector_rightmost)) {
    _collector_rightmost--;
  }
}

HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
  shenandoah_assert_heaplocked();

  size_t words_size = req.size();
  size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
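  // required_regions() is expected to round the request up to whole regions, i.e. effectively
  // ceil(words_size * HeapWordSize / region_size_bytes), so a humongous object that only partially
  // covers its trailing region still claims that region in full.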

  // No regions left to satisfy allocation, bye.
  if (num > mutator_count()) {
    return NULL;
  }

  // Find a contiguous interval of $num regions, starting from $beg and ending in $end,
  // inclusive. Contiguous allocations are biased to the beginning of the heap.
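  // For example, with num == 3 the loop below slides a window [beg; end] to the right until it
  // covers three consecutive regions that are all mutator-free and completely allocatable.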

  size_t beg = _mutator_leftmost;
  size_t end = beg;

  while (true) {
    if (end >= _max) {
      // Hit the end, goodbye
      return NULL;
    }

    // If the next region is not mutator-free (the candidate interval is no longer contiguous in
    // the free set), or it is not completely allocatable, the current [beg; end] is useless, and
    // we may fast-forward past it.
    if (!is_mutator_free(end) || !can_allocate_from(_heap->get_region(end))) {
      end++;
      beg = end;
      continue;
    }

    if ((end - beg + 1) == num) {
      // found the match
      break;
    }

    end++;
  };

  size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
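  // remainder is the number of words the humongous object occupies in its last region; zero means
  // the object ends exactly on a region boundary and the trailing region is fully used.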
  ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();

  // Initialize regions:
  for (size_t i = beg; i <= end; i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    try_recycle_trashed(r);

    assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous");
    assert(r->is_empty(), "Should be empty");

    if (i == beg) {
      r->make_humongous_start();
    } else {
      r->make_humongous_cont();
    }

    // Trailing region may be non-full, record the remainder there
    size_t used_words;
    if ((i == end) && (remainder != 0)) {
      used_words = remainder;
    } else {
      used_words = ShenandoahHeapRegion::region_size_words();
    }

    r->set_affiliation(req.affiliation());
    r->set_update_watermark(r->bottom());
    r->set_top(r->bottom());    // Set top to bottom so we can capture TAMS
    ctx->capture_top_at_mark_start(r);
    r->set_top(r->bottom() + used_words); // Then change top to reflect allocation of humongous object.
    assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom");
    assert(ctx->is_bitmap_clear_range(ctx->top_bitmap(r), r->end()), "Bitmap above top_bitmap() must be clear");

    // Leave top_bitmap alone.  The first time a heap region is put into service, top_bitmap should equal end.
    // Thereafter, it should represent the upper bound on parts of the bitmap that need to be cleared.
    // ctx->clear_bitmap(r);
    log_debug(gc)("NOT clearing bitmap for Humongous region [" PTR_FORMAT ", " PTR_FORMAT "], top_bitmap: "
                  PTR_FORMAT " at transition from FREE to %s",
                  p2i(r->bottom()), p2i(r->end()), p2i(ctx->top_bitmap(r)), affiliation_name(req.affiliation()));

    _mutator_free_bitmap.clear_bit(r->index());
  }

  // While individual regions report their true use, all humongous regions are
  // marked used in the free set.
  increase_used(ShenandoahHeapRegion::region_size_bytes() * num);

  if (req.affiliation() == ShenandoahRegionAffiliation::YOUNG_GENERATION) {
    _heap->young_generation()->increase_used(words_size * HeapWordSize);
  } else if (req.affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION) {
    _heap->old_generation()->increase_used(words_size * HeapWordSize);
  }

  if (remainder != 0) {
    // Record this remainder as allocation waste
    size_t waste = ShenandoahHeapRegion::region_size_words() - remainder;
    _heap->notify_mutator_alloc_words(waste, true);
    _heap->generation_for(req.affiliation())->increase_allocated(waste * HeapWordSize);
  }

  // Allocated at left/rightmost? Move the bounds appropriately.
  if (beg == _mutator_leftmost || end == _mutator_rightmost) {
    adjust_bounds();
  }
  assert_bounds();

  req.set_actual_size(words_size);
  return _heap->get_region(beg)->bottom();
}

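// A region can serve new allocations if it is completely empty, or if it is trash that can be
// recycled, which is only safe while concurrent weak root processing is not in progress.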
bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) {
  return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
}

size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) {
  if (r->is_trash()) {
    // This would be recycled on allocation path
    return ShenandoahHeapRegion::region_size_bytes();
  } else {
    return r->free();
  }
}

bool ShenandoahFreeSet::has_no_alloc_capacity(ShenandoahHeapRegion *r) {
  return alloc_capacity(r) == 0;
}

void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
  if (r->is_trash()) {
    _heap->decrease_used(r->used());
    r->recycle();
  }
}

void ShenandoahFreeSet::recycle_trash() {
  // The lock is not reentrant; check that we do not already hold it.
  shenandoah_assert_not_heaplocked();

  for (size_t i = 0; i < _heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    if (r->is_trash()) {
      ShenandoahHeapLocker locker(_heap->lock());
      try_recycle_trashed(r);
    }
    SpinPause(); // allow allocators to take the lock
  }
}

void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
  size_t idx = r->index();

  assert(_mutator_free_bitmap.at(idx), "Should be in mutator view");
  assert(can_allocate_from(r), "Should not be allocated");

  _mutator_free_bitmap.clear_bit(idx);
  _collector_free_bitmap.set_bit(idx);
  _collector_leftmost = MIN2(idx, _collector_leftmost);
  _collector_rightmost = MAX2(idx, _collector_rightmost);

  _capacity -= alloc_capacity(r);

  if (touches_bounds(idx)) {
    adjust_bounds();
  }
  assert_bounds();

  // We do not ensure that the region is no longer trash,
  // relying on try_allocate_in(), which always comes next,
  // to recycle trash before attempting to allocate anything in the region.
}

void ShenandoahFreeSet::clear() {
  shenandoah_assert_heaplocked();
  clear_internal();
}

void ShenandoahFreeSet::clear_internal() {
  _mutator_free_bitmap.clear();
  _collector_free_bitmap.clear();
  _mutator_leftmost = _max;
  _mutator_rightmost = 0;
  _collector_leftmost = _max;
  _collector_rightmost = 0;
  _capacity = 0;
  _used = 0;
}

void ShenandoahFreeSet::rebuild() {
  shenandoah_assert_heaplocked();
  clear();

  log_debug(gc)("Rebuilding FreeSet");
  for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
    ShenandoahHeapRegion* region = _heap->get_region(idx);
    if (region->is_alloc_allowed() || region->is_trash()) {
      assert(!region->is_cset(), "Shouldn't be adding those to the free set");

      // Do not add regions that would surely fail allocation
      if (has_no_alloc_capacity(region)) continue;

      _capacity += alloc_capacity(region);
      assert(_used <= _capacity, "must not use more than we have");

      assert(!is_mutator_free(idx), "We are about to add it, it shouldn't be there already");
      _mutator_free_bitmap.set_bit(idx);

      log_debug(gc)("  Setting Region " SIZE_FORMAT " _mutator_free_bitmap bit to true", idx);
    }
  }

  // Evac reserve: reserve trailing space for evacuations
  size_t to_reserve = _heap->max_capacity() / 100 * ShenandoahEvacReserve;
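  // ShenandoahEvacReserve is a percentage of max heap capacity. For example, with
  // ShenandoahEvacReserve=5 and a 1024 MB heap, roughly 51 MB worth of trailing free regions are
  // shifted from the mutator view to the collector view below.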
  size_t reserved = 0;

  for (size_t idx = _heap->num_regions() - 1; idx > 0; idx--) {
    if (reserved >= to_reserve) break;

    ShenandoahHeapRegion* region = _heap->get_region(idx);
    if (_mutator_free_bitmap.at(idx) && can_allocate_from(region)) {
      _mutator_free_bitmap.clear_bit(idx);
      _collector_free_bitmap.set_bit(idx);
      size_t ac = alloc_capacity(region);
      _capacity -= ac;
      reserved += ac;
      log_debug(gc)("  Shifting region " SIZE_FORMAT " from mutator_free to collector_free", idx);
    }
  }

  recompute_bounds();
  assert_bounds();
}

void ShenandoahFreeSet::log_status() {
  shenandoah_assert_heaplocked();

  LogTarget(Info, gc, ergo) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);

    {
      size_t last_idx = 0;
      size_t max = 0;
      size_t max_contig = 0;
      size_t empty_contig = 0;

      size_t total_used = 0;
      size_t total_free = 0;
      size_t total_free_ext = 0;

      for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
        if (is_mutator_free(idx)) {
          ShenandoahHeapRegion *r = _heap->get_region(idx);
          size_t free = alloc_capacity(r);

          max = MAX2(max, free);

          if (r->is_empty()) {
            total_free_ext += free;
            if (last_idx + 1 == idx) {
              empty_contig++;
            } else {
              empty_contig = 1;
            }
          } else {
            empty_contig = 0;
          }

          total_used += r->used();
          total_free += free;

          max_contig = MAX2(max_contig, empty_contig);
          last_idx = idx;
        }
      }

      size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
      size_t free = capacity() - used();

      ls.print("Free: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s regular, " SIZE_FORMAT "%s humongous, ",
               byte_size_in_proper_unit(total_free),    proper_unit_for_byte_size(total_free),
               byte_size_in_proper_unit(max),           proper_unit_for_byte_size(max),
               byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous)
      );

      ls.print("Frag: ");
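      // External fragmentation: the share of completely free space that cannot back the largest
      // contiguous (humongous) allocation.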
      size_t frag_ext;
      if (total_free_ext > 0) {
        frag_ext = 100 - (100 * max_humongous / total_free_ext);
      } else {
        frag_ext = 0;
      }
      ls.print(SIZE_FORMAT "%% external, ", frag_ext);

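      // Internal fragmentation: average used space per mutator-free region, expressed as a
      // percentage of the region size.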
      size_t frag_int;
      if (mutator_count() > 0) {
        frag_int = (100 * (total_used / mutator_count()) / ShenandoahHeapRegion::region_size_bytes());
      } else {
        frag_int = 0;
      }
      ls.print(SIZE_FORMAT "%% internal; ", frag_int);
    }

    {
      size_t max = 0;
      size_t total_free = 0;

      for (size_t idx = _collector_leftmost; idx <= _collector_rightmost; idx++) {
        if (is_collector_free(idx)) {
          ShenandoahHeapRegion *r = _heap->get_region(idx);
          size_t free = alloc_capacity(r);
          max = MAX2(max, free);
          total_free += free;
        }
      }

      ls.print_cr("Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s",
                  byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
                  byte_size_in_proper_unit(max),        proper_unit_for_byte_size(max));
    }
  }
}

HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
  shenandoah_assert_heaplocked();
  assert_bounds();

  if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
    switch (req.type()) {
      case ShenandoahAllocRequest::_alloc_shared:
      case ShenandoahAllocRequest::_alloc_shared_gc:
        in_new_region = true;
        return allocate_contiguous(req);
      case ShenandoahAllocRequest::_alloc_plab:
      case ShenandoahAllocRequest::_alloc_gclab:
      case ShenandoahAllocRequest::_alloc_tlab:
        in_new_region = false;
        assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
               req.size(), ShenandoahHeapRegion::humongous_threshold_words());
        return NULL;
      default:
        ShouldNotReachHere();
        return NULL;
    }
  } else {
    return allocate_single(req, in_new_region);
  }
}

size_t ShenandoahFreeSet::unsafe_peek_free() const {
  // Deliberately not locked, this method is unsafe when free set is modified.

  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (index < _max && is_mutator_free(index)) {
      ShenandoahHeapRegion* r = _heap->get_region(index);
      if (r->free() >= MinTLABSize) {
        return r->free();
      }
    }
  }

  // It appears that no suitable regions are left
  return 0;
}

void ShenandoahFreeSet::print_on(outputStream* out) const {
  out->print_cr("Mutator Free Set: " SIZE_FORMAT "", mutator_count());
  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (is_mutator_free(index)) {
      _heap->get_region(index)->print_on(out);
    }
  }
  out->print_cr("Collector Free Set: " SIZE_FORMAT "", collector_count());
  for (size_t index = _collector_leftmost; index <= _collector_rightmost; index++) {
    if (is_collector_free(index)) {
      _heap->get_region(index)->print_on(out);
    }
  }
}

/*
 * Internal fragmentation metric: describes how fragmented the heap regions are.
 *
 * It is derived as:
 *
 *               sum(used[i]^2, i=0..k)
 *   IF = 1 - ------------------------------
 *              C * sum(used[i], i=0..k)
 *
 * ...where k is the number of regions in computation, C is the region capacity, and
 * used[i] is the used space in the region.
 *
 * The non-linearity causes IF to be lower for the cases where the same total heap
 * used is densely packed. For example:
 *   a) Heap is completely full  => IF = 0
 *   b) Heap is half full, first 50% regions are completely full => IF = 0
 *   c) Heap is half full, each region is 50% full => IF = 1/2
 *   d) Heap is quarter full, first 25% regions are completely full => IF = 0
 *   e) Heap is quarter full, each region is 25% full => IF = 3/4
 *   f) Heap has one small object per each region => IF =~ 1
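 *
 * Worked example for case (c): with k regions each half full, used[i] = C/2, so
 * IF = 1 - (k * (C/2)^2) / (C * k * (C/2)) = 1 - 1/2 = 1/2.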
 */
double ShenandoahFreeSet::internal_fragmentation() {
  double squared = 0;
  double linear = 0;
  int count = 0;

  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (is_mutator_free(index)) {
      ShenandoahHeapRegion* r = _heap->get_region(index);
      size_t used = r->used();
      squared += used * used;
      linear += used;
      count++;
    }
  }

  if (count > 0) {
    double s = squared / (ShenandoahHeapRegion::region_size_bytes() * linear);
    return 1 - s;
  } else {
    return 0;
  }
}

/*
 * External fragmentation metric: describes how fragmented the heap is.
 *
 * It is derived as:
 *
 *   EF = 1 - largest_contiguous_free / total_free
 *
 * For example:
 *   a) Heap is completely empty => EF = 0
 *   b) Heap is completely full => EF = 0
 *   c) Heap is first-half full => EF = 1/2
 *   d) Heap is half full, full and empty regions interleave => EF =~ 1
 */
double ShenandoahFreeSet::external_fragmentation() {
  size_t last_idx = 0;
  size_t max_contig = 0;
  size_t empty_contig = 0;

  size_t free = 0;

  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (is_mutator_free(index)) {
      ShenandoahHeapRegion* r = _heap->get_region(index);
      if (r->is_empty()) {
        free += ShenandoahHeapRegion::region_size_bytes();
        if (last_idx + 1 == index) {
          empty_contig++;
        } else {
          empty_contig = 1;
        }
      } else {
        empty_contig = 0;
      }

      max_contig = MAX2(max_contig, empty_contig);
      last_idx = index;
    }
  }

  if (free > 0) {
    return 1 - (1.0 * max_contig * ShenandoahHeapRegion::region_size_bytes() / free);
  } else {
    return 0;
  }
}

#ifdef ASSERT
void ShenandoahFreeSet::assert_bounds() const {
  // Performance invariants. Failing these would not break the free set, but performance
  // would suffer.
  assert (_mutator_leftmost <= _max, "leftmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, _mutator_leftmost,  _max);
  assert (_mutator_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _mutator_rightmost, _max);

  assert (_mutator_leftmost == _max || is_mutator_free(_mutator_leftmost),  "leftmost region should be free: " SIZE_FORMAT,  _mutator_leftmost);
  assert (_mutator_rightmost == 0   || is_mutator_free(_mutator_rightmost), "rightmost region should be free: " SIZE_FORMAT, _mutator_rightmost);

  size_t beg_off = _mutator_free_bitmap.get_next_one_offset(0);
  size_t end_off = _mutator_free_bitmap.get_next_one_offset(_mutator_rightmost + 1);
  assert (beg_off >= _mutator_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _mutator_leftmost);
  assert (end_off == _max,      "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, _mutator_rightmost);

  assert (_collector_leftmost <= _max, "leftmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, _collector_leftmost,  _max);
  assert (_collector_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _collector_rightmost, _max);

  assert (_collector_leftmost == _max || is_collector_free(_collector_leftmost),  "leftmost region should be free: " SIZE_FORMAT,  _collector_leftmost);
  assert (_collector_rightmost == 0   || is_collector_free(_collector_rightmost), "rightmost region should be free: " SIZE_FORMAT, _collector_rightmost);

  beg_off = _collector_free_bitmap.get_next_one_offset(0);
  end_off = _collector_free_bitmap.get_next_one_offset(_collector_rightmost + 1);
  assert (beg_off >= _collector_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _collector_leftmost);
  assert (end_off == _max,      "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, _collector_rightmost);
}
#endif