/*
 * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc_implementation/shenandoah/shenandoahFreeSet.hpp"
#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"

ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
        _heap(heap),
        _mutator_free_bitmap(max_regions, /* in_resource_area = */ false),
        _collector_free_bitmap(max_regions, /* in_resource_area = */ false),
        _max(max_regions)
{
  clear_internal();
}

void ShenandoahFreeSet::increase_used(size_t num_bytes) {
  shenandoah_assert_heaplocked();
  _used += num_bytes;

  assert(_used <= _capacity, err_msg("must not use more than we have: used: " SIZE_FORMAT
                                     ", capacity: " SIZE_FORMAT ", num_bytes: " SIZE_FORMAT,
                                     _used, _capacity, num_bytes));
}

bool ShenandoahFreeSet::is_mutator_free(size_t idx) const {
  assert (idx < _max,
          err_msg("index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
                  idx, _max, _mutator_leftmost, _mutator_rightmost));
  return _mutator_free_bitmap.at(idx);
}

bool ShenandoahFreeSet::is_collector_free(size_t idx) const {
  assert (idx < _max,
          err_msg("index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
                  idx, _max, _collector_leftmost, _collector_rightmost));
  return _collector_free_bitmap.at(idx);
}

HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
  // Scan the bitmap looking for a first fit.
  //
  // Leftmost and rightmost bounds provide enough caching to walk the bitmap efficiently. Normally,
  // we find the region to allocate in right away.
  //
  // Allocations are biased: new application allocs go to the beginning of the heap, and GC allocs
  // go to the end. This makes application allocation faster, because we usually find lots of
  // freshly cleared regions at the beginning of the heap.
  //
  // The free set maintains mutator and collector views, and normally each allocates in its own
  // view only, except for the special cases of stealing and mixed allocations.

  switch (req.type()) {
    case ShenandoahAllocRequest::_alloc_tlab:
    case ShenandoahAllocRequest::_alloc_shared: {

      // Try to allocate in the mutator view
      for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
        if (is_mutator_free(idx)) {
          HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
          if (result != NULL) {
            return result;
          }
        }
      }

      // There is no recovery. The mutator does not touch the collector view at all.
      break;
    }
    case ShenandoahAllocRequest::_alloc_gclab:
    case ShenandoahAllocRequest::_alloc_shared_gc: {
      // size_t is unsigned, need to dodge underflow when _leftmost = 0
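      // (c walks from _collector_rightmost + 1 down to _collector_leftmost + 1,
      //  so idx = c - 1 visits [_collector_leftmost, _collector_rightmost] in
      //  descending order without ever computing (size_t)0 - 1.)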

      // Fast-path: try to allocate in the collector view first
      for (size_t c = _collector_rightmost + 1; c > _collector_leftmost; c--) {
        size_t idx = c - 1;
        if (is_collector_free(idx)) {
          HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
          if (result != NULL) {
            return result;
          }
        }
      }

      // No dice. Can we borrow space from the mutator view?
      if (!ShenandoahEvacReserveOverflow) {
        return NULL;
      }

      // Try to steal an empty region from the mutator view
      for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
        size_t idx = c - 1;
        if (is_mutator_free(idx)) {
          ShenandoahHeapRegion* r = _heap->get_region(idx);
          if (is_empty_or_trash(r)) {
            flip_to_gc(r);
            HeapWord *result = try_allocate_in(r, req, in_new_region);
            if (result != NULL) {
              return result;
            }
          }
        }
      }

      // No dice. Do not try to mix mutator and GC allocations, because
      // update-watermark (URWM) moves due to GC allocations would expose
      // unparsable mutator allocations.

      break;
    }
    default:
      ShouldNotReachHere();
  }

  return NULL;
}

HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
  assert (!has_no_alloc_capacity(r), err_msg("Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index()));

  try_recycle_trashed(r);

  in_new_region = r->is_empty();

  HeapWord* result = NULL;
  size_t size = req.size();

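  // With elastic TLABs, a LAB request may be shrunk to whatever the region can
  // still fit, as long as that stays at or above the requested minimum size.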
  if (ShenandoahElasticTLAB && req.is_lab_alloc()) {
    size_t free = align_size_down(r->free() >> LogHeapWordSize, MinObjAlignment);
    if (size > free) {
      size = free;
    }
    if (size >= req.min_size()) {
      result = r->allocate(size, req.type());
      assert (result != NULL, err_msg("Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size));
    }
  } else {
    result = r->allocate(size, req.type());
  }

  if (result != NULL) {
    // Allocation successful, bump stats:
    if (req.is_mutator_alloc()) {
      increase_used(size * HeapWordSize);
    }

    // Record actual allocation size
    req.set_actual_size(size);

    if (req.is_gc_alloc()) {
      r->set_update_watermark(r->top());
    }
  }

  if (result == NULL || has_no_alloc_capacity(r)) {
    // Region cannot afford this or future allocations. Retire it.
    //
    // While this seems a bit harsh, especially when this large allocation does not fit
    // but the next small one would, we risk inflating scan times when lots of
    // almost-full regions precede the fully-empty region where we want to allocate the entire TLAB.
    // TODO: Record first fully-empty region, and use that for large allocations

    // Record the remainder as allocation waste
    if (req.is_mutator_alloc()) {
      size_t waste = r->free();
      if (waste > 0) {
        increase_used(waste);
        _heap->notify_mutator_alloc_words(waste >> LogHeapWordSize, true);
      }
    }

    size_t num = r->index();
    _collector_free_bitmap.clear_bit(num);
    _mutator_free_bitmap.clear_bit(num);
    // Touched the bounds? Need to update:
    if (touches_bounds(num)) {
      adjust_bounds();
    }
    assert_bounds();
  }
  return result;
}

bool ShenandoahFreeSet::touches_bounds(size_t num) const {
  return num == _collector_leftmost || num == _collector_rightmost || num == _mutator_leftmost || num == _mutator_rightmost;
}

void ShenandoahFreeSet::recompute_bounds() {
  // Reset to the most pessimistic case:
  _mutator_rightmost = _max - 1;
  _mutator_leftmost = 0;
  _collector_rightmost = _max - 1;
  _collector_leftmost = 0;

  // ...and adjust from there
  adjust_bounds();
}

void ShenandoahFreeSet::adjust_bounds() {
  // Rewind both mutator bounds until the next bit.
  while (_mutator_leftmost < _max && !is_mutator_free(_mutator_leftmost)) {
    _mutator_leftmost++;
  }
  while (_mutator_rightmost > 0 && !is_mutator_free(_mutator_rightmost)) {
    _mutator_rightmost--;
  }
  // Rewind both collector bounds until the next bit.
  while (_collector_leftmost < _max && !is_collector_free(_collector_leftmost)) {
    _collector_leftmost++;
  }
  while (_collector_rightmost > 0 && !is_collector_free(_collector_rightmost)) {
    _collector_rightmost--;
  }
}

HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
  shenandoah_assert_heaplocked();

  size_t words_size = req.size();
  size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
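  // (A humongous allocation takes one "start" region plus enough "continuation"
  //  regions; required_regions rounds the byte size up to whole regions.)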

  // No regions left to satisfy allocation, bye.
  if (num > mutator_count()) {
    return NULL;
  }

  // Find the contiguous interval of $num regions, starting from $beg and ending in $end,
  // inclusive. Contiguous allocations are biased to the beginning.

  size_t beg = _mutator_leftmost;
  size_t end = beg;

  while (true) {
    if (end >= _max) {
      // Hit the end, goodbye
      return NULL;
    }

    // If the region is not free, or is not completely empty, the current [beg; end]
    // interval is useless, and we may fast-forward past it.
    if (!is_mutator_free(end) || !is_empty_or_trash(_heap->get_region(end))) {
      end++;
      beg = end;
      continue;
    }

    if ((end - beg + 1) == num) {
      // found the match
      break;
    }

    end++;
  }

  size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();

  // Initialize regions:
  for (size_t i = beg; i <= end; i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    try_recycle_trashed(r);

    assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous");
    assert(r->is_empty(), "Should be empty");

    if (i == beg) {
      r->make_humongous_start();
    } else {
      r->make_humongous_cont();
    }

    // Trailing region may be non-full, record the remainder there
    size_t used_words;
    if ((i == end) && (remainder != 0)) {
      used_words = remainder;
    } else {
      used_words = ShenandoahHeapRegion::region_size_words();
    }

    r->set_top(r->bottom() + used_words);

    _mutator_free_bitmap.clear_bit(r->index());
  }

  // While individual regions report their true use, all humongous regions are
  // marked used in the free set.
  increase_used(ShenandoahHeapRegion::region_size_bytes() * num);

  if (remainder != 0) {
    // Record this remainder as allocation waste
    _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);
  }

  // Allocated at left/rightmost? Move the bounds appropriately.
  if (beg == _mutator_leftmost || end == _mutator_rightmost) {
    adjust_bounds();
  }
  assert_bounds();

  req.set_actual_size(words_size);
  return _heap->get_region(beg)->bottom();
}

bool ShenandoahFreeSet::is_empty_or_trash(ShenandoahHeapRegion *r) {
  return r->is_empty() || r->is_trash();
}

size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) {
  if (r->is_trash()) {
    // This would be recycled on the allocation path
    return ShenandoahHeapRegion::region_size_bytes();
  } else {
    return r->free();
  }
}

bool ShenandoahFreeSet::has_no_alloc_capacity(ShenandoahHeapRegion *r) {
  return alloc_capacity(r) == 0;
}

void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
  if (r->is_trash()) {
    _heap->decrease_used(r->used());
    r->recycle();
  }
}

void ShenandoahFreeSet::recycle_trash() {
  // The lock is not reentrant; check we do not already hold it
  shenandoah_assert_not_heaplocked();

  for (size_t i = 0; i < _heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    if (r->is_trash()) {
      ShenandoahHeapLocker locker(_heap->lock());
      try_recycle_trashed(r);
    }
    SpinPause(); // allow allocators to take the lock
  }
}

void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
  size_t idx = r->index();

  assert(_mutator_free_bitmap.at(idx), "Should be in mutator view");
  assert(is_empty_or_trash(r), "Should not be allocated");

  _mutator_free_bitmap.clear_bit(idx);
  _collector_free_bitmap.set_bit(idx);
  _collector_leftmost = MIN2(idx, _collector_leftmost);
  _collector_rightmost = MAX2(idx, _collector_rightmost);

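  // The region now backs GC allocations only, so mutator capacity shrinks: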
  _capacity -= alloc_capacity(r);

  if (touches_bounds(idx)) {
    adjust_bounds();
  }
  assert_bounds();
}

void ShenandoahFreeSet::clear() {
  shenandoah_assert_heaplocked();
  clear_internal();
}

void ShenandoahFreeSet::clear_internal() {
  _mutator_free_bitmap.clear();
  _collector_free_bitmap.clear();
  _mutator_leftmost = _max;
  _mutator_rightmost = 0;
  _collector_leftmost = _max;
  _collector_rightmost = 0;
  _capacity = 0;
  _used = 0;
}

void ShenandoahFreeSet::rebuild() {
  shenandoah_assert_heaplocked();
  clear();

  for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
    ShenandoahHeapRegion* region = _heap->get_region(idx);
    if (region->is_alloc_allowed() || region->is_trash()) {
      assert(!region->is_cset(), "Shouldn't be adding those to the free set");

      // Do not add regions that would surely fail allocation
      if (has_no_alloc_capacity(region)) continue;

      _capacity += alloc_capacity(region);
      assert(_used <= _capacity, "must not use more than we have");

      assert(!is_mutator_free(idx), "We are about to add it, it shouldn't be there already");
      _mutator_free_bitmap.set_bit(idx);
    }
  }

  // Evac reserve: reserve trailing space for evacuations
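  // (ShenandoahEvacReserve is a percentage: the division by 100 below turns it
  //  into a fraction of max heap capacity.)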
  size_t to_reserve = _heap->max_capacity() / 100 * ShenandoahEvacReserve;
  size_t reserved = 0;

  for (size_t idx = _heap->num_regions() - 1; idx > 0; idx--) {
    if (reserved >= to_reserve) break;

    ShenandoahHeapRegion* region = _heap->get_region(idx);
    if (_mutator_free_bitmap.at(idx) && is_empty_or_trash(region)) {
      _mutator_free_bitmap.clear_bit(idx);
      _collector_free_bitmap.set_bit(idx);
      size_t ac = alloc_capacity(region);
      _capacity -= ac;
      reserved += ac;
    }
  }

  recompute_bounds();
  assert_bounds();
}

void ShenandoahFreeSet::log_status() {
  shenandoah_assert_heaplocked();

  if (ShenandoahLogInfo || PrintGCDetails) {
    ResourceMark rm;
    outputStream* ls = gclog_or_tty;

    {
      size_t last_idx = 0;
      size_t max = 0;
      size_t max_contig = 0;
      size_t empty_contig = 0;

      size_t total_used = 0;
      size_t total_free = 0;
      size_t total_free_ext = 0;

      for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
        if (is_mutator_free(idx)) {
          ShenandoahHeapRegion *r = _heap->get_region(idx);
          size_t free = alloc_capacity(r);

          max = MAX2(max, free);

          if (r->is_empty()) {
            total_free_ext += free;
            if (last_idx + 1 == idx) {
              empty_contig++;
            } else {
              empty_contig = 1;
            }
          } else {
            empty_contig = 0;
          }

          total_used += r->used();
          total_free += free;

          max_contig = MAX2(max_contig, empty_contig);
          last_idx = idx;
        }
      }

      size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();

      ls->print("Free: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s regular, " SIZE_FORMAT "%s humongous, ",
                byte_size_in_proper_unit(total_free),    proper_unit_for_byte_size(total_free),
                byte_size_in_proper_unit(max),           proper_unit_for_byte_size(max),
                byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous)
      );

      ls->print("Frag: ");
      size_t frag_ext;
      if (total_free_ext > 0) {
        frag_ext = 100 - (100 * max_humongous / total_free_ext);
      } else {
        frag_ext = 0;
      }
      ls->print(SIZE_FORMAT "%% external, ", frag_ext);

      size_t frag_int;
      if (mutator_count() > 0) {
        frag_int = (100 * (total_used / mutator_count()) / ShenandoahHeapRegion::region_size_bytes());
      } else {
        frag_int = 0;
      }
      ls->print(SIZE_FORMAT "%% internal; ", frag_int);
    }

    {
      size_t max = 0;
      size_t total_free = 0;

      for (size_t idx = _collector_leftmost; idx <= _collector_rightmost; idx++) {
        if (is_collector_free(idx)) {
          ShenandoahHeapRegion *r = _heap->get_region(idx);
          size_t free = alloc_capacity(r);
          max = MAX2(max, free);
          total_free += free;
        }
      }

      ls->print_cr("Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s",
                   byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
                   byte_size_in_proper_unit(max),        proper_unit_for_byte_size(max));
    }
  }
}

HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
  shenandoah_assert_heaplocked();
  assert_bounds();

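  // Requests above the humongous threshold take the contiguous multi-region
  // (humongous) path; everything else goes through the regular region path: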
  if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
    switch (req.type()) {
      case ShenandoahAllocRequest::_alloc_shared:
      case ShenandoahAllocRequest::_alloc_shared_gc:
        in_new_region = true;
        return allocate_contiguous(req);
      case ShenandoahAllocRequest::_alloc_gclab:
      case ShenandoahAllocRequest::_alloc_tlab:
        in_new_region = false;
        assert(false, err_msg("Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
                              req.size(), ShenandoahHeapRegion::humongous_threshold_words()));
        return NULL;
      default:
        ShouldNotReachHere();
        return NULL;
    }
  } else {
    return allocate_single(req, in_new_region);
  }
}

size_t ShenandoahFreeSet::unsafe_peek_free() const {
  // Deliberately not locked, this method is unsafe when the free set is modified.
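  // Bounds may be shifting concurrently, hence the extra index < _max check
  // in the loop below.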

  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (index < _max && is_mutator_free(index)) {
      ShenandoahHeapRegion* r = _heap->get_region(index);
      if (r->free() >= MinTLABSize) {
        return r->free();
      }
    }
  }

  // It appears that no regions are left
  return 0;
}

void ShenandoahFreeSet::print_on(outputStream* out) const {
  out->print_cr("Mutator Free Set: " SIZE_FORMAT "", mutator_count());
  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (is_mutator_free(index)) {
      _heap->get_region(index)->print_on(out);
    }
  }
  out->print_cr("Collector Free Set: " SIZE_FORMAT "", collector_count());
  for (size_t index = _collector_leftmost; index <= _collector_rightmost; index++) {
    if (is_collector_free(index)) {
      _heap->get_region(index)->print_on(out);
    }
  }
}

/*
 * Internal fragmentation metric: describes how fragmented the heap regions are.
 *
 * It is derived as:
 *
 *               sum(used[i]^2, i=0..k)
 *   IF = 1 - ------------------------------
 *              C * sum(used[i], i=0..k)
 *
 * ...where k is the number of regions in computation, C is the region capacity, and
 * used[i] is the used space in the region.
 *
 * The non-linearity causes IF to be lower for the cases where the same total heap
 * used is densely packed. For example:
 *   a) Heap is completely full  => IF = 0
 *   b) Heap is half full, first 50% regions are completely full => IF = 0
 *   c) Heap is half full, each region is 50% full => IF = 1/2
 *   d) Heap is quarter full, first 25% regions are completely full => IF = 0
 *   e) Heap is quarter full, each region is 25% full => IF = 3/4
 *   f) Heap has one small object per each region => IF =~ 1
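 *
 * A worked instance of (c), with two regions of capacity C = 100: used = {50, 50}
 * gives IF = 1 - (2500 + 2500) / (100 * 100) = 1/2, while packing the same data
 * as used = {100, 0} gives IF = 1 - 10000 / (100 * 100) = 0.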
 */
double ShenandoahFreeSet::internal_fragmentation() {
  double squared = 0;
  double linear = 0;
  int count = 0;

  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (is_mutator_free(index)) {
      ShenandoahHeapRegion* r = _heap->get_region(index);
      size_t used = r->used();
      squared += used * used;
      linear += used;
      count++;
    }
  }

  if (count > 0) {
    double s = squared / (ShenandoahHeapRegion::region_size_bytes() * linear);
    return 1 - s;
  } else {
    return 0;
  }
}

/*
 * External fragmentation metric: describes how fragmented the heap is.
 *
 * It is derived as:
 *
 *   EF = 1 - largest_contiguous_free / total_free
 *
 * For example:
 *   a) Heap is completely empty => EF = 0
 *   b) Heap is completely full => EF = 0
 *   c) Heap is half full, the free half split into two separated contiguous runs => EF = 1/2
 *   d) Heap is half full, full and empty regions interleave => EF =~ 1
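 *
 * A worked instance of (d): with 8 free regions scattered so that no two are
 * adjacent, largest_contiguous_free covers 1 region while total_free covers 8,
 * so EF = 1 - 1/8 = 7/8.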
 */
double ShenandoahFreeSet::external_fragmentation() {
  size_t last_idx = 0;
  size_t max_contig = 0;
  size_t empty_contig = 0;

  size_t free = 0;

  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (is_mutator_free(index)) {
      ShenandoahHeapRegion* r = _heap->get_region(index);
      if (r->is_empty()) {
        free += ShenandoahHeapRegion::region_size_bytes();
        if (last_idx + 1 == index) {
          empty_contig++;
        } else {
          empty_contig = 1;
        }
      } else {
        empty_contig = 0;
      }

      max_contig = MAX2(max_contig, empty_contig);
      last_idx = index;
    }
  }

  if (free > 0) {
    return 1 - (1.0 * max_contig * ShenandoahHeapRegion::region_size_bytes() / free);
  } else {
    return 0;
  }
}

#ifdef ASSERT
void ShenandoahFreeSet::assert_bounds() const {
  // Performance invariants. Failing these would not break the free set, but performance
  // would suffer.
  assert (_mutator_leftmost <= _max, err_msg("leftmost in bounds: "  SIZE_FORMAT " <= " SIZE_FORMAT, _mutator_leftmost,  _max));
  assert (_mutator_rightmost < _max, err_msg("rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _mutator_rightmost, _max));

  assert (_mutator_leftmost == _max || is_mutator_free(_mutator_leftmost),  err_msg("leftmost region should be free: " SIZE_FORMAT,  _mutator_leftmost));
  assert (_mutator_rightmost == 0   || is_mutator_free(_mutator_rightmost), err_msg("rightmost region should be free: " SIZE_FORMAT, _mutator_rightmost));

  size_t beg_off = _mutator_free_bitmap.get_next_one_offset(0);
  size_t end_off = _mutator_free_bitmap.get_next_one_offset(_mutator_rightmost + 1);
  assert (beg_off >= _mutator_leftmost, err_msg("free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _mutator_leftmost));
  assert (end_off == _max, err_msg("free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, _mutator_rightmost));

  assert (_collector_leftmost <= _max, err_msg("leftmost in bounds: "  SIZE_FORMAT " <= " SIZE_FORMAT, _collector_leftmost,  _max));
  assert (_collector_rightmost < _max, err_msg("rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _collector_rightmost, _max));

  assert (_collector_leftmost == _max || is_collector_free(_collector_leftmost),  err_msg("leftmost region should be free: " SIZE_FORMAT,  _collector_leftmost));
  assert (_collector_rightmost == 0   || is_collector_free(_collector_rightmost), err_msg("rightmost region should be free: " SIZE_FORMAT, _collector_rightmost));

  beg_off = _collector_free_bitmap.get_next_one_offset(0);
  end_off = _collector_free_bitmap.get_next_one_offset(_collector_rightmost + 1);
  assert (beg_off >= _collector_leftmost, err_msg("free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _collector_leftmost));
  assert (end_off == _max, err_msg("free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, _collector_rightmost));
}
#endif