/*
 * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahTraversalGC.hpp"
#include "logging/logStream.hpp"

ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
  _heap(heap),
  _mutator_free_bitmap(max_regions, mtGC),
  _collector_free_bitmap(max_regions, mtGC),
  _max(max_regions)
{
  clear_internal();
}

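// The free set tracks allocatable regions with two bitmaps: the mutator view
// serves application allocations (TLABs, shared allocs) from the low end of the
// heap, while the collector view serves GC allocations (GCLABs, shared GC allocs)
// from the high end. Cached [leftmost; rightmost] bounds limit how much of each
// bitmap the allocation paths need to scan.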
void ShenandoahFreeSet::increase_used(size_t num_bytes) {
  assert_heaplock_owned_by_current_thread();
  _used += num_bytes;

  assert(_used <= _capacity, "must not use more than we have: used: " SIZE_FORMAT
         ", capacity: " SIZE_FORMAT ", num_bytes: " SIZE_FORMAT, _used, _capacity, num_bytes);
}

bool ShenandoahFreeSet::is_mutator_free(size_t idx) const {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
          idx, _max, _mutator_leftmost, _mutator_rightmost);
  return _mutator_free_bitmap.at(idx);
}

bool ShenandoahFreeSet::is_collector_free(size_t idx) const {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
          idx, _max, _collector_leftmost, _collector_rightmost);
  return _collector_free_bitmap.at(idx);
}
HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
  // Scan the bitmap looking for a first fit.
  //
  // Leftmost and rightmost bounds provide enough caching to walk the bitmap efficiently.
  // Normally, we find the region to allocate in right away.
  //
  // Allocations are biased: new application allocs go to the beginning of the heap, and
  // GC allocs go to the end. This makes application allocation faster, because we would
  // clear lots of regions from the beginning most of the time.
  //
  // The free set maintains mutator and collector views, and normally they allocate in
  // their own views only, except for the special cases of stealing and mixed allocations
  // below.

  switch (req.type()) {
    case ShenandoahAllocRequest::_alloc_tlab:
    case ShenandoahAllocRequest::_alloc_shared: {

      // Try to allocate in the mutator view
      for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
        if (is_mutator_free(idx)) {
          HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
          if (result != NULL) {
            return result;
          }
        }
      }

      // There is no recovery. Mutator does not touch collector view at all.
      break;
    }
    case ShenandoahAllocRequest::_alloc_gclab:
    case ShenandoahAllocRequest::_alloc_shared_gc: {
      // size_t is unsigned: iterate with c == idx + 1 to dodge underflow when _leftmost == 0

      // Fast-path: try to allocate in the collector view first
      for (size_t c = _collector_rightmost + 1; c > _collector_leftmost; c--) {
        size_t idx = c - 1;
        if (is_collector_free(idx)) {
          HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
          if (result != NULL) {
            return result;
          }
        }
      }

      // No dice. Can we borrow space from mutator view?
      if (!ShenandoahEvacReserveOverflow) {
        return NULL;
      }

      // Try to steal the empty region from the mutator view
      for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
        size_t idx = c - 1;
        if (is_mutator_free(idx)) {
          ShenandoahHeapRegion* r = _heap->get_region(idx);
          if (is_empty_or_trash(r)) {
            flip_to_gc(r);
            HeapWord *result = try_allocate_in(r, req, in_new_region);
            if (result != NULL) {
              return result;
            }
          }
        }
      }

      // Try to mix the allocation into the mutator view:
      if (ShenandoahAllowMixedAllocs) {
        for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
          size_t idx = c - 1;
          if (is_mutator_free(idx)) {
            HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
            if (result != NULL) {
              return result;
            }
          }
        }
      }
      break;
    }
    default:
      ShouldNotReachHere();
  }

  return NULL;
}

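// Try to satisfy the request from a single region: recycle the region if it is
// trash, shrink elastic TLAB requests to the available space, and retire the
// region from the free set once it cannot serve further allocations. Trash
// regions are left alone while concurrent root processing is in progress.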
HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
  assert (!has_no_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->region_number());

  if (_heap->is_concurrent_root_in_progress() &&
      r->is_trash()) {
    return NULL;
  }

  try_recycle_trashed(r);

  in_new_region = r->is_empty();

  HeapWord* result = NULL;
  size_t size = req.size();

  if (ShenandoahElasticTLAB && req.is_lab_alloc()) {
    size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
    if (size > free) {
      size = free;
    }
    if (size >= req.min_size()) {
      result = r->allocate(size, req.type());
      assert (result != NULL, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size);
    }
  } else {
    result = r->allocate(size, req.type());
  }

  if (result != NULL) {
    // Allocation successful, bump stats:
    if (req.is_mutator_alloc()) {
      increase_used(size * HeapWordSize);
    }

    // Record actual allocation size
    req.set_actual_size(size);

    if (req.is_gc_alloc() && _heap->is_concurrent_traversal_in_progress()) {
      // Traversal needs to traverse through GC allocs. Adjust TAMS to the new top
      // so that these allocations appear below TAMS, and thus get traversed.
      // See top of shenandoahTraversal.cpp for an explanation.
      _heap->marking_context()->capture_top_at_mark_start(r);
      _heap->traversal_gc()->traversal_set()->add_region_check_for_duplicates(r);
      OrderAccess::fence();
    }
  }

  if (result == NULL || has_no_alloc_capacity(r)) {
    // Region cannot afford this or future allocations. Retire it.
    //
    // This seems a bit harsh, especially when this large allocation does not fit but
    // the next small one would. However, keeping such regions around risks inflating
    // scan times when lots of almost-full regions precede the fully-empty region where
    // we want to allocate the entire TLAB.
    // TODO: Record first fully-empty region, and use that for large allocations

    // Record the remainder as allocation waste
    if (req.is_mutator_alloc()) {
      size_t waste = r->free();
      if (waste > 0) {
        increase_used(waste);
        _heap->notify_mutator_alloc_words(waste >> LogHeapWordSize, true);
      }
    }

    size_t num = r->region_number();
    _collector_free_bitmap.clear_bit(num);
    _mutator_free_bitmap.clear_bit(num);
    // Touched the bounds? Need to update:
    if (touches_bounds(num)) {
      adjust_bounds();
    }
    assert_bounds();
  }
  return result;
}

bool ShenandoahFreeSet::touches_bounds(size_t num) const {
  return num == _collector_leftmost || num == _collector_rightmost || num == _mutator_leftmost || num == _mutator_rightmost;
}

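// Bounds maintenance: recompute_bounds() resets the cached windows to cover the
// whole heap and then shrinks them, while adjust_bounds() only shrinks the
// current windows inward until they land on set bits.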
void ShenandoahFreeSet::recompute_bounds() {
  // Reset to the most pessimistic case:
  _mutator_rightmost = _max - 1;
  _mutator_leftmost = 0;
  _collector_rightmost = _max - 1;
  _collector_leftmost = 0;

  // ...and adjust from there
  adjust_bounds();
}

void ShenandoahFreeSet::adjust_bounds() {
  // Move both mutator bounds inward until they land on a set bit.
  while (_mutator_leftmost < _max && !is_mutator_free(_mutator_leftmost)) {
    _mutator_leftmost++;
  }
  while (_mutator_rightmost > 0 && !is_mutator_free(_mutator_rightmost)) {
    _mutator_rightmost--;
  }
  // Move both collector bounds inward until they land on a set bit.
  while (_collector_leftmost < _max && !is_collector_free(_collector_leftmost)) {
    _collector_leftmost++;
  }
  while (_collector_rightmost > 0 && !is_collector_free(_collector_rightmost)) {
    _collector_rightmost--;
  }
}

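// Allocate a humongous object: find a contiguous run of completely free regions
// in the mutator view, mark them as humongous start/continuation, and account
// the whole run as used, recording the unused tail of the last region as waste.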
HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
  assert_heaplock_owned_by_current_thread();

  size_t words_size = req.size();
  size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

  // No regions left to satisfy allocation, bye.
  if (num > mutator_count()) {
    return NULL;
  }

  // Find the contiguous interval of $num regions, starting from $beg and ending in $end,
  // inclusive. Contiguous allocations are biased to the beginning.

  size_t beg = _mutator_leftmost;
  size_t end = beg;

  while (true) {
    if (end >= _max) {
      // Hit the end, goodbye
      return NULL;
    }

    // If the next region is not free, or is not completely empty, the current
    // [beg; end] interval is useless, and we may fast-forward past it.
    if (!is_mutator_free(end) || !is_empty_or_trash(_heap->get_region(end))) {
      end++;
      beg = end;
      continue;
    }

    if ((end - beg + 1) == num) {
      // found the match
      break;
    }

    end++;
  }

  size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();

  // Initialize regions:
  for (size_t i = beg; i <= end; i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    try_recycle_trashed(r);

    assert(i == beg || _heap->get_region(i-1)->region_number() + 1 == r->region_number(), "Should be contiguous");
    assert(r->is_empty(), "Should be empty");

    if (i == beg) {
      r->make_humongous_start();
    } else {
      r->make_humongous_cont();
    }

    // Trailing region may be non-full, record the remainder there
    size_t used_words;
    if ((i == end) && (remainder != 0)) {
      used_words = remainder;
    } else {
      used_words = ShenandoahHeapRegion::region_size_words();
    }

    r->set_top(r->bottom() + used_words);
    r->reset_alloc_metadata_to_shared();

    _mutator_free_bitmap.clear_bit(r->region_number());
  }

  // While individual regions report their true use, all humongous regions are
  // marked used in the free set.
  increase_used(ShenandoahHeapRegion::region_size_bytes() * num);

  if (remainder != 0) {
    // Record this remainder as allocation waste
    _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);
  }

  // Allocated at left/rightmost? Move the bounds appropriately.
  if (beg == _mutator_leftmost || end == _mutator_rightmost) {
    adjust_bounds();
  }
  assert_bounds();

  req.set_actual_size(words_size);
  return _heap->get_region(beg)->bottom();
}

bool ShenandoahFreeSet::is_empty_or_trash(ShenandoahHeapRegion *r) {
  return r->is_empty() || r->is_trash();
}

size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) {
  if (r->is_trash()) {
    // This would be recycled on the allocation path
    return ShenandoahHeapRegion::region_size_bytes();
  } else {
    return r->free();
  }
}

bool ShenandoahFreeSet::has_no_alloc_capacity(ShenandoahHeapRegion *r) {
  return alloc_capacity(r) == 0;
}

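// Recycle a trash region in place: subtract its bytes from the heap's used
// counter and reset it to an empty, allocatable state.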
void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
  if (r->is_trash()) {
    _heap->decrease_used(r->used());
    r->recycle();
  }
}

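// Recycle all trash regions eagerly, taking and releasing the heap lock for
// each region so that concurrent allocators are not starved.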
void ShenandoahFreeSet::recycle_trash() {
  // lock is not reentrant, check we don't have it
  assert_heaplock_not_owned_by_current_thread();

  for (size_t i = 0; i < _heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    if (r->is_trash()) {
      ShenandoahHeapLocker locker(_heap->lock());
      try_recycle_trashed(r);
    }
    SpinPause(); // allow allocators to take the lock
  }
}

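// Move an empty (or trash) region from the mutator view into the collector
// view, removing its capacity from the mutator side. This is how GC allocations
// overflow into mutator space when the evacuation reserve runs dry.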
void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
  size_t idx = r->region_number();

  assert(_mutator_free_bitmap.at(idx), "Should be in mutator view");
  assert(is_empty_or_trash(r), "Should not be allocated");

  _mutator_free_bitmap.clear_bit(idx);
  _collector_free_bitmap.set_bit(idx);
  _collector_leftmost = MIN2(idx, _collector_leftmost);
  _collector_rightmost = MAX2(idx, _collector_rightmost);

  _capacity -= alloc_capacity(r);

  if (touches_bounds(idx)) {
    adjust_bounds();
  }
  assert_bounds();
}

void ShenandoahFreeSet::clear() {
  assert_heaplock_owned_by_current_thread();
  clear_internal();
}

void ShenandoahFreeSet::clear_internal() {
  _mutator_free_bitmap.clear();
  _collector_free_bitmap.clear();
  _mutator_leftmost = _max;
  _mutator_rightmost = 0;
  _collector_leftmost = _max;
  _collector_rightmost = 0;
  _capacity = 0;
  _used = 0;
}

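// Rebuild the free set from scratch: put every region with allocation capacity
// into the mutator view, then move trailing empty regions into the collector
// view until ShenandoahEvacReserve percent of max heap capacity is set aside
// for evacuations.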
void ShenandoahFreeSet::rebuild() {
  assert_heaplock_owned_by_current_thread();
  clear();

  for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
    ShenandoahHeapRegion* region = _heap->get_region(idx);
    if (region->is_alloc_allowed() || region->is_trash()) {
      assert(!region->is_cset(), "Shouldn't be adding those to the free set");

      // Do not add regions that would surely fail allocation
      if (has_no_alloc_capacity(region)) continue;

      _capacity += alloc_capacity(region);
      assert(_used <= _capacity, "must not use more than we have");

      assert(!is_mutator_free(idx), "We are about to add it, it shouldn't be there already");
      _mutator_free_bitmap.set_bit(idx);
    }
  }

  // Evac reserve: reserve trailing space for evacuations
  size_t to_reserve = _heap->max_capacity() / 100 * ShenandoahEvacReserve;
  size_t reserved = 0;

  for (size_t idx = _heap->num_regions() - 1; idx > 0; idx--) {
    if (reserved >= to_reserve) break;

    ShenandoahHeapRegion* region = _heap->get_region(idx);
    if (_mutator_free_bitmap.at(idx) && is_empty_or_trash(region)) {
      _mutator_free_bitmap.clear_bit(idx);
      _collector_free_bitmap.set_bit(idx);
      size_t ac = alloc_capacity(region);
      _capacity -= ac;
      reserved += ac;
    }
  }

  recompute_bounds();
  assert_bounds();
}

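// Log the free set state under gc+ergo. External fragmentation estimates how
// much of the free space is unusable for humongous allocations:
//   frag_ext = 100 - 100 * max_humongous / free
// Internal fragmentation estimates how full the average free region already is:
//   frag_int = 100 * (total_used / region_count) / region_size_bytes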
void ShenandoahFreeSet::log_status() {
  assert_heaplock_owned_by_current_thread();

  LogTarget(Info, gc, ergo) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);

    {
      size_t last_idx = 0;
      size_t max = 0;
      size_t max_contig = 0;
      size_t empty_contig = 0;

      size_t total_used = 0;
      size_t total_free = 0;

      for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
        if (is_mutator_free(idx)) {
          ShenandoahHeapRegion *r = _heap->get_region(idx);
          size_t free = alloc_capacity(r);

          max = MAX2(max, free);

          if (r->is_empty()) {
            // Count a lone empty region as a run of one, not zero
            empty_contig = (last_idx + 1 == idx) ? empty_contig + 1 : 1;
          } else {
            empty_contig = 0;
          }

          total_used += r->used();
          total_free += free;

          max_contig = MAX2(max_contig, empty_contig);
          last_idx = idx;
        }
      }

      size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
      size_t free = capacity() - used();

      ls.print("Free: " SIZE_FORMAT "%s (" SIZE_FORMAT " regions), Max regular: " SIZE_FORMAT "%s, Max humongous: " SIZE_FORMAT "%s, ",
               byte_size_in_proper_unit(total_free),    proper_unit_for_byte_size(total_free),
               mutator_count(),
               byte_size_in_proper_unit(max),           proper_unit_for_byte_size(max),
               byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous)
      );

      size_t frag_ext;
      if (free > 0) {
        frag_ext = 100 - (100 * max_humongous / free);
      } else {
        frag_ext = 0;
      }
      ls.print("External frag: " SIZE_FORMAT "%%, ", frag_ext);

      size_t frag_int;
      if (mutator_count() > 0) {
        frag_int = (100 * (total_used / mutator_count()) / ShenandoahHeapRegion::region_size_bytes());
      } else {
        frag_int = 0;
      }
      ls.print("Internal frag: " SIZE_FORMAT "%%", frag_int);
      ls.cr();
    }

    {
      size_t max = 0;
      size_t total_free = 0;

      for (size_t idx = _collector_leftmost; idx <= _collector_rightmost; idx++) {
        if (is_collector_free(idx)) {
          ShenandoahHeapRegion *r = _heap->get_region(idx);
          size_t free = alloc_capacity(r);
          max = MAX2(max, free);
          total_free += free;
        }
      }

      ls.print_cr("Evacuation Reserve: " SIZE_FORMAT "%s (" SIZE_FORMAT " regions), Max regular: " SIZE_FORMAT "%s",
                  byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
                  collector_count(),
                  byte_size_in_proper_unit(max),        proper_unit_for_byte_size(max));
    }
  }
}

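// Entry point for all free set allocations, with the heap lock held: requests
// above the humongous threshold go to the contiguous allocator, everything else
// is placed within a single region.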
HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
  assert_heaplock_owned_by_current_thread();
  assert_bounds();

  if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
    switch (req.type()) {
      case ShenandoahAllocRequest::_alloc_shared:
      case ShenandoahAllocRequest::_alloc_shared_gc:
        in_new_region = true;
        return allocate_contiguous(req);
      case ShenandoahAllocRequest::_alloc_gclab:
      case ShenandoahAllocRequest::_alloc_tlab:
        in_new_region = false;
        assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
               req.size(), ShenandoahHeapRegion::humongous_threshold_words());
        return NULL;
      default:
        ShouldNotReachHere();
        return NULL;
    }
  } else {
    return allocate_single(req, in_new_region);
  }
}

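// Racy peek at free space in the mutator view: returns the free space of the
// first region that could fit at least a minimal TLAB. Callers must tolerate
// stale answers, since no lock is taken.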
size_t ShenandoahFreeSet::unsafe_peek_free() const {
  // Deliberately not locked, this method is unsafe when free set is modified.

  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (index < _max && is_mutator_free(index)) {
      ShenandoahHeapRegion* r = _heap->get_region(index);
      if (r->free() >= MinTLABSize) {
        return r->free();
      }
    }
  }

  // Apparently, no regions are left
  return 0;
}

void ShenandoahFreeSet::print_on(outputStream* out) const {
  out->print_cr("Mutator Free Set: " SIZE_FORMAT "", mutator_count());
  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (is_mutator_free(index)) {
      _heap->get_region(index)->print_on(out);
    }
  }
  out->print_cr("Collector Free Set: " SIZE_FORMAT "", collector_count());
  for (size_t index = _collector_leftmost; index <= _collector_rightmost; index++) {
    if (is_collector_free(index)) {
      _heap->get_region(index)->print_on(out);
    }
  }
}

#ifdef ASSERT
void ShenandoahFreeSet::assert_heaplock_owned_by_current_thread() const {
  _heap->assert_heaplock_owned_by_current_thread();
}

void ShenandoahFreeSet::assert_heaplock_not_owned_by_current_thread() const {
  _heap->assert_heaplock_not_owned_by_current_thread();
}

void ShenandoahFreeSet::assert_bounds() const {
  // Performance invariants. Failing these would not break the free set, but performance
  // would suffer.
  assert (_mutator_leftmost <= _max, "leftmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, _mutator_leftmost,  _max);
  assert (_mutator_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _mutator_rightmost, _max);

  assert (_mutator_leftmost == _max || is_mutator_free(_mutator_leftmost),  "leftmost region should be free: " SIZE_FORMAT,  _mutator_leftmost);
  assert (_mutator_rightmost == 0   || is_mutator_free(_mutator_rightmost), "rightmost region should be free: " SIZE_FORMAT, _mutator_rightmost);

  size_t beg_off = _mutator_free_bitmap.get_next_one_offset(0);
  size_t end_off = _mutator_free_bitmap.get_next_one_offset(_mutator_rightmost + 1);
  assert (beg_off >= _mutator_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _mutator_leftmost);
  assert (end_off == _max,      "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, _mutator_rightmost);

  assert (_collector_leftmost <= _max, "leftmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, _collector_leftmost,  _max);
  assert (_collector_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _collector_rightmost, _max);

  assert (_collector_leftmost == _max || is_collector_free(_collector_leftmost),  "leftmost region should be free: " SIZE_FORMAT,  _collector_leftmost);
  assert (_collector_rightmost == 0   || is_collector_free(_collector_rightmost), "rightmost region should be free: " SIZE_FORMAT, _collector_rightmost);

  beg_off = _collector_free_bitmap.get_next_one_offset(0);
  end_off = _collector_free_bitmap.get_next_one_offset(_collector_rightmost + 1);
  assert (beg_off >= _collector_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _collector_leftmost);
  assert (end_off == _max,      "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, _collector_rightmost);
}
#endif