/*
 * Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahTraversalGC.hpp"
#include "logging/logStream.hpp"

ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
  _heap(heap),
  _mutator_free_bitmap(max_regions, mtGC),
  _collector_free_bitmap(max_regions, mtGC),
  _max(max_regions)
{
  clear_internal();
}

void ShenandoahFreeSet::increase_used(size_t num_bytes) {
  assert_heaplock_owned_by_current_thread();
  _used += num_bytes;

  assert(_used <= _capacity, "must not use more than we have: used: " SIZE_FORMAT
         ", capacity: " SIZE_FORMAT ", num_bytes: " SIZE_FORMAT, _used, _capacity, num_bytes);
}

bool ShenandoahFreeSet::is_mutator_free(size_t idx) const {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
          idx, _max, _mutator_leftmost, _mutator_rightmost);
  return _mutator_free_bitmap.at(idx);
}

bool ShenandoahFreeSet::is_collector_free(size_t idx) const {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
          idx, _max, _collector_leftmost, _collector_rightmost);
  return _collector_free_bitmap.at(idx);
}

HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
  // Scan the bitmap looking for a first fit.
  //
  // Leftmost and rightmost bounds provide enough caching to walk the bitmap efficiently. Normally,
  // we would find the region to allocate in right away.
  //
  // Allocations are biased: new application allocs go to the beginning of the heap, and GC allocs
  // go to the end. This makes application allocation faster, because we would clear lots
  // of regions from the beginning most of the time.
  //
  // The free set maintains mutator and collector views, and normally they allocate in their
  // views only, unless we special-case stealing and mixed allocations.

  switch (req.type()) {
    case ShenandoahAllocRequest::_alloc_tlab:
    case ShenandoahAllocRequest::_alloc_shared: {

      // Try to allocate in the mutator view
      for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
        if (is_mutator_free(idx)) {
          HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
          if (result != NULL) {
            return result;
          }
        }
      }

      // There is no recovery. Mutator does not touch collector view at all.
      break;
    }
    case ShenandoahAllocRequest::_alloc_gclab:
    case ShenandoahAllocRequest::_alloc_shared_gc: {
      // size_t is unsigned, need to dodge underflow when _leftmost = 0

      // Fast-path: try to allocate in the collector view first
      for (size_t c = _collector_rightmost + 1; c > _collector_leftmost; c--) {
        size_t idx = c - 1;
        if (is_collector_free(idx)) {
          HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
          if (result != NULL) {
            return result;
          }
        }
      }

      // No dice. Can we borrow space from mutator view?
      if (!ShenandoahEvacReserveOverflow) {
        return NULL;
      }

      // Try to steal the empty region from the mutator view
      for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
        size_t idx = c - 1;
        if (is_mutator_free(idx)) {
          ShenandoahHeapRegion* r = _heap->get_region(idx);
          if (is_empty_or_trash(r)) {
            flip_to_gc(r);
            HeapWord *result = try_allocate_in(r, req, in_new_region);
            if (result != NULL) {
              return result;
            }
          }
        }
      }

      // Try to mix the allocation into the mutator view:
      if (ShenandoahAllowMixedAllocs) {
        for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
          size_t idx = c - 1;
          if (is_mutator_free(idx)) {
            HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
            if (result != NULL) {
              return result;
            }
          }
        }
      }
      break;
    }
    default:
      ShouldNotReachHere();
  }

  return NULL;
}

HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
  assert (!has_no_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->region_number());

  try_recycle_trashed(r);

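  // Tell the caller whether this allocation opened up a previously-empty region.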
  in_new_region = r->is_empty();

  HeapWord* result = NULL;
  size_t size = req.size();

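  // With elastic TLABs, a LAB request may be shrunk to fit whatever free space the
  // region still has, as long as the requested minimum size is satisfied.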
  if (ShenandoahElasticTLAB && req.is_lab_alloc()) {
    size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
    if (size > free) {
      size = free;
    }
    if (size >= req.min_size()) {
      result = r->allocate(size, req.type());
      assert (result != NULL, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size);
    }
  } else {
    result = r->allocate(size, req.type());
  }

  if (result != NULL) {
    // Allocation successful, bump stats:
    if (req.is_mutator_alloc()) {
      increase_used(size * HeapWordSize);
    }

    // Record actual allocation size
    req.set_actual_size(size);

    if (req.is_gc_alloc() && _heap->is_concurrent_traversal_in_progress()) {
      // Traversal needs to traverse through GC allocs. Adjust TAMS to the new top
      // so that these allocations appear below TAMS, and thus get traversed.
      // See top of shenandoahTraversal.cpp for an explanation.
      _heap->marking_context()->capture_top_at_mark_start(r);
      _heap->traversal_gc()->traversal_set()->add_region_check_for_duplicates(r);
      OrderAccess::fence();
    }
  }

  if (result == NULL || has_no_alloc_capacity(r)) {
    // Region cannot afford this or future allocations. Retire it.
    //
    // While this seems a bit harsh, especially when this large allocation does not fit
    // but the next small one would, we risk inflating scan times when lots of almost-full
    // regions precede the fully-empty region where we want to allocate the entire TLAB.
    // TODO: Record first fully-empty region, and use that for large allocations

    // Record the remainder as allocation waste
    if (req.is_mutator_alloc()) {
      size_t waste = r->free();
      if (waste > 0) {
        increase_used(waste);
        _heap->notify_mutator_alloc_words(waste >> LogHeapWordSize, true);
      }
    }

    size_t num = r->region_number();
    _collector_free_bitmap.clear_bit(num);
    _mutator_free_bitmap.clear_bit(num);
    // Touched the bounds? Need to update:
    if (touches_bounds(num)) {
      adjust_bounds();
    }
    assert_bounds();
  }
  return result;
}

bool ShenandoahFreeSet::touches_bounds(size_t num) const {
  return num == _collector_leftmost || num == _collector_rightmost || num == _mutator_leftmost || num == _mutator_rightmost;
}

void ShenandoahFreeSet::recompute_bounds() {
  // Reset to the most pessimistic case:
  _mutator_rightmost = _max - 1;
  _mutator_leftmost = 0;
  _collector_rightmost = _max - 1;
  _collector_leftmost = 0;

  // ...and adjust from there
  adjust_bounds();
}

void ShenandoahFreeSet::adjust_bounds() {
  // Rewind both mutator bounds until the next bit.
  while (_mutator_leftmost < _max && !is_mutator_free(_mutator_leftmost)) {
    _mutator_leftmost++;
  }
  while (_mutator_rightmost > 0 && !is_mutator_free(_mutator_rightmost)) {
    _mutator_rightmost--;
  }
  // Rewind both collector bounds until the next bit.
  while (_collector_leftmost < _max && !is_collector_free(_collector_leftmost)) {
    _collector_leftmost++;
  }
  while (_collector_rightmost > 0 && !is_collector_free(_collector_rightmost)) {
    _collector_rightmost--;
  }
}

HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
  assert_heaplock_owned_by_current_thread();

  size_t words_size = req.size();
  size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

  // No regions left to satisfy allocation, bye.
  if (num > mutator_count()) {
    return NULL;
  }

  // Find the contiguous interval of $num regions, starting from $beg and ending at $end,
  // inclusive. Contiguous allocations are biased to the beginning.

  size_t beg = _mutator_leftmost;
  size_t end = beg;

  while (true) {
    if (end >= _max) {
      // Hit the end, goodbye
      return NULL;
    }

    // If the region at $end is not free, or is not completely empty, then the current
    // [beg; end] interval is useless, and we may fast-forward past it.
    if (!is_mutator_free(end) || !is_empty_or_trash(_heap->get_region(end))) {
      end++;
      beg = end;
      continue;
    }

    if ((end - beg + 1) == num) {
      // found the match
      break;
    }

    end++;
  }

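  // The low bits of $words_size select the tail within the last region: zero means
  // the allocation fills all of its regions exactly.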
  size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();

  // Initialize regions:
  for (size_t i = beg; i <= end; i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    try_recycle_trashed(r);

    assert(i == beg || _heap->get_region(i-1)->region_number() + 1 == r->region_number(), "Should be contiguous");
    assert(r->is_empty(), "Should be empty");

    if (i == beg) {
      r->make_humongous_start();
    } else {
      r->make_humongous_cont();
    }

    // Trailing region may be non-full, record the remainder there
    size_t used_words;
    if ((i == end) && (remainder != 0)) {
      used_words = remainder;
    } else {
      used_words = ShenandoahHeapRegion::region_size_words();
    }

    r->set_top(r->bottom() + used_words);
    r->reset_alloc_metadata_to_shared();

    _mutator_free_bitmap.clear_bit(r->region_number());
  }

  // While individual regions report their true use, all humongous regions are
  // marked used in the free set.
  increase_used(ShenandoahHeapRegion::region_size_bytes() * num);

  if (remainder != 0) {
    // Record this remainder as allocation waste
    _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);
  }

  // Allocated at left/rightmost? Move the bounds appropriately.
  if (beg == _mutator_leftmost || end == _mutator_rightmost) {
    adjust_bounds();
  }
  assert_bounds();

  req.set_actual_size(words_size);
  return _heap->get_region(beg)->bottom();
}

bool ShenandoahFreeSet::is_empty_or_trash(ShenandoahHeapRegion *r) {
  return r->is_empty() || r->is_trash();
}

size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) {
  if (r->is_trash()) {
    // This would be recycled on allocation path
    return ShenandoahHeapRegion::region_size_bytes();
  } else {
    return r->free();
  }
}

bool ShenandoahFreeSet::has_no_alloc_capacity(ShenandoahHeapRegion *r) {
  return alloc_capacity(r) == 0;
}

void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
  if (r->is_trash()) {
    _heap->decrease_used(r->used());
    r->recycle();
  }
}

void ShenandoahFreeSet::recycle_trash() {
  // The lock is not reentrant; check that we do not already hold it.
  assert_heaplock_not_owned_by_current_thread();

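  // Recycle trashed regions one at a time, taking and releasing the heap lock for each,
  // so that allocating threads are not stalled behind the whole sweep.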
  for (size_t i = 0; i < _heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    if (r->is_trash()) {
      ShenandoahHeapLocker locker(_heap->lock());
      try_recycle_trashed(r);
    }
    SpinPause(); // allow allocators to take the lock
  }
}

void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
  size_t idx = r->region_number();

  assert(_mutator_free_bitmap.at(idx), "Should be in mutator view");
  assert(is_empty_or_trash(r), "Should not be allocated");

  _mutator_free_bitmap.clear_bit(idx);
  _collector_free_bitmap.set_bit(idx);
  _collector_leftmost = MIN2(idx, _collector_leftmost);
  _collector_rightmost = MAX2(idx, _collector_rightmost);

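  // Capacity tracks the mutator view only, so the flipped region's capacity
  // leaves the mutator accounting entirely.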
  _capacity -= alloc_capacity(r);

  if (touches_bounds(idx)) {
    adjust_bounds();
  }
  assert_bounds();
}

void ShenandoahFreeSet::clear() {
  assert_heaplock_owned_by_current_thread();
  clear_internal();
}

void ShenandoahFreeSet::clear_internal() {
  _mutator_free_bitmap.clear();
  _collector_free_bitmap.clear();
  _mutator_leftmost = _max;
  _mutator_rightmost = 0;
  _collector_leftmost = _max;
  _collector_rightmost = 0;
  _capacity = 0;
  _used = 0;
}

void ShenandoahFreeSet::rebuild() {
  assert_heaplock_owned_by_current_thread();
  clear();

  for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
    ShenandoahHeapRegion* region = _heap->get_region(idx);
    if (region->is_alloc_allowed() || region->is_trash()) {
      assert(!region->is_cset(), "Shouldn't be adding those to the free set");

      // Do not add regions that would surely fail allocation
      if (has_no_alloc_capacity(region)) continue;

      _capacity += alloc_capacity(region);
      assert(_used <= _capacity, "must not use more than we have");

      assert(!is_mutator_free(idx), "We are about to add it, it shouldn't be there already");
      _mutator_free_bitmap.set_bit(idx);
    }
  }

  // Evac reserve: reserve trailing space for evacuations
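  // ShenandoahEvacReserve is a percentage of the maximum heap capacity; dividing
  // before multiplying avoids overflow in the intermediate product.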
  size_t to_reserve = _heap->max_capacity() / 100 * ShenandoahEvacReserve;
  size_t reserved = 0;

  for (size_t idx = _heap->num_regions() - 1; idx > 0; idx--) {
    if (reserved >= to_reserve) break;

    ShenandoahHeapRegion* region = _heap->get_region(idx);
    if (_mutator_free_bitmap.at(idx) && is_empty_or_trash(region)) {
      _mutator_free_bitmap.clear_bit(idx);
      _collector_free_bitmap.set_bit(idx);
      size_t ac = alloc_capacity(region);
      _capacity -= ac;
      reserved += ac;
    }
  }

  recompute_bounds();
  assert_bounds();
}

void ShenandoahFreeSet::log_status() {
  assert_heaplock_owned_by_current_thread();

  LogTarget(Info, gc, ergo) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);

    {
      size_t last_idx = 0;
      size_t max = 0;
      size_t max_contig = 0;
      size_t empty_contig = 0;

      size_t total_used = 0;
      size_t total_free = 0;

      for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
        if (is_mutator_free(idx)) {
          ShenandoahHeapRegion *r = _heap->get_region(idx);
          size_t free = alloc_capacity(r);

          max = MAX2(max, free);

          if (r->is_empty() && (last_idx + 1 == idx)) {
            empty_contig++;
          } else {
            empty_contig = 0;
          }

          total_used += r->used();
          total_free += free;

          max_contig = MAX2(max_contig, empty_contig);
          last_idx = idx;
        }
      }

      size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
      size_t free = capacity() - used();

      ls.print("Free: " SIZE_FORMAT "M (" SIZE_FORMAT " regions), Max regular: " SIZE_FORMAT "K, Max humongous: " SIZE_FORMAT "K, ",
               total_free / M, mutator_count(), max / K, max_humongous / K);

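      // External fragmentation: 100% minus the share of free space covered by the
      // largest contiguous run of empty regions (the largest possible humongous alloc).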
      size_t frag_ext;
      if (free > 0) {
        frag_ext = 100 - (100 * max_humongous / free);
      } else {
        frag_ext = 0;
      }
      ls.print("External frag: " SIZE_FORMAT "%%, ", frag_ext);

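      // Internal fragmentation: average per-region utilization across the mutator
      // view, as a percentage of the region size.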
      size_t frag_int;
      if (mutator_count() > 0) {
        frag_int = (100 * (total_used / mutator_count()) / ShenandoahHeapRegion::region_size_bytes());
      } else {
        frag_int = 0;
      }
      ls.print("Internal frag: " SIZE_FORMAT "%%", frag_int);
      ls.cr();
    }

    {
      size_t max = 0;
      size_t total_free = 0;

      for (size_t idx = _collector_leftmost; idx <= _collector_rightmost; idx++) {
        if (is_collector_free(idx)) {
          ShenandoahHeapRegion *r = _heap->get_region(idx);
          size_t free = alloc_capacity(r);
          max = MAX2(max, free);
          total_free += free;
        }
      }

      ls.print_cr("Evacuation Reserve: " SIZE_FORMAT "M (" SIZE_FORMAT " regions), Max regular: " SIZE_FORMAT "K",
                  total_free / M, collector_count(), max / K);
    }
  }
}

HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
  assert_heaplock_owned_by_current_thread();
  assert_bounds();

  if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
    switch (req.type()) {
      case ShenandoahAllocRequest::_alloc_shared:
      case ShenandoahAllocRequest::_alloc_shared_gc:
        in_new_region = true;
        return allocate_contiguous(req);
      case ShenandoahAllocRequest::_alloc_gclab:
      case ShenandoahAllocRequest::_alloc_tlab:
        in_new_region = false;
        assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
               req.size(), ShenandoahHeapRegion::humongous_threshold_words());
        return NULL;
      default:
        ShouldNotReachHere();
        return NULL;
    }
  } else {
    return allocate_single(req, in_new_region);
  }
}

size_t ShenandoahFreeSet::unsafe_peek_free() const {
  // Deliberately not locked, this method is unsafe when free set is modified.

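  // Since the lock is not held, the bounds may move underneath us; re-check the
  // index against _max on every iteration.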
  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (index < _max && is_mutator_free(index)) {
      ShenandoahHeapRegion* r = _heap->get_region(index);
      if (r->free() >= MinTLABSize) {
        return r->free();
      }
    }
  }

  // It appears that no regions are left
  return 0;
}

void ShenandoahFreeSet::print_on(outputStream* out) const {
  out->print_cr("Mutator Free Set: " SIZE_FORMAT "", mutator_count());
  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (is_mutator_free(index)) {
      _heap->get_region(index)->print_on(out);
    }
  }
  out->print_cr("Collector Free Set: " SIZE_FORMAT "", collector_count());
  for (size_t index = _collector_leftmost; index <= _collector_rightmost; index++) {
    if (is_collector_free(index)) {
      _heap->get_region(index)->print_on(out);
    }
  }
}

#ifdef ASSERT
void ShenandoahFreeSet::assert_heaplock_owned_by_current_thread() const {
  _heap->assert_heaplock_owned_by_current_thread();
}

void ShenandoahFreeSet::assert_heaplock_not_owned_by_current_thread() const {
  _heap->assert_heaplock_not_owned_by_current_thread();
}

void ShenandoahFreeSet::assert_bounds() const {
  // Performance invariants. Failing these would not break the free set, but performance
  // would suffer.
  assert (_mutator_leftmost <= _max, "leftmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, _mutator_leftmost,  _max);
  assert (_mutator_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _mutator_rightmost, _max);

  assert (_mutator_leftmost == _max || is_mutator_free(_mutator_leftmost),  "leftmost region should be free: " SIZE_FORMAT,  _mutator_leftmost);
  assert (_mutator_rightmost == 0   || is_mutator_free(_mutator_rightmost), "rightmost region should be free: " SIZE_FORMAT, _mutator_rightmost);

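  // No free bits should be set below the leftmost bound, and none past the rightmost.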
  size_t beg_off = _mutator_free_bitmap.get_next_one_offset(0);
  size_t end_off = _mutator_free_bitmap.get_next_one_offset(_mutator_rightmost + 1);
  assert (beg_off >= _mutator_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _mutator_leftmost);
  assert (end_off == _max,      "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, _mutator_rightmost);

  assert (_collector_leftmost <= _max, "leftmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, _collector_leftmost,  _max);
  assert (_collector_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _collector_rightmost, _max);

  assert (_collector_leftmost == _max || is_collector_free(_collector_leftmost),  "leftmost region should be free: " SIZE_FORMAT,  _collector_leftmost);
  assert (_collector_rightmost == 0   || is_collector_free(_collector_rightmost), "rightmost region should be free: " SIZE_FORMAT, _collector_rightmost);

  beg_off = _collector_free_bitmap.get_next_one_offset(0);
  end_off = _collector_free_bitmap.get_next_one_offset(_collector_rightmost + 1);
  assert (beg_off >= _collector_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _collector_leftmost);
  assert (end_off == _max,      "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, _collector_rightmost);
}
#endif