/*
 * Copyright (c) 2016, 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/orderAccess.hpp"

ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
  _heap(heap),
  _mutator_free_bitmap(max_regions, mtGC),
  _collector_free_bitmap(max_regions, mtGC),
  _max(max_regions)
{
  clear_internal();
}

void ShenandoahFreeSet::increase_used(size_t num_bytes) {
  shenandoah_assert_heaplocked();
  _used += num_bytes;

  assert(_used <= _capacity, "must not use more than we have: used: " SIZE_FORMAT
         ", capacity: " SIZE_FORMAT ", num_bytes: " SIZE_FORMAT, _used, _capacity, num_bytes);
}

bool ShenandoahFreeSet::is_mutator_free(size_t idx) const {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
          idx, _max, _mutator_leftmost, _mutator_rightmost);
  return _mutator_free_bitmap.at(idx);
}

bool ShenandoahFreeSet::is_collector_free(size_t idx) const {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
          idx, _max, _collector_leftmost, _collector_rightmost);
  return _collector_free_bitmap.at(idx);
}

HeapWord* ShenandoahFreeSet::allocate_with_affiliation(ShenandoahRegionAffiliation affiliation, ShenandoahAllocRequest& req, bool& in_new_region) {
  for (size_t c = _collector_rightmost + 1; c > _collector_leftmost; c--) {
    // size_t is unsigned, need to dodge underflow when _leftmost = 0
    size_t idx = c - 1;
    if (is_collector_free(idx)) {
      ShenandoahHeapRegion* r = _heap->get_region(idx);
      if (r->affiliation() == affiliation) {
        HeapWord* result = try_allocate_in(r, req, in_new_region);
        if (result != NULL) {
          return result;
        }
      }
    }
  }
  return NULL;
}
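
// Satisfy a single-region allocation request. Mutator requests (TLABs and shared allocations)
// are served only from the mutator view of the free set; GC requests (GCLABs, PLABs and shared
// GC allocations) are served from the collector view, and may steal empty regions from the
// mutator view when ShenandoahEvacReserveOverflow permits it.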
HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
  // Scan the bitmap looking for a first fit.
  //
  // Leftmost and rightmost bounds provide enough caching to walk bitmap efficiently. Normally,
  // we would find the region to allocate in right away.
  //
  // Allocations are biased: new application allocs go to beginning of the heap, and GC allocs
  // go to the end. This makes application allocation faster, because we would clear lots
  // of regions from the beginning most of the time.
  //
  // Free set maintains mutator and collector views, and normally they allocate in their views only,
  // unless we special-case stealing and mixed allocations.

  // Overwrite with non-zero (non-NULL) values only if necessary for allocation bookkeeping.

  switch (req.type()) {
    case ShenandoahAllocRequest::_alloc_tlab:
    case ShenandoahAllocRequest::_alloc_shared: {
      // Try to allocate in the mutator view
      for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
        if (is_mutator_free(idx)) {
          // try_allocate_in() increases used if the allocation is successful.
          HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
          if (result != NULL) {
            return result;
          }
        }
      }

      // There is no recovery. Mutator does not touch collector view at all.
      break;
    }
    case ShenandoahAllocRequest::_alloc_gclab:
      // GCLABs are for evacuation so we must be in evacuation phase. If this allocation is successful, increment
      // the relevant evac_expended rather than used value.

    case ShenandoahAllocRequest::_alloc_plab:
      // PLABs always reside in old-gen and are only allocated during evacuation phase.

    case ShenandoahAllocRequest::_alloc_shared_gc: {
      // First try to fit into a region that is already in use in the same generation.
      HeapWord* result = allocate_with_affiliation(req.affiliation(), req, in_new_region);
      if (result != NULL) {
        return result;
      }
      // Then try a free region that is dedicated to GC allocations.
      result = allocate_with_affiliation(FREE, req, in_new_region);
      if (result != NULL) {
        return result;
      }

      // No dice. Can we borrow space from mutator view?
      if (!ShenandoahEvacReserveOverflow) {
        return NULL;
      }

      // Try to steal an empty region from the mutator view.
      for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
        size_t idx = c - 1;
        if (is_mutator_free(idx)) {
          ShenandoahHeapRegion* r = _heap->get_region(idx);
          if (can_allocate_from(r)) {
            flip_to_gc(r);
            HeapWord *result = try_allocate_in(r, req, in_new_region);
            if (result != NULL) {
              return result;
            }
          }
        }
      }

      // No dice. Do not try to mix mutator and GC allocations, because
      // URWM moves due to GC allocations would expose unparsable mutator
      // allocations.
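      // ("URWM" refers to the region's update watermark, which try_allocate_in() advances via
      // set_update_watermark() for GC allocations.)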
      break;
    }
    default:
      ShouldNotReachHere();
  }
  return NULL;
}

HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
  assert (!has_no_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());

  if (_heap->is_concurrent_weak_root_in_progress() &&
      r->is_trash()) {
    return NULL;
  }

  try_recycle_trashed(r);

  if (r->affiliation() == ShenandoahRegionAffiliation::FREE) {
    ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();

    r->set_affiliation(req.affiliation());

    if (r->is_old()) {
      // Any OLD region allocated during concurrent coalesce-and-fill does not need to be coalesced and filled because
      // all objects allocated within this region are above TAMS (and thus are implicitly marked). In case this is an
      // OLD region and concurrent preparation for mixed evacuations visits this region before the start of the next
      // old-gen concurrent mark (i.e. this region is allocated following the start of old-gen concurrent mark but before
      // concurrent preparations for mixed evacuations are completed), we mark this region as not requiring any
      // coalesce-and-fill processing.
      r->end_preemptible_coalesce_and_fill();
      _heap->clear_cards_for(r);
    }

    assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom");
    assert(ctx->is_bitmap_clear_range(ctx->top_bitmap(r), r->end()), "Bitmap above top_bitmap() must be clear");

    // Leave top_bitmap alone.  The first time a heap region is put into service, top_bitmap should equal end.
    // Thereafter, it should represent the upper bound on parts of the bitmap that need to be cleared.
    log_debug(gc)("NOT clearing bitmap for region " SIZE_FORMAT ", top_bitmap: "
                  PTR_FORMAT " at transition from FREE to %s",
                  r->index(), p2i(ctx->top_bitmap(r)), affiliation_name(req.affiliation()));
  } else if (r->affiliation() != req.affiliation()) {
    return NULL;
  }

  in_new_region = r->is_empty();

  HeapWord* result = NULL;
  size_t size = req.size();

  // req.size() is in words, r->free() is in bytes.
  if (ShenandoahElasticTLAB && req.is_lab_alloc()) {
    if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
      // Need to ensure that PLABs are aligned on a multiple of the card region size.
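      // usable_free is the free space rounded down to a whole number of cards. If the leftover
      // beyond usable_free is smaller than the minimum filler object, give up one more card so
      // the resulting padding remains fillable.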
      size_t free = r->free();
      size_t usable_free = (free / CardTable::card_size()) << CardTable::card_shift();
      if ((free != usable_free) && (free - usable_free < ShenandoahHeap::min_fill_size() * HeapWordSize)) {
        // We'll have to add another card's memory to the padding
        usable_free -= CardTable::card_size();
      }
      free /= HeapWordSize;
      usable_free /= HeapWordSize;
      if (size > usable_free) {
        size = usable_free;
      }
      if (size >= req.min_size()) {
        result = r->allocate_aligned(size, req, CardTable::card_size());
        if (result != nullptr && free > usable_free) {
          // Account for the alignment padding
          size_t padding = (free - usable_free) * HeapWordSize;
          increase_used(padding);
          assert(r->affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION, "All PLABs reside in old-gen");
          _heap->old_generation()->increase_used(padding);
          // For verification consistency, we need to report this padding to _heap
          _heap->increase_used(padding);
        }
      }
    } else {
      // This is a GCLAB or a TLAB allocation
      size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
      if (size > free) {
        size = free;
      }
      if (size >= req.min_size()) {
        result = r->allocate(size, req);
        assert (result != NULL, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size);
      }
    }
  } else if (req.is_lab_alloc() && req.type() == ShenandoahAllocRequest::_alloc_plab) {
    size_t free = r->free();
    size_t usable_free = (free / CardTable::card_size()) << CardTable::card_shift();
    if ((free != usable_free) && (free - usable_free < ShenandoahHeap::min_fill_size() * HeapWordSize)) {
      // We'll have to add another card's memory to the padding
      usable_free -= CardTable::card_size();
    }
    free /= HeapWordSize;
    usable_free /= HeapWordSize;
    if (size <= usable_free) {
      assert(size % CardTable::card_size_in_words() == 0, "PLAB size must be multiple of remembered set card size");

      result = r->allocate_aligned(size, req, CardTable::card_size());
      if (result != nullptr) {
        // Account for the alignment padding
        size_t padding = (free - usable_free) * HeapWordSize;
        increase_used(padding);
        assert(r->affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION, "All PLABs reside in old-gen");
        _heap->old_generation()->increase_used(padding);
        // For verification consistency, we need to report this padding to _heap
        _heap->increase_used(padding);
      }
    }
  } else {
    result = r->allocate(size, req);
  }

  if (result != NULL) {
    // Record actual allocation size
    req.set_actual_size(size);

    // Allocation successful, bump stats:
    if (req.is_mutator_alloc()) {
      // Mutator allocations always pull from young gen.
      _heap->young_generation()->increase_used(size * HeapWordSize);
      increase_used(size * HeapWordSize);
    } else {
      // assert(req.is_gc_alloc(), "Should be gc_alloc since req wasn't mutator alloc");

      // For GC allocations, we advance update_watermark because the objects relocated into this memory during
      // evacuation are not updated during evacuation.  For both young and old regions r, it is essential that all
      // PLABs be made parsable at the end of evacuation.  This is enabled by retiring all plabs at end of evacuation.
      // TODO: Making a PLAB parsable involves placing a filler object in its remnant memory but does not require
      // that the PLAB be disabled for all future purposes.
      // We may want to introduce a new service to make the PLABs parsable while still allowing the
      // PLAB to serve future allocation requests that arise during the next evacuation pass.
      r->set_update_watermark(r->top());

      if (r->affiliation() == ShenandoahRegionAffiliation::YOUNG_GENERATION) {
        _heap->young_generation()->increase_used(size * HeapWordSize);
      } else {
        assert(r->affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION, "GC Alloc was not YOUNG so must be OLD");
        assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "old-gen allocations use PLAB or shared allocation");
        _heap->old_generation()->increase_used(size * HeapWordSize);
        // for plabs, we'll sort the difference between evac and promotion usage when we retire the plab
      }
    }
  }
  if (result == NULL || has_no_alloc_capacity(r)) {
    // Region cannot afford this or future allocations. Retire it.
    //
    // While this seems a bit harsh, especially in the case when this large allocation does not
    // fit, but the next small one would, we risk inflating scan times when lots of
    // almost-full regions precede the fully-empty region where we want to allocate the entire TLAB.
    // TODO: Record first fully-empty region, and use that for large allocations and/or organize
    // available free segments within regions for more efficient searches for "good fit".

    // Record the remainder as allocation waste
    if (req.is_mutator_alloc()) {
      size_t waste = r->free();
      if (waste > 0) {
        increase_used(waste);
        _heap->generation_for(req.affiliation())->increase_allocated(waste);
        _heap->notify_mutator_alloc_words(waste >> LogHeapWordSize, true);
      }
    }

    size_t num = r->index();
    _collector_free_bitmap.clear_bit(num);
    _mutator_free_bitmap.clear_bit(num);
    // Touched the bounds? Need to update:
    if (touches_bounds(num)) {
      adjust_bounds();
    }
    assert_bounds();
  }
  return result;
}

bool ShenandoahFreeSet::touches_bounds(size_t num) const {
  return num == _collector_leftmost || num == _collector_rightmost || num == _mutator_leftmost || num == _mutator_rightmost;
}

void ShenandoahFreeSet::recompute_bounds() {
  // Reset to the most pessimistic case:
  _mutator_rightmost = _max - 1;
  _mutator_leftmost = 0;
  _collector_rightmost = _max - 1;
  _collector_leftmost = 0;

  // ...and adjust from there
  adjust_bounds();
}

void ShenandoahFreeSet::adjust_bounds() {
  // Rewind both mutator bounds until the next bit.
  while (_mutator_leftmost < _max && !is_mutator_free(_mutator_leftmost)) {
    _mutator_leftmost++;
  }
  while (_mutator_rightmost > 0 && !is_mutator_free(_mutator_rightmost)) {
    _mutator_rightmost--;
  }
  // Rewind both collector bounds until the next bit.
  while (_collector_leftmost < _max && !is_collector_free(_collector_leftmost)) {
    _collector_leftmost++;
  }
  while (_collector_rightmost > 0 && !is_collector_free(_collector_rightmost)) {
    _collector_rightmost--;
  }
}

HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
  shenandoah_assert_heaplocked();

  size_t words_size = req.size();
  size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

  // No regions left to satisfy allocation, bye.
  if (num > mutator_count()) {
    return NULL;
  }

  // Find the continuous interval of $num regions, starting from $beg and ending in $end,
  // inclusive. Contiguous allocations are biased to the beginning.

  size_t beg = _mutator_leftmost;
  size_t end = beg;

  while (true) {
    if (end >= _max) {
      // Hit the end, goodbye
      return NULL;
    }

    // If regions are not adjacent, then current [beg; end] is useless, and we may fast-forward.
    // If region is not completely free, the current [beg; end] is useless, and we may fast-forward.
    if (!is_mutator_free(end) || !can_allocate_from(_heap->get_region(end))) {
      end++;
      beg = end;
      continue;
    }

    if ((end - beg + 1) == num) {
      // found the match
      break;
    }

    end++;
  };

  size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
  ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();

  // Initialize regions:
  for (size_t i = beg; i <= end; i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    try_recycle_trashed(r);

    assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous");
    assert(r->is_empty(), "Should be empty");

    if (i == beg) {
      r->make_humongous_start();
    } else {
      r->make_humongous_cont();
    }

    // Trailing region may be non-full, record the remainder there
    size_t used_words;
    if ((i == end) && (remainder != 0)) {
      used_words = remainder;
    } else {
      used_words = ShenandoahHeapRegion::region_size_words();
    }

    r->set_affiliation(req.affiliation());
    r->set_update_watermark(r->bottom());
    r->set_top(r->bottom());              // Set top to bottom so we can capture TAMS
    ctx->capture_top_at_mark_start(r);
    r->set_top(r->bottom() + used_words); // Then change top to reflect allocation of humongous object.
    assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom");
    assert(ctx->is_bitmap_clear_range(ctx->top_bitmap(r), r->end()), "Bitmap above top_bitmap() must be clear");

    // Leave top_bitmap alone.  The first time a heap region is put into service, top_bitmap should equal end.
    // Thereafter, it should represent the upper bound on parts of the bitmap that need to be cleared.
    // ctx->clear_bitmap(r);
    log_debug(gc)("NOT clearing bitmap for Humongous region [" PTR_FORMAT ", " PTR_FORMAT "], top_bitmap: "
                  PTR_FORMAT " at transition from FREE to %s",
                  p2i(r->bottom()), p2i(r->end()), p2i(ctx->top_bitmap(r)), affiliation_name(req.affiliation()));

    _mutator_free_bitmap.clear_bit(r->index());
  }

  // While individual regions report their true use, all humongous regions are
  // marked used in the free set.
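  // The unused tail of the last region is reported separately as allocation waste below.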
  increase_used(ShenandoahHeapRegion::region_size_bytes() * num);
  if (req.affiliation() == ShenandoahRegionAffiliation::YOUNG_GENERATION) {
    _heap->young_generation()->increase_used(words_size * HeapWordSize);
  } else if (req.affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION) {
    _heap->old_generation()->increase_used(words_size * HeapWordSize);
  }

  if (remainder != 0) {
    // Record this remainder as allocation waste
    size_t waste = ShenandoahHeapRegion::region_size_words() - remainder;
    _heap->notify_mutator_alloc_words(waste, true);
    _heap->generation_for(req.affiliation())->increase_allocated(waste * HeapWordSize);
  }

  // Allocated at left/rightmost? Move the bounds appropriately.
  if (beg == _mutator_leftmost || end == _mutator_rightmost) {
    adjust_bounds();
  }
  assert_bounds();

  req.set_actual_size(words_size);
  return _heap->get_region(beg)->bottom();
}

bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) {
  return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
}

size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) {
  if (r->is_trash()) {
    // This would be recycled on allocation path
    return ShenandoahHeapRegion::region_size_bytes();
  } else {
    return r->free();
  }
}

bool ShenandoahFreeSet::has_no_alloc_capacity(ShenandoahHeapRegion *r) {
  return alloc_capacity(r) == 0;
}

void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
  if (r->is_trash()) {
    _heap->decrease_used(r->used());
    r->recycle();
  }
}

void ShenandoahFreeSet::recycle_trash() {
  // lock is not reentrant, check we don't have it
  shenandoah_assert_not_heaplocked();
  for (size_t i = 0; i < _heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    if (r->is_trash()) {
      ShenandoahHeapLocker locker(_heap->lock());
      try_recycle_trashed(r);
    }
    SpinPause(); // allow allocators to take the lock
  }
}

void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
  size_t idx = r->index();

  assert(_mutator_free_bitmap.at(idx), "Should be in mutator view");
  assert(can_allocate_from(r), "Should not be allocated");

  _mutator_free_bitmap.clear_bit(idx);
  _collector_free_bitmap.set_bit(idx);
  _collector_leftmost = MIN2(idx, _collector_leftmost);
  _collector_rightmost = MAX2(idx, _collector_rightmost);

  _capacity -= alloc_capacity(r);

  if (touches_bounds(idx)) {
    adjust_bounds();
  }
  assert_bounds();

  // We do not ensure that the region is no longer trash,
  // relying on try_allocate_in(), which always comes next,
  // to recycle trash before attempting to allocate anything in the region.
}

void ShenandoahFreeSet::clear() {
  shenandoah_assert_heaplocked();
  clear_internal();
}

void ShenandoahFreeSet::clear_internal() {
  _mutator_free_bitmap.clear();
  _collector_free_bitmap.clear();
  _mutator_leftmost = _max;
  _mutator_rightmost = 0;
  _collector_leftmost = _max;
  _collector_rightmost = 0;
  _capacity = 0;
  _used = 0;
}

void ShenandoahFreeSet::rebuild() {
  shenandoah_assert_heaplocked();
  clear();

  log_debug(gc)("Rebuilding FreeSet");
  for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
    ShenandoahHeapRegion* region = _heap->get_region(idx);
    if (region->is_alloc_allowed() || region->is_trash()) {
      assert(!region->is_cset(), "Shouldn't be adding those to the free set");

      // Do not add regions that would surely fail allocation
      if (has_no_alloc_capacity(region)) continue;

      _capacity += alloc_capacity(region);
      assert(_used <= _capacity, "must not use more than we have");

      assert(!is_mutator_free(idx), "We are about to add it, it shouldn't be there already");
      _mutator_free_bitmap.set_bit(idx);

      log_debug(gc)("  Setting Region " SIZE_FORMAT " _mutator_free_bitmap bit to true", idx);
    }
  }

  // Evac reserve: reserve trailing space for evacuations
  size_t to_reserve = (_heap->max_capacity() / 100) * ShenandoahEvacReserve;
  size_t reserved = 0;

  for (size_t idx = _heap->num_regions() - 1; idx > 0; idx--) {
    if (reserved >= to_reserve) break;

    ShenandoahHeapRegion* region = _heap->get_region(idx);
    if (_mutator_free_bitmap.at(idx) && can_allocate_from(region)) {
      _mutator_free_bitmap.clear_bit(idx);
      _collector_free_bitmap.set_bit(idx);
      size_t ac = alloc_capacity(region);
      _capacity -= ac;
      reserved += ac;
      log_debug(gc)("  Shifting region " SIZE_FORMAT " from mutator_free to collector_free", idx);
    }
  }

  recompute_bounds();
  assert_bounds();
}

void ShenandoahFreeSet::log_status() {
  shenandoah_assert_heaplocked();

  LogTarget(Info, gc, ergo) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);

    {
      size_t last_idx = 0;
      size_t max = 0;
      size_t max_contig = 0;
      size_t empty_contig = 0;

      size_t total_used = 0;
      size_t total_free = 0;
      size_t total_free_ext = 0;

      for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
        if (is_mutator_free(idx)) {
          ShenandoahHeapRegion *r = _heap->get_region(idx);
          size_t free = alloc_capacity(r);

          max = MAX2(max, free);

          if (r->is_empty()) {
            total_free_ext += free;
            if (last_idx + 1 == idx) {
              empty_contig++;
            } else {
              empty_contig = 1;
            }
          } else {
            empty_contig = 0;
          }

          total_used += r->used();
          total_free += free;

          max_contig = MAX2(max_contig, empty_contig);
          last_idx = idx;
        }
      }

      size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
      size_t free = capacity() - used();

      ls.print("Free: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s regular, " SIZE_FORMAT "%s humongous, ",
               byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
               byte_size_in_proper_unit(max), proper_unit_for_byte_size(max),
               byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous)
      );

      ls.print("Frag: ");
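      // External fragmentation: percentage of the free space in empty regions that lies outside
      // the largest contiguous run of empty regions (and thus cannot serve the largest humongous
      // allocation).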
      size_t frag_ext;
      if (total_free_ext > 0) {
        frag_ext = 100 - (100 * max_humongous / total_free_ext);
      } else {
        frag_ext = 0;
      }
      ls.print(SIZE_FORMAT "%% external, ", frag_ext);

      size_t frag_int;
      if (mutator_count() > 0) {
        frag_int = (100 * (total_used / mutator_count()) / ShenandoahHeapRegion::region_size_bytes());
      } else {
        frag_int = 0;
      }
      ls.print(SIZE_FORMAT "%% internal; ", frag_int);
    }

    {
      size_t max = 0;
      size_t total_free = 0;

      for (size_t idx = _collector_leftmost; idx <= _collector_rightmost; idx++) {
        if (is_collector_free(idx)) {
          ShenandoahHeapRegion *r = _heap->get_region(idx);
          size_t free = alloc_capacity(r);
          max = MAX2(max, free);
          total_free += free;
        }
      }

      ls.print_cr("Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s",
                  byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
                  byte_size_in_proper_unit(max), proper_unit_for_byte_size(max));
    }
  }
}

HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
  shenandoah_assert_heaplocked();
  assert_bounds();

  // Allocation request is known to satisfy all memory budgeting constraints.
  if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
    switch (req.type()) {
      case ShenandoahAllocRequest::_alloc_shared:
      case ShenandoahAllocRequest::_alloc_shared_gc:
        in_new_region = true;
        return allocate_contiguous(req);
      case ShenandoahAllocRequest::_alloc_plab:
      case ShenandoahAllocRequest::_alloc_gclab:
      case ShenandoahAllocRequest::_alloc_tlab:
        in_new_region = false;
        assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
               req.size(), ShenandoahHeapRegion::humongous_threshold_words());
        return NULL;
      default:
        ShouldNotReachHere();
        return NULL;
    }
  } else {
    return allocate_single(req, in_new_region);
  }
}

size_t ShenandoahFreeSet::unsafe_peek_free() const {
  // Deliberately not locked, this method is unsafe when free set is modified.

  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (index < _max && is_mutator_free(index)) {
      ShenandoahHeapRegion* r = _heap->get_region(index);
      if (r->free() >= MinTLABSize) {
        return r->free();
      }
    }
  }

  // It appears that no regions are left
  return 0;
}

void ShenandoahFreeSet::print_on(outputStream* out) const {
  out->print_cr("Mutator Free Set: " SIZE_FORMAT "", mutator_count());
  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (is_mutator_free(index)) {
      _heap->get_region(index)->print_on(out);
    }
  }
  out->print_cr("Collector Free Set: " SIZE_FORMAT "", collector_count());
  for (size_t index = _collector_leftmost; index <= _collector_rightmost; index++) {
    if (is_collector_free(index)) {
      _heap->get_region(index)->print_on(out);
    }
  }
}

/*
 * Internal fragmentation metric: describes how fragmented the heap regions are.
 *
 * It is derived as:
 *
 *               sum(used[i]^2, i=0..k)
 *   IF = 1 - ------------------------------
 *              C * sum(used[i], i=0..k)
 *
 * ...where k is the number of regions in computation, C is the region capacity, and
 * used[i] is the used space in the region.
 *
 * The non-linearity causes IF to be lower for the cases where the same total heap
 * used is densely packed.
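 *
 * (Equivalently, IF is one minus the used-weighted average of per-region fullness used[i]/C:
 * the same total usage spread thinly over many regions yields a higher IF.)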
 *
 * For example:
 *   a) Heap is completely full                                      => IF = 0
 *   b) Heap is half full, first 50% regions are completely full     => IF = 0
 *   c) Heap is half full, each region is 50% full                   => IF = 1/2
 *   d) Heap is quarter full, first 50% regions are completely full  => IF = 0
 *   e) Heap is quarter full, each region is 25% full                => IF = 3/4
 *   f) Heap has one small object per each region                    => IF =~ 1
 */
double ShenandoahFreeSet::internal_fragmentation() {
  double squared = 0;
  double linear = 0;
  int count = 0;

  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (is_mutator_free(index)) {
      ShenandoahHeapRegion* r = _heap->get_region(index);
      size_t used = r->used();
      squared += used * used;
      linear += used;
      count++;
    }
  }

  if (count > 0) {
    double s = squared / (ShenandoahHeapRegion::region_size_bytes() * linear);
    return 1 - s;
  } else {
    return 0;
  }
}

/*
 * External fragmentation metric: describes how fragmented the heap is.
 *
 * It is derived as:
 *
 *   EF = 1 - largest_contiguous_free / total_free
 *
 * For example:
 *   a) Heap is completely empty                              => EF = 0
 *   b) Heap is completely full                               => EF = 0
 *   c) Heap is first-half full                               => EF = 1/2
 *   d) Heap is half full, full and empty regions interleave  => EF =~ 1
 */
double ShenandoahFreeSet::external_fragmentation() {
  size_t last_idx = 0;
  size_t max_contig = 0;
  size_t empty_contig = 0;

  size_t free = 0;

  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (is_mutator_free(index)) {
      ShenandoahHeapRegion* r = _heap->get_region(index);
      if (r->is_empty()) {
        free += ShenandoahHeapRegion::region_size_bytes();
        if (last_idx + 1 == index) {
          empty_contig++;
        } else {
          empty_contig = 1;
        }
      } else {
        empty_contig = 0;
      }

      max_contig = MAX2(max_contig, empty_contig);
      last_idx = index;
    }
  }

  if (free > 0) {
    return 1 - (1.0 * max_contig * ShenandoahHeapRegion::region_size_bytes() / free);
  } else {
    return 0;
  }
}

#ifdef ASSERT
void ShenandoahFreeSet::assert_bounds() const {
  // Performance invariants. Failing these would not break the free set, but performance
  // would suffer.
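  // Namely, the [leftmost, rightmost] bounds must bracket every set bit of the corresponding free bitmap.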
  assert (_mutator_leftmost <= _max, "leftmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _mutator_leftmost, _max);
  assert (_mutator_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _mutator_rightmost, _max);

  assert (_mutator_leftmost == _max || is_mutator_free(_mutator_leftmost),  "leftmost region should be free: " SIZE_FORMAT, _mutator_leftmost);
  assert (_mutator_rightmost == 0 || is_mutator_free(_mutator_rightmost), "rightmost region should be free: " SIZE_FORMAT, _mutator_rightmost);

  size_t beg_off = _mutator_free_bitmap.get_next_one_offset(0);
  size_t end_off = _mutator_free_bitmap.get_next_one_offset(_mutator_rightmost + 1);
  assert (beg_off >= _mutator_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _mutator_leftmost);
  assert (end_off == _max, "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, _mutator_rightmost);

  assert (_collector_leftmost <= _max, "leftmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _collector_leftmost, _max);
  assert (_collector_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _collector_rightmost, _max);

  assert (_collector_leftmost == _max || is_collector_free(_collector_leftmost),  "leftmost region should be free: " SIZE_FORMAT, _collector_leftmost);
  assert (_collector_rightmost == 0 || is_collector_free(_collector_rightmost), "rightmost region should be free: " SIZE_FORMAT, _collector_rightmost);

  beg_off = _collector_free_bitmap.get_next_one_offset(0);
  end_off = _collector_free_bitmap.get_next_one_offset(_collector_rightmost + 1);
  assert (beg_off >= _collector_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _collector_leftmost);
  assert (end_off == _max, "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, _collector_rightmost);
}
#endif