/*
 * Copyright (c) 2016, 2021, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/shenandoahAffiliation.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/orderAccess.hpp"

ShenandoahSetsOfFree::ShenandoahSetsOfFree(size_t max_regions, ShenandoahFreeSet* free_set) :
    _max(max_regions),
    _free_set(free_set),
    _region_size_bytes(ShenandoahHeapRegion::region_size_bytes())
{
  _membership = NEW_C_HEAP_ARRAY(ShenandoahFreeMemoryType, max_regions, mtGC);
  clear_internal();
}

ShenandoahSetsOfFree::~ShenandoahSetsOfFree() {
  FREE_C_HEAP_ARRAY(ShenandoahFreeMemoryType, _membership);
}

void ShenandoahSetsOfFree::clear_internal() {
  for (size_t idx = 0; idx < _max; idx++) {
    _membership[idx] = NotFree;
  }

  for (size_t idx = 0; idx < NumFreeSets; idx++) {
    _leftmosts[idx] = _max;
    _rightmosts[idx] = 0;
    _leftmosts_empty[idx] = _max;
    _rightmosts_empty[idx] = 0;
    _capacity_of[idx] = 0;
    _used_by[idx] = 0;
  }

  _left_to_right_bias[Mutator] = true;
  _left_to_right_bias[Collector] = false;
  _left_to_right_bias[OldCollector] = false;

  _region_counts[Mutator] = 0;
  _region_counts[Collector] = 0;
  _region_counts[OldCollector] = 0;
  _region_counts[NotFree] = _max;
}

void ShenandoahSetsOfFree::clear_all() {
  clear_internal();
}

void ShenandoahSetsOfFree::increase_used(ShenandoahFreeMemoryType which_set, size_t bytes) {
  assert (which_set > NotFree && which_set < NumFreeSets, "Set must correspond to a valid freeset");
  _used_by[which_set] += bytes;
  assert (_used_by[which_set] <= _capacity_of[which_set],
          "Must not use (" SIZE_FORMAT ") more than capacity (" SIZE_FORMAT ") after increase by " SIZE_FORMAT,
          _used_by[which_set], _capacity_of[which_set], bytes);
}
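// The _leftmosts/_rightmosts entries kept for each free set are conservative bounds that narrow the range
// in which regions are searched; they are not required to be exact. The helpers below tighten a bound only
// when the region being removed was itself the boundary, and the corresponding "empty" bounds are merely
// nudged closer here: leftmost_empty()/rightmost_empty() finish the scan lazily when they are next queried.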
inline void ShenandoahSetsOfFree::shrink_bounds_if_touched(ShenandoahFreeMemoryType set, size_t idx) {
  if (idx == _leftmosts[set]) {
    while ((_leftmosts[set] < _max) && !in_free_set(_leftmosts[set], set)) {
      _leftmosts[set]++;
    }
    if (_leftmosts_empty[set] < _leftmosts[set]) {
      // This gets us closer to where we need to be; we'll scan further when leftmosts_empty is requested.
      _leftmosts_empty[set] = _leftmosts[set];
    }
  }
  if (idx == _rightmosts[set]) {
    while (_rightmosts[set] > 0 && !in_free_set(_rightmosts[set], set)) {
      _rightmosts[set]--;
    }
    if (_rightmosts_empty[set] > _rightmosts[set]) {
      // This gets us closer to where we need to be; we'll scan further when rightmosts_empty is requested.
      _rightmosts_empty[set] = _rightmosts[set];
    }
  }
}

inline void ShenandoahSetsOfFree::expand_bounds_maybe(ShenandoahFreeMemoryType set, size_t idx, size_t region_capacity) {
  if (region_capacity == _region_size_bytes) {
    if (_leftmosts_empty[set] > idx) {
      _leftmosts_empty[set] = idx;
    }
    if (_rightmosts_empty[set] < idx) {
      _rightmosts_empty[set] = idx;
    }
  }
  if (_leftmosts[set] > idx) {
    _leftmosts[set] = idx;
  }
  if (_rightmosts[set] < idx) {
    _rightmosts[set] = idx;
  }
}

void ShenandoahSetsOfFree::remove_from_free_sets(size_t idx) {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
  ShenandoahFreeMemoryType orig_set = membership(idx);
  assert (orig_set > NotFree && orig_set < NumFreeSets, "Cannot remove from free sets if not already free");
  _membership[idx] = NotFree;
  shrink_bounds_if_touched(orig_set, idx);

  _region_counts[orig_set]--;
  _region_counts[NotFree]++;
}

void ShenandoahSetsOfFree::make_free(size_t idx, ShenandoahFreeMemoryType which_set, size_t region_capacity) {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
  assert (_membership[idx] == NotFree, "Cannot make free if already free");
  assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
  _membership[idx] = which_set;
  _capacity_of[which_set] += region_capacity;
  expand_bounds_maybe(which_set, idx, region_capacity);

  _region_counts[NotFree]--;
  _region_counts[which_set]++;
}

void ShenandoahSetsOfFree::move_to_set(size_t idx, ShenandoahFreeMemoryType new_set, size_t region_capacity) {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
  assert ((new_set > NotFree) && (new_set < NumFreeSets), "New set must be valid");
  ShenandoahFreeMemoryType orig_set = _membership[idx];
  assert ((orig_set > NotFree) && (orig_set < NumFreeSets), "Cannot move free unless already free");
  // Expected transitions:
  //  During rebuild:           Mutator => Collector
  //                            Mutator empty => Collector
  //  During flip_to_gc:        Mutator empty => Collector
  //                            Mutator empty => Old Collector
  //  At start of update refs:  Collector => Mutator
  //                            OldCollector Empty => Mutator
  assert((region_capacity <= _region_size_bytes && ((orig_set == Mutator && new_set == Collector) || (orig_set == Collector && new_set == Mutator)))
         || (region_capacity == _region_size_bytes && ((orig_set == Mutator && new_set == Collector) || (orig_set == OldCollector && new_set == Mutator) || new_set == OldCollector)),
         "Unexpected movement between sets");

  _membership[idx] = new_set;
  _capacity_of[orig_set] -= region_capacity;
  shrink_bounds_if_touched(orig_set, idx);

  _capacity_of[new_set] += region_capacity;
  expand_bounds_maybe(new_set, idx, region_capacity);

  _region_counts[orig_set]--;
  _region_counts[new_set]++;
}
inline ShenandoahFreeMemoryType ShenandoahSetsOfFree::membership(size_t idx) const {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
  return _membership[idx];
}

// Returns true iff region idx is in the test_set free_set. Before returning true (for any set other
// than NotFree), asserts that the region has allocation capacity, as every free region must.
inline bool ShenandoahSetsOfFree::in_free_set(size_t idx, ShenandoahFreeMemoryType test_set) const {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
  if (_membership[idx] == test_set) {
    assert (test_set == NotFree || _free_set->alloc_capacity(idx) > 0, "Free regions must have alloc capacity");
    return true;
  } else {
    return false;
  }
}

inline size_t ShenandoahSetsOfFree::leftmost(ShenandoahFreeMemoryType which_set) const {
  assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
  size_t idx = _leftmosts[which_set];
  if (idx >= _max) {
    return _max;
  } else {
    assert (in_free_set(idx, which_set), "left-most region must be free");
    return idx;
  }
}

inline size_t ShenandoahSetsOfFree::rightmost(ShenandoahFreeMemoryType which_set) const {
  assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
  size_t idx = _rightmosts[which_set];
  assert ((_leftmosts[which_set] == _max) || in_free_set(idx, which_set), "right-most region must be free");
  return idx;
}

inline bool ShenandoahSetsOfFree::is_empty(ShenandoahFreeMemoryType which_set) const {
  assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
  return (leftmost(which_set) > rightmost(which_set));
}

size_t ShenandoahSetsOfFree::leftmost_empty(ShenandoahFreeMemoryType which_set) {
  assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
  for (size_t idx = _leftmosts_empty[which_set]; idx < _max; idx++) {
    if ((membership(idx) == which_set) && (_free_set->alloc_capacity(idx) == _region_size_bytes)) {
      _leftmosts_empty[which_set] = idx;
      return idx;
    }
  }
  _leftmosts_empty[which_set] = _max;
  _rightmosts_empty[which_set] = 0;
  return _max;
}

inline size_t ShenandoahSetsOfFree::rightmost_empty(ShenandoahFreeMemoryType which_set) {
  assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
  for (intptr_t idx = _rightmosts_empty[which_set]; idx >= 0; idx--) {
    if ((membership(idx) == which_set) && (_free_set->alloc_capacity(idx) == _region_size_bytes)) {
      _rightmosts_empty[which_set] = idx;
      return idx;
    }
  }
  _leftmosts_empty[which_set] = _max;
  _rightmosts_empty[which_set] = 0;
  return 0;
}

inline bool ShenandoahSetsOfFree::alloc_from_left_bias(ShenandoahFreeMemoryType which_set) {
  assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
  return _left_to_right_bias[which_set];
}
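// Decide whether allocations from which_set should proceed left-to-right or right-to-left: whichever
// half of the set's current index range holds less free memory is consumed first, so sparsely
// populated stretches are drained and the surviving free regions end up densely packed.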
void ShenandoahSetsOfFree::establish_alloc_bias(ShenandoahFreeMemoryType which_set) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  shenandoah_assert_heaplocked();
  assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");

  size_t middle = (_leftmosts[which_set] + _rightmosts[which_set]) / 2;
  size_t available_in_first_half = 0;
  size_t available_in_second_half = 0;

  for (size_t index = _leftmosts[which_set]; index < middle; index++) {
    if (in_free_set(index, which_set)) {
      ShenandoahHeapRegion* r = heap->get_region(index);
      available_in_first_half += r->free();
    }
  }
  for (size_t index = middle; index <= _rightmosts[which_set]; index++) {
    if (in_free_set(index, which_set)) {
      ShenandoahHeapRegion* r = heap->get_region(index);
      available_in_second_half += r->free();
    }
  }

  // We want to consume the sparsely distributed regions first, so that the remaining regions are densely packed.
  // Densely packing regions reduces the effort to search for a region that has sufficient memory to satisfy a new allocation
  // request. Regions become sparsely distributed following a Full GC, which tends to slide all regions to the front of the
  // heap rather than allowing survivor regions to remain at the high end of the heap where we intend for them to congregate.

  // TODO: In the future, we may modify Full GC so that it slides old objects to the end of the heap and young objects to the
  // front of the heap. If this is done, we can always search survivor Collector and OldCollector regions right to left.
  _left_to_right_bias[which_set] = (available_in_second_half > available_in_first_half);
}

#ifdef ASSERT
void ShenandoahSetsOfFree::assert_bounds() {

  size_t leftmosts[NumFreeSets];
  size_t rightmosts[NumFreeSets];
  size_t empty_leftmosts[NumFreeSets];
  size_t empty_rightmosts[NumFreeSets];

  for (int i = 0; i < NumFreeSets; i++) {
    leftmosts[i] = _max;
    empty_leftmosts[i] = _max;
    rightmosts[i] = 0;
    empty_rightmosts[i] = 0;
  }

  for (size_t i = 0; i < _max; i++) {
    ShenandoahFreeMemoryType set = membership(i);
    switch (set) {
      case NotFree:
        break;

      case Mutator:
      case Collector:
      case OldCollector:
      {
        size_t capacity = _free_set->alloc_capacity(i);
        bool is_empty = (capacity == _region_size_bytes);
        assert(capacity > 0, "free regions must have allocation capacity");
        if (i < leftmosts[set]) {
          leftmosts[set] = i;
        }
        if (is_empty && (i < empty_leftmosts[set])) {
          empty_leftmosts[set] = i;
        }
        if (i > rightmosts[set]) {
          rightmosts[set] = i;
        }
        if (is_empty && (i > empty_rightmosts[set])) {
          empty_rightmosts[set] = i;
        }
        break;
      }

      case NumFreeSets:
      default:
        ShouldNotReachHere();
    }
  }

  // Performance invariants. Failing these would not break the free set, but performance would suffer.
  assert (leftmost(Mutator) <= _max, "leftmost in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT, leftmost(Mutator), _max);
  assert (rightmost(Mutator) < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, rightmost(Mutator), _max);

  assert (leftmost(Mutator) == _max || in_free_set(leftmost(Mutator), Mutator),
          "leftmost region should be free: " SIZE_FORMAT, leftmost(Mutator));
  assert (leftmost(Mutator) == _max || in_free_set(rightmost(Mutator), Mutator),
          "rightmost region should be free: " SIZE_FORMAT, rightmost(Mutator));

  // If Mutator set is empty, leftmosts will both equal max, rightmosts will both equal zero. Likewise for empty region sets.
  size_t beg_off = leftmosts[Mutator];
  size_t end_off = rightmosts[Mutator];
  assert (beg_off >= leftmost(Mutator),
          "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost(Mutator));
  assert (end_off <= rightmost(Mutator),
          "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, rightmost(Mutator));

  beg_off = empty_leftmosts[Mutator];
  end_off = empty_rightmosts[Mutator];
  assert (beg_off >= leftmost_empty(Mutator),
          "free empty regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost_empty(Mutator));
  assert (end_off <= rightmost_empty(Mutator),
          "free empty regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, rightmost_empty(Mutator));

  // Performance invariants. Failing these would not break the free set, but performance would suffer.
  assert (leftmost(Collector) <= _max, "leftmost in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT, leftmost(Collector), _max);
  assert (rightmost(Collector) < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, rightmost(Collector), _max);

  assert (leftmost(Collector) == _max || in_free_set(leftmost(Collector), Collector),
          "leftmost region should be free: " SIZE_FORMAT, leftmost(Collector));
  assert (leftmost(Collector) == _max || in_free_set(rightmost(Collector), Collector),
          "rightmost region should be free: " SIZE_FORMAT, rightmost(Collector));

  // If Collector set is empty, leftmosts will both equal max, rightmosts will both equal zero. Likewise for empty region sets.
  beg_off = leftmosts[Collector];
  end_off = rightmosts[Collector];
  assert (beg_off >= leftmost(Collector),
          "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost(Collector));
  assert (end_off <= rightmost(Collector),
          "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, rightmost(Collector));

  beg_off = empty_leftmosts[Collector];
  end_off = empty_rightmosts[Collector];
  assert (beg_off >= leftmost_empty(Collector),
          "free empty regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost_empty(Collector));
  assert (end_off <= rightmost_empty(Collector),
          "free empty regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, rightmost_empty(Collector));

  // Performance invariants. Failing these would not break the free set, but performance would suffer.
  assert (leftmost(OldCollector) <= _max, "leftmost in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT, leftmost(OldCollector), _max);
  assert (rightmost(OldCollector) < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, rightmost(OldCollector), _max);

  assert (leftmost(OldCollector) == _max || in_free_set(leftmost(OldCollector), OldCollector),
          "leftmost region should be free: " SIZE_FORMAT, leftmost(OldCollector));
  assert (leftmost(OldCollector) == _max || in_free_set(rightmost(OldCollector), OldCollector),
          "rightmost region should be free: " SIZE_FORMAT, rightmost(OldCollector));

  // If OldCollector set is empty, leftmosts will both equal max, rightmosts will both equal zero. Likewise for empty region sets.
  beg_off = leftmosts[OldCollector];
  end_off = rightmosts[OldCollector];
  assert (beg_off >= leftmost(OldCollector),
          "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost(OldCollector));
  assert (end_off <= rightmost(OldCollector),
          "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, rightmost(OldCollector));

  beg_off = empty_leftmosts[OldCollector];
  end_off = empty_rightmosts[OldCollector];
  assert (beg_off >= leftmost_empty(OldCollector),
          "free empty regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost_empty(OldCollector));
  assert (end_off <= rightmost_empty(OldCollector),
          "free empty regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, rightmost_empty(OldCollector));
}
#endif

ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
  _heap(heap),
  _free_sets(max_regions, this)
{
  clear_internal();
}

// This allocates from a region within the old_collector_set. If affiliation equals OLD, the allocation must be taken
// from a region that is_old(). Otherwise, affiliation should be FREE, in which case this will put a previously unaffiliated
// region into service.
HeapWord* ShenandoahFreeSet::allocate_old_with_affiliation(ShenandoahAffiliation affiliation,
                                                           ShenandoahAllocRequest& req, bool& in_new_region) {
  shenandoah_assert_heaplocked();

  size_t rightmost =
    (affiliation == ShenandoahAffiliation::FREE)? _free_sets.rightmost_empty(OldCollector): _free_sets.rightmost(OldCollector);
  size_t leftmost =
    (affiliation == ShenandoahAffiliation::FREE)? _free_sets.leftmost_empty(OldCollector): _free_sets.leftmost(OldCollector);
  if (_free_sets.alloc_from_left_bias(OldCollector)) {
    // This mode picks up stragglers left by a full GC
    for (size_t idx = leftmost; idx <= rightmost; idx++) {
      if (_free_sets.in_free_set(idx, OldCollector)) {
        ShenandoahHeapRegion* r = _heap->get_region(idx);
        assert(r->is_trash() || !r->is_affiliated() || r->is_old(), "old_collector_set region has bad affiliation");
        if (r->affiliation() == affiliation) {
          HeapWord* result = try_allocate_in(r, req, in_new_region);
          if (result != nullptr) {
            return result;
          }
        }
      }
    }
  } else {
    // This mode picks up stragglers left by a previous concurrent GC
    for (size_t count = rightmost + 1; count > leftmost; count--) {
      // size_t is unsigned, need to dodge underflow when _leftmost = 0
      size_t idx = count - 1;
      if (_free_sets.in_free_set(idx, OldCollector)) {
        ShenandoahHeapRegion* r = _heap->get_region(idx);
        assert(r->is_trash() || !r->is_affiliated() || r->is_old(), "old_collector_set region has bad affiliation");
        if (r->affiliation() == affiliation) {
          HeapWord* result = try_allocate_in(r, req, in_new_region);
          if (result != nullptr) {
            return result;
          }
        }
      }
    }
  }
  return nullptr;
}

void ShenandoahFreeSet::add_old_collector_free_region(ShenandoahHeapRegion* region) {
  shenandoah_assert_heaplocked();
  size_t plab_min_size_in_bytes = ShenandoahGenerationalHeap::heap()->plab_min_size() * HeapWordSize;
  size_t idx = region->index();
  size_t capacity = alloc_capacity(region);
  assert(_free_sets.membership(idx) == NotFree, "Regions promoted in place should not be in any free set");
  if (capacity >= plab_min_size_in_bytes) {
    _free_sets.make_free(idx, OldCollector, capacity);
    _heap->old_generation()->augment_promoted_reserve(capacity);
  }
}
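// Young-generation analog of allocate_old_with_affiliation(): try to satisfy a GC allocation from a
// Collector-set region whose affiliation matches the request (or from an empty region when affiliation
// is FREE). The scan runs right to left, mirroring the collector's bias toward high memory.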
HeapWord* ShenandoahFreeSet::allocate_with_affiliation(ShenandoahAffiliation affiliation,
                                                       ShenandoahAllocRequest& req, bool& in_new_region) {
  shenandoah_assert_heaplocked();
  size_t rightmost =
    (affiliation == ShenandoahAffiliation::FREE)? _free_sets.rightmost_empty(Collector): _free_sets.rightmost(Collector);
  size_t leftmost =
    (affiliation == ShenandoahAffiliation::FREE)? _free_sets.leftmost_empty(Collector): _free_sets.leftmost(Collector);
  for (size_t c = rightmost + 1; c > leftmost; c--) {
    // size_t is unsigned, need to dodge underflow when _leftmost = 0
    size_t idx = c - 1;
    if (_free_sets.in_free_set(idx, Collector)) {
      ShenandoahHeapRegion* r = _heap->get_region(idx);
      if (r->affiliation() == affiliation) {
        HeapWord* result = try_allocate_in(r, req, in_new_region);
        if (result != nullptr) {
          return result;
        }
      }
    }
  }
  log_debug(gc, free)("Could not allocate collector region with affiliation: %s for request " PTR_FORMAT,
                      shenandoah_affiliation_name(affiliation), p2i(&req));
  return nullptr;
}
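// Satisfy a single allocation request of the given type within the partitioning maintained above:
// mutator TLAB/shared requests search only the Mutator set, right to left; GC requests (GCLAB, PLAB,
// shared_gc) search the Collector or OldCollector set and, when ShenandoahEvacReserveOverflow permits,
// may steal an entirely free region from the Mutator view as a last resort.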
HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
  shenandoah_assert_heaplocked();

  // Scan the bitmap looking for a first fit.
  //
  // Leftmost and rightmost bounds provide enough caching to walk the bitmap efficiently. Normally,
  // we find the region to allocate in right away.
  //
  // Allocations are biased: new application allocs go to the beginning of the heap, and GC allocs
  // go to the end. This makes application allocation faster, because we would clear lots
  // of regions from the beginning most of the time.
  //
  // The free set maintains mutator and collector views, and normally they allocate from their own views
  // only, except in the special cases for stealing and mixed allocations below.

  // Overwrite with non-zero (non-NULL) values only if necessary for allocation bookkeeping.
  bool allow_new_region = true;
  if (_heap->mode()->is_generational()) {
    switch (req.affiliation()) {
      case ShenandoahAffiliation::OLD_GENERATION:
        // Note: unsigned result from free_unaffiliated_regions() will never be less than zero, but it may equal zero.
        if (_heap->old_generation()->free_unaffiliated_regions() <= 0) {
          allow_new_region = false;
        }
        break;

      case ShenandoahAffiliation::YOUNG_GENERATION:
        // Note: unsigned result from free_unaffiliated_regions() will never be less than zero, but it may equal zero.
        if (_heap->young_generation()->free_unaffiliated_regions() <= 0) {
          allow_new_region = false;
        }
        break;

      case ShenandoahAffiliation::FREE:
        fatal("Should request affiliation");

      default:
        ShouldNotReachHere();
        break;
    }
  }
  switch (req.type()) {
    case ShenandoahAllocRequest::_alloc_tlab:
    case ShenandoahAllocRequest::_alloc_shared: {
      // Try to allocate in the mutator view.
      // Allocate within the mutator free set from high memory to low so as to preserve low memory for humongous allocations.
      if (!_free_sets.is_empty(Mutator)) {
        // Use signed idx. Otherwise, the loop will never terminate.
        int leftmost = (int) _free_sets.leftmost(Mutator);
        for (int idx = (int) _free_sets.rightmost(Mutator); idx >= leftmost; idx--) {
          ShenandoahHeapRegion* r = _heap->get_region(idx);
          if (_free_sets.in_free_set(idx, Mutator) && (allow_new_region || r->is_affiliated())) {
            // try_allocate_in() increases used if the allocation is successful.
            HeapWord* result;
            size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab)? req.min_size(): req.size();
            if ((alloc_capacity(r) >= min_size) && ((result = try_allocate_in(r, req, in_new_region)) != nullptr)) {
              return result;
            }
          }
        }
      }
      // There is no recovery. Mutator does not touch collector view at all.
      break;
    }
    case ShenandoahAllocRequest::_alloc_gclab:
      // GCLABs are for evacuation so we must be in evacuation phase. If this allocation is successful, increment
      // the relevant evac_expended rather than used value.

    case ShenandoahAllocRequest::_alloc_plab:
      // PLABs always reside in old-gen and are only allocated during evacuation phase.

    case ShenandoahAllocRequest::_alloc_shared_gc: {
      if (!_heap->mode()->is_generational()) {
        // size_t is unsigned, need to dodge underflow when _leftmost = 0
        // Fast-path: try to allocate in the collector view first
        for (size_t c = _free_sets.rightmost(Collector) + 1; c > _free_sets.leftmost(Collector); c--) {
          size_t idx = c - 1;
          if (_free_sets.in_free_set(idx, Collector)) {
            HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
            if (result != nullptr) {
              return result;
            }
          }
        }
      } else {
        // First try to fit into a region that is already in use in the same generation.
        HeapWord* result;
        if (req.is_old()) {
          result = allocate_old_with_affiliation(req.affiliation(), req, in_new_region);
        } else {
          result = allocate_with_affiliation(req.affiliation(), req, in_new_region);
        }
        if (result != nullptr) {
          return result;
        }
        if (allow_new_region) {
          // Then try a free region that is dedicated to GC allocations.
          if (req.is_old()) {
            result = allocate_old_with_affiliation(FREE, req, in_new_region);
          } else {
            result = allocate_with_affiliation(FREE, req, in_new_region);
          }
          if (result != nullptr) {
            return result;
          }
        }
      }
      // No dice. Can we borrow space from the mutator view?
      if (!ShenandoahEvacReserveOverflow) {
        return nullptr;
      }

      if (!allow_new_region && req.is_old() && (_heap->young_generation()->free_unaffiliated_regions() > 0)) {
        // This allows us to flip a mutator region to old_collector
        allow_new_region = true;
      }

      // We should expand old-gen if this can prevent an old-gen evacuation failure. We don't care so much about
      // promotion failures since they can be mitigated in a subsequent GC pass. It would be nice to know if this
      // allocation request is for evacuation or promotion. Individual threads limit their use of PLAB memory for
      // promotions, so we already have an assurance that any additional memory set aside for old-gen will be used
      // only for old-gen evacuations.

      // Also TODO:
      // if (GC is idle (out of cycle) and mutator allocation fails and there is memory reserved in Collector
      // or OldCollector sets, transfer a region of memory so that we can satisfy the allocation request, and
      // immediately trigger the start of GC.
      // It is better to satisfy the allocation than to trigger an out-of-cycle
      // allocation failure (even if this means we have a little less memory to handle evacuations during the
      // subsequent GC pass).

      if (allow_new_region) {
        // Try to steal an empty region from the mutator view.
        for (size_t c = _free_sets.rightmost_empty(Mutator) + 1; c > _free_sets.leftmost_empty(Mutator); c--) {
          size_t idx = c - 1;
          if (_free_sets.in_free_set(idx, Mutator)) {
            ShenandoahHeapRegion* r = _heap->get_region(idx);
            if (can_allocate_from(r)) {
              if (req.is_old()) {
                flip_to_old_gc(r);
              } else {
                flip_to_gc(r);
              }
              HeapWord *result = try_allocate_in(r, req, in_new_region);
              if (result != nullptr) {
                log_debug(gc, free)("Flipped region " SIZE_FORMAT " to gc for request: " PTR_FORMAT, idx, p2i(&req));
                return result;
              }
            }
          }
        }
      }

      // No dice. Do not try to mix mutator and GC allocations, because
      // URWM moves due to GC allocations would expose unparsable mutator
      // allocations.
      break;
    }
    default:
      ShouldNotReachHere();
  }
  return nullptr;
}

// This work method takes an argument corresponding to the number of bytes
// free in a region, and returns the largest amount in heapwords that can be allocated
// such that both of the following conditions are satisfied:
//
// 1. it is a multiple of card size
// 2. any remaining shard may be filled with a filler object
//
// The idea is that the allocation starts and ends at card boundaries. Because
// the end of a region is card-aligned, the remainder shard that must be filled is
// at the start of the free space.
//
// This is merely a helper method to use for the purpose of such a calculation.
size_t get_usable_free_words(size_t free_bytes) {
  // e.g. card_size is 512, card_shift is 9, min_fill_size() is 8
  //      free is 514
  //      usable_free is 512, which is decreased to 0
  size_t usable_free = (free_bytes / CardTable::card_size()) << CardTable::card_shift();
  assert(usable_free <= free_bytes, "Sanity check");
  if ((free_bytes != usable_free) && (free_bytes - usable_free < ShenandoahHeap::min_fill_size() * HeapWordSize)) {
    // After aligning to card multiples, the remainder would be smaller than
    // the minimum filler object, so we'll need to take away another card's
    // worth to construct a filler object.
    if (usable_free >= CardTable::card_size()) {
      usable_free -= CardTable::card_size();
    } else {
      assert(usable_free == 0, "usable_free is a multiple of card_size and card_size > min_fill_size");
    }
  }

  return usable_free / HeapWordSize;
}
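// Another worked example, reusing the values assumed above (card_size 512, min_fill_size() of 8 words,
// 8-byte heap words): free_bytes = 1030 rounds down to usable_free = 1024; the 6-byte remainder is too
// small for a filler object, so another card is surrendered, leaving 512 bytes, i.e. 64 heap words.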
// Given a size argument, which is a multiple of card size, a request struct
// for a PLAB, and an old region, return a pointer to the allocated space for
// a PLAB which is card-aligned and where any remaining shard in the region
// has been suitably filled by a filler object.
// It is assumed (and assertion-checked) that such an allocation is always possible.
HeapWord* ShenandoahFreeSet::allocate_aligned_plab(size_t size, ShenandoahAllocRequest& req, ShenandoahHeapRegion* r) {
  assert(_heap->mode()->is_generational(), "PLABs are only for generational mode");
  assert(r->is_old(), "All PLABs reside in old-gen");
  assert(!req.is_mutator_alloc(), "PLABs should not be allocated by mutators.");
  assert(is_aligned(size, CardTable::card_size_in_words()), "Align by design");

  HeapWord* result = r->allocate_aligned(size, req, CardTable::card_size());
  assert(result != nullptr, "Allocation cannot fail");
  assert(r->top() <= r->end(), "Allocation cannot span end of region");
  assert(req.actual_size() == size, "Should not have needed to adjust size for PLAB.");
  assert(is_aligned(result, CardTable::card_size_in_words()), "Align by design");

  return result;
}

HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
  assert (has_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());
  if (_heap->is_concurrent_weak_root_in_progress() && r->is_trash()) {
    return nullptr;
  }

  try_recycle_trashed(r);
  if (!r->is_affiliated()) {
    ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
    r->set_affiliation(req.affiliation());
    if (r->is_old()) {
      // Any OLD region allocated during concurrent coalesce-and-fill does not need to be coalesced and filled because
      // all objects allocated within this region are above TAMS (and thus are implicitly marked). In case this is an
      // OLD region and concurrent preparation for mixed evacuations visits this region before the start of the next
      // old-gen concurrent mark (i.e. this region is allocated following the start of old-gen concurrent mark but before
      // concurrent preparations for mixed evacuations are completed), we mark this region as not requiring any
      // coalesce-and-fill processing.
      r->end_preemptible_coalesce_and_fill();
      _heap->old_generation()->clear_cards_for(r);
    }
    _heap->generation_for(r->affiliation())->increment_affiliated_region_count();

    assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom");
    assert(ctx->is_bitmap_clear_range(ctx->top_bitmap(r), r->end()), "Bitmap above top_bitmap() must be clear");
  } else if (r->affiliation() != req.affiliation()) {
    assert(_heap->mode()->is_generational(), "Request for %s from %s region should only happen in generational mode.",
           req.affiliation_name(), r->affiliation_name());
    return nullptr;
  }

  in_new_region = r->is_empty();
  HeapWord* result = nullptr;

  if (in_new_region) {
    log_debug(gc, free)("Using new region (" SIZE_FORMAT ") for %s (" PTR_FORMAT ").",
                        r->index(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(&req));
  }

  // req.size() is in words, r->free() is in bytes.
  if (req.is_lab_alloc()) {
    if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
      assert(_heap->mode()->is_generational(), "PLABs are only for generational mode");
      assert(_free_sets.in_free_set(r->index(), OldCollector), "PLABs must be allocated in old_collector_free regions");
      // Need to ensure that PLABs are aligned on a multiple of the card size.
      // Since we have elastic TLABs, align sizes up. They may be decreased to fit in the usable
      // memory remaining in the region (which will also be aligned to cards).
      size_t adjusted_size = align_up(req.size(), CardTable::card_size_in_words());
      size_t adjusted_min_size = align_up(req.min_size(), CardTable::card_size_in_words());
      size_t usable_free = get_usable_free_words(r->free());
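      // For illustration (values assumed, not taken from a real configuration): with 64-word cards, a
      // request of size 70 and min_size 40 becomes adjusted_size = 128 and adjusted_min_size = 64. If
      // the region's usable_free is only 64, adjusted_size shrinks to 64 below, which still satisfies
      // adjusted_min_size, so the PLAB is allocated at the reduced, card-aligned size.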
      if (adjusted_size > usable_free) {
        adjusted_size = usable_free;
      }

      if (adjusted_size >= adjusted_min_size) {
        result = allocate_aligned_plab(adjusted_size, req, r);
      }
      // Otherwise, leave result == nullptr because the adjusted size is smaller than min size.
    } else {
      // This is a GCLAB or a TLAB allocation
      size_t adjusted_size = req.size();
      size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
      if (adjusted_size > free) {
        adjusted_size = free;
      }
      if (adjusted_size >= req.min_size()) {
        result = r->allocate(adjusted_size, req);
        assert (result != nullptr, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, adjusted_size);
        req.set_actual_size(adjusted_size);
      } else {
        log_trace(gc, free)("Failed to shrink TLAB or GCLAB request (" SIZE_FORMAT ") in region " SIZE_FORMAT " to " SIZE_FORMAT
                            " because min_size() is " SIZE_FORMAT, req.size(), r->index(), adjusted_size, req.min_size());
      }
    }
  } else {
    size_t size = req.size();
    result = r->allocate(size, req);
    if (result != nullptr) {
      // Record actual allocation size
      req.set_actual_size(size);
    }
  }

  ShenandoahGeneration* generation = _heap->generation_for(req.affiliation());
  if (result != nullptr) {
    // Allocation successful, bump stats:
    if (req.is_mutator_alloc()) {
      assert(req.is_young(), "Mutator allocations always come from young generation.");
      _free_sets.increase_used(Mutator, req.actual_size() * HeapWordSize);
    } else {
      assert(req.is_gc_alloc(), "Should be gc_alloc since req wasn't mutator alloc");

      // For GC allocations, we advance update_watermark because the objects relocated into this memory during
      // evacuation are not updated during evacuation. For both young and old regions r, it is essential that all
      // PLABs be made parsable at the end of evacuation. This is enabled by retiring all plabs at end of evacuation.
      // TODO: Making a PLAB parsable involves placing a filler object in its remnant memory but does not require
      // that the PLAB be disabled for all future purposes. We may want to introduce a new service to make the
      // PLABs parsable while still allowing the PLAB to serve future allocation requests that arise during the
      // next evacuation pass.
      r->set_update_watermark(r->top());
      if (r->is_old()) {
        assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "old-gen allocations use PLAB or shared allocation");
        // For PLABs, we'll sort out the difference between evac and promotion usage when we retire the PLAB.
      }
    }
  }

  if (result == nullptr || alloc_capacity(r) < PLAB::min_size() * HeapWordSize) {
    // Region cannot afford this and is likely to not afford future allocations. Retire it.
    //
    // While this seems a bit harsh, especially in the case when this large allocation does not
    // fit but the next small one would, we risk inflating scan times when lots of
    // almost-full regions precede the fully-empty region where we want to allocate the entire TLAB.

    // Record the remainder as allocation waste
    size_t idx = r->index();
    if (req.is_mutator_alloc()) {
      size_t waste = r->free();
      if (waste > 0) {
        _free_sets.increase_used(Mutator, waste);
        // This one request could cause several regions to be "retired", so we must accumulate the waste
        req.set_waste((waste >> LogHeapWordSize) + req.waste());
      }
      assert(_free_sets.membership(idx) == Mutator, "Must be mutator free: " SIZE_FORMAT, idx);
    } else {
      assert(_free_sets.membership(idx) == Collector || _free_sets.membership(idx) == OldCollector,
             "Must be collector or old-collector free: " SIZE_FORMAT, idx);
    }
    // This region is no longer considered free (in any set)
    _free_sets.remove_from_free_sets(idx);
    _free_sets.assert_bounds();
  }
  return result;
}

HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
  shenandoah_assert_heaplocked();

  size_t words_size = req.size();
  size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

  assert(req.is_young(), "Humongous regions always allocated in YOUNG");
  ShenandoahGeneration* generation = _heap->generation_for(req.affiliation());

  // Check if there are enough regions left to satisfy allocation.
  if (_heap->mode()->is_generational()) {
    size_t avail_young_regions = generation->free_unaffiliated_regions();
    if (num > _free_sets.count(Mutator) || (num > avail_young_regions)) {
      return nullptr;
    }
  } else {
    if (num > _free_sets.count(Mutator)) {
      return nullptr;
    }
  }

  // Find the contiguous interval of $num regions, starting from $beg and ending in $end,
  // inclusive. Contiguous allocations are biased to the beginning.
  size_t beg = _free_sets.leftmost(Mutator);
  size_t end = beg;

  while (true) {
    if (end >= _free_sets.max()) {
      // Hit the end, goodbye
      return nullptr;
    }

    // If regions are not adjacent, then the current [beg; end] is useless, and we may fast-forward.
    // If a region is not completely free, the current [beg; end] is useless, and we may fast-forward.
    if (!_free_sets.in_free_set(end, Mutator) || !can_allocate_from(_heap->get_region(end))) {
      end++;
      beg = end;
      continue;
    }

    if ((end - beg + 1) == num) {
      // found the match
      break;
    }

    end++;
  }

  size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
  ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();

  // Initialize regions:
  for (size_t i = beg; i <= end; i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    try_recycle_trashed(r);

    assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous");
    assert(r->is_empty(), "Should be empty");

    if (i == beg) {
      r->make_humongous_start();
    } else {
      r->make_humongous_cont();
    }

    // Trailing region may be non-full, record the remainder there
    size_t used_words;
    if ((i == end) && (remainder != 0)) {
      used_words = remainder;
    } else {
      used_words = ShenandoahHeapRegion::region_size_words();
    }

    r->set_affiliation(req.affiliation());
    r->set_update_watermark(r->bottom());
    r->set_top(r->bottom() + used_words);

    // While individual regions report their true use, all humongous regions are marked used in the free set.
    _free_sets.remove_from_free_sets(r->index());
  }
  generation->increase_affiliated_region_count(num);

  size_t total_humongous_size = ShenandoahHeapRegion::region_size_bytes() * num;
  _free_sets.increase_used(Mutator, total_humongous_size);
  _free_sets.assert_bounds();
  req.set_actual_size(words_size);
  if (remainder != 0) {
    req.set_waste(ShenandoahHeapRegion::region_size_words() - remainder);
  }
  return _heap->get_region(beg)->bottom();
}

// Returns true iff this region is entirely available, either because it is empty() or because it has been found to represent
// immediate trash and we'll be able to immediately recycle it. Note that we cannot recycle immediate trash if
// concurrent weak root processing is in progress.
bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) const {
  return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
}

bool ShenandoahFreeSet::can_allocate_from(size_t idx) const {
  ShenandoahHeapRegion* r = _heap->get_region(idx);
  return can_allocate_from(r);
}

size_t ShenandoahFreeSet::alloc_capacity(size_t idx) const {
  ShenandoahHeapRegion* r = _heap->get_region(idx);
  return alloc_capacity(r);
}

size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) const {
  if (r->is_trash()) {
    // This would be recycled on allocation path
    return ShenandoahHeapRegion::region_size_bytes();
  } else {
    return r->free();
  }
}

bool ShenandoahFreeSet::has_alloc_capacity(ShenandoahHeapRegion *r) const {
  return alloc_capacity(r) > 0;
}

void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
  if (r->is_trash()) {
    r->recycle();
  }
}

void ShenandoahFreeSet::recycle_trash() {
  // The lock is not reentrant; check that we don't already hold it.
  shenandoah_assert_not_heaplocked();

  for (size_t i = 0; i < _heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    if (r->is_trash()) {
      ShenandoahHeapLocker locker(_heap->lock());
      try_recycle_trashed(r);
    }
    SpinPause(); // allow allocators to take the lock
  }
}

void ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) {
  size_t idx = r->index();

  assert(_free_sets.in_free_set(idx, Mutator), "Should be in mutator view");
  // Note: can_allocate_from(r) means r is entirely empty
  assert(can_allocate_from(r), "Should not be allocated");

  ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::cast(_heap);
  size_t region_capacity = alloc_capacity(r);
  _free_sets.move_to_set(idx, OldCollector, region_capacity);
  _free_sets.assert_bounds();
  _heap->old_generation()->augment_evacuation_reserve(region_capacity);
  bool transferred = gen_heap->generation_sizer()->transfer_to_old(1);
  if (!transferred) {
    log_warning(gc, free)("Forcing transfer of region " SIZE_FORMAT " to old reserve.", idx);
    gen_heap->generation_sizer()->force_transfer_to_old(1);
  }
  // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
  // to recycle trash before attempting to allocate anything in the region.
}
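// As above, but for young GC allocations: move an entirely free mutator region into the Collector set.
// No generation-sizer transfer is needed here, since the region stays within the young generation.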
void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
  size_t idx = r->index();

  assert(_free_sets.in_free_set(idx, Mutator), "Should be in mutator view");
  assert(can_allocate_from(r), "Should not be allocated");

  size_t region_capacity = alloc_capacity(r);
  _free_sets.move_to_set(idx, Collector, region_capacity);
  _free_sets.assert_bounds();

  // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
  // to recycle trash before attempting to allocate anything in the region.
}

void ShenandoahFreeSet::clear() {
  shenandoah_assert_heaplocked();
  clear_internal();
}

void ShenandoahFreeSet::clear_internal() {
  _free_sets.clear_all();
}

// This function places all is_old() regions that have allocation capacity into the old_collector set. It places
// all other regions (not is_old()) that have allocation capacity into the mutator set. Subsequently, we will
// move some of the mutator regions into the collector set or old_collector set with the intent of packing
// old_collector memory into the highest (rightmost) addresses of the heap and the collector memory into the
// next highest addresses of the heap, with mutator memory consuming the lowest addresses of the heap.
void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regions, size_t &old_cset_regions,
                                                         size_t &first_old_region, size_t &last_old_region,
                                                         size_t &old_region_count) {
  first_old_region = _heap->num_regions();
  last_old_region = 0;
  old_region_count = 0;
  old_cset_regions = 0;
  young_cset_regions = 0;
  for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
    ShenandoahHeapRegion* region = _heap->get_region(idx);
    if (region->is_trash()) {
      // Trashed regions represent regions that had been in the collection set but have not yet been "cleaned up".
      if (region->is_old()) {
        old_cset_regions++;
      } else {
        assert(region->is_young(), "Trashed region should be old or young");
        young_cset_regions++;
      }
    } else if (region->is_old() && region->is_regular()) {
      old_region_count++;
      if (first_old_region > idx) {
        first_old_region = idx;
      }
      last_old_region = idx;
    }
    if (region->is_alloc_allowed() || region->is_trash()) {
      assert(!region->is_cset(), "Shouldn't be adding cset regions to the free set");
      assert(_free_sets.in_free_set(idx, NotFree), "We are about to make region free; it should not be free already");

      // Do not add regions that would almost surely fail allocation.
      // Note that PLAB::min_size() is typically less than ShenandoahGenerationalHeap::plab_min_size().
      if (alloc_capacity(region) < PLAB::min_size() * HeapWordSize) continue;

      if (region->is_old()) {
        _free_sets.make_free(idx, OldCollector, alloc_capacity(region));
        log_debug(gc, free)(
          "  Adding Region " SIZE_FORMAT " (Free: " SIZE_FORMAT "%s, Used: " SIZE_FORMAT "%s) to old collector set",
          idx, byte_size_in_proper_unit(region->free()), proper_unit_for_byte_size(region->free()),
          byte_size_in_proper_unit(region->used()), proper_unit_for_byte_size(region->used()));
      } else {
        _free_sets.make_free(idx, Mutator, alloc_capacity(region));
        log_debug(gc, free)(
          "  Adding Region " SIZE_FORMAT " (Free: " SIZE_FORMAT "%s, Used: " SIZE_FORMAT "%s) to mutator set",
          idx, byte_size_in_proper_unit(region->free()), proper_unit_for_byte_size(region->free()),
          byte_size_in_proper_unit(region->used()), proper_unit_for_byte_size(region->used()));
      }
    }
  }
}

// Move no more than max_xfer_regions from the existing Collector and OldCollector free sets to the Mutator free set.
// This is called from outside the heap lock.
void ShenandoahFreeSet::move_collector_sets_to_mutator(size_t max_xfer_regions) {
  size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
  size_t collector_empty_xfer = 0;
  size_t collector_not_empty_xfer = 0;
  size_t old_collector_empty_xfer = 0;

  // Process empty regions within the Collector free set
  if ((max_xfer_regions > 0) && (_free_sets.leftmost_empty(Collector) <= _free_sets.rightmost_empty(Collector))) {
    ShenandoahHeapLocker locker(_heap->lock());
    for (size_t idx = _free_sets.leftmost_empty(Collector);
         (max_xfer_regions > 0) && (idx <= _free_sets.rightmost_empty(Collector)); idx++) {
      if (_free_sets.in_free_set(idx, Collector) && can_allocate_from(idx)) {
        _free_sets.move_to_set(idx, Mutator, region_size_bytes);
        max_xfer_regions--;
        collector_empty_xfer += region_size_bytes;
      }
    }
  }

  // Process empty regions within the OldCollector free set
  size_t old_collector_regions = 0;
  if ((max_xfer_regions > 0) && (_free_sets.leftmost_empty(OldCollector) <= _free_sets.rightmost_empty(OldCollector))) {
    ShenandoahHeapLocker locker(_heap->lock());
    for (size_t idx = _free_sets.leftmost_empty(OldCollector);
         (max_xfer_regions > 0) && (idx <= _free_sets.rightmost_empty(OldCollector)); idx++) {
      if (_free_sets.in_free_set(idx, OldCollector) && can_allocate_from(idx)) {
        _free_sets.move_to_set(idx, Mutator, region_size_bytes);
        max_xfer_regions--;
        old_collector_empty_xfer += region_size_bytes;
        old_collector_regions++;
      }
    }
    if (old_collector_regions > 0) {
      ShenandoahGenerationalHeap::cast(_heap)->generation_sizer()->transfer_to_young(old_collector_regions);
    }
  }

  // If there are any non-empty regions within the Collector set, we can also move them to the Mutator free set
  if ((max_xfer_regions > 0) && (_free_sets.leftmost(Collector) <= _free_sets.rightmost(Collector))) {
    ShenandoahHeapLocker locker(_heap->lock());
    for (size_t idx = _free_sets.leftmost(Collector); (max_xfer_regions > 0) && (idx <= _free_sets.rightmost(Collector)); idx++) {
      size_t alloc_capacity = this->alloc_capacity(idx);
      if (_free_sets.in_free_set(idx, Collector) && (alloc_capacity > 0)) {
        _free_sets.move_to_set(idx, Mutator, alloc_capacity);
        max_xfer_regions--;
        collector_not_empty_xfer += alloc_capacity;
      }
    }
  }

  size_t collector_xfer = collector_empty_xfer + collector_not_empty_xfer;
  size_t total_xfer = collector_xfer + old_collector_empty_xfer;
  log_info(gc, free)("At start of update refs, moving " SIZE_FORMAT "%s to Mutator free set from Collector Reserve ("
                     SIZE_FORMAT "%s) and from Old Collector Reserve (" SIZE_FORMAT "%s)",
                     byte_size_in_proper_unit(total_xfer), proper_unit_for_byte_size(total_xfer),
                     byte_size_in_proper_unit(collector_xfer), proper_unit_for_byte_size(collector_xfer),
                     byte_size_in_proper_unit(old_collector_empty_xfer), proper_unit_for_byte_size(old_collector_empty_xfer));
}

// Overwrite arguments to represent the amount of memory in each generation that is about to be recycled
void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_cset_regions, size_t &old_cset_regions,
                                           size_t &first_old_region, size_t &last_old_region, size_t &old_region_count) {
  shenandoah_assert_heaplocked();
  // This resets all state information, removing all regions from all sets.
  clear();
  log_debug(gc, free)("Rebuilding FreeSet");

  // This places regions that have alloc_capacity into the old_collector set if they identify as is_old() or the
  // mutator set otherwise.
  find_regions_with_alloc_capacity(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
}

void ShenandoahFreeSet::rebuild(size_t young_cset_regions, size_t old_cset_regions, bool have_evacuation_reserves) {
  shenandoah_assert_heaplocked();
  size_t young_reserve(0), old_reserve(0);

  if (!_heap->mode()->is_generational()) {
    young_reserve = (_heap->max_capacity() / 100) * ShenandoahEvacReserve;
    old_reserve = 0;
  } else {
    compute_young_and_old_reserves(young_cset_regions, old_cset_regions, have_evacuation_reserves,
                                   young_reserve, old_reserve);
  }

  reserve_regions(young_reserve, old_reserve);
  _free_sets.establish_alloc_bias(OldCollector);
  _free_sets.assert_bounds();
  log_status();
}

void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions, size_t old_cset_regions, bool have_evacuation_reserves,
                                                       size_t& young_reserve_result, size_t& old_reserve_result) const {
  shenandoah_assert_generational();
  const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  ShenandoahOldGeneration* const old_generation = _heap->old_generation();
  size_t old_available = old_generation->available();
  size_t old_unaffiliated_regions = old_generation->free_unaffiliated_regions();
  ShenandoahYoungGeneration* const young_generation = _heap->young_generation();
  size_t young_capacity = young_generation->max_capacity();
  size_t young_unaffiliated_regions = young_generation->free_unaffiliated_regions();

  // Add in the regions we anticipate to be freed by evacuation of the collection set
  old_unaffiliated_regions += old_cset_regions;
  old_available += old_cset_regions * region_size_bytes;
  young_unaffiliated_regions += young_cset_regions;

  // Consult old-region balance to make adjustments to current generation capacities and availability.
  // The generation region transfers take place after we rebuild.
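  // Illustrative arithmetic (numbers assumed, not taken from a real configuration): with 4 MB regions
  // and old_region_balance == 3, old-gen is scheduled to surrender 3 unaffiliated regions, so below we
  // plan as though old_available were 12 MB smaller and young_capacity 12 MB larger; a negative balance
  // shifts capacity in the opposite direction.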
  const ssize_t old_region_balance = old_generation->get_region_balance();
  if (old_region_balance != 0) {
    if (old_region_balance > 0) {
      assert(old_region_balance <= checked_cast<ssize_t>(old_unaffiliated_regions), "Cannot transfer regions that are affiliated");
    } else {
      assert(0 - old_region_balance <= checked_cast<ssize_t>(young_unaffiliated_regions), "Cannot transfer regions that are affiliated");
    }

    ssize_t xfer_bytes = old_region_balance * checked_cast<ssize_t>(region_size_bytes);
    old_available -= xfer_bytes;
    old_unaffiliated_regions -= old_region_balance;
    young_capacity += xfer_bytes;
    young_unaffiliated_regions += old_region_balance;
  }

  // All allocations taken from the old collector set are performed by GC, generally using PLABs for both
  // promotions and evacuations. The partition between which old memory is reserved for evacuation and
  // which is reserved for promotion is enforced using thread-local variables that prescribe intentions for
  // each PLAB's available memory.
  if (have_evacuation_reserves) {
    // We are rebuilding at the end of final mark, having already established evacuation budgets for this GC pass.
    const size_t promoted_reserve = old_generation->get_promoted_reserve();
    const size_t old_evac_reserve = old_generation->get_evacuation_reserve();
    young_reserve_result = young_generation->get_evacuation_reserve();
    old_reserve_result = promoted_reserve + old_evac_reserve;
    assert(old_reserve_result <= old_available,
           "Cannot reserve (" SIZE_FORMAT " + " SIZE_FORMAT ") more OLD than is available: " SIZE_FORMAT,
           promoted_reserve, old_evac_reserve, old_available);
  } else {
    // We are rebuilding at end of GC, so we set aside budgets specified on command line (or defaults)
    young_reserve_result = (young_capacity * ShenandoahEvacReserve) / 100;
    // The auto-sizer has already made old-gen large enough to hold all anticipated evacuations and promotions.
    // Affiliated old-gen regions are already in the OldCollector free set. Add in the relevant number of
    // unaffiliated regions.
    old_reserve_result = old_available;
  }

  // Old available regions that have less than PLAB::min_size() of available memory are not placed into the OldCollector
  // free set. Because of this, old_available may not have enough memory to represent the intended reserve. Adjust
  // the reserve downward to account for this possibility. This loss is part of the reason why the original budget
  // was adjusted with ShenandoahOldEvacWaste and ShenandoahOldPromoWaste multipliers.
  if (old_reserve_result > _free_sets.capacity_of(OldCollector) + old_unaffiliated_regions * region_size_bytes) {
    old_reserve_result = _free_sets.capacity_of(OldCollector) + old_unaffiliated_regions * region_size_bytes;
  }

  if (young_reserve_result > young_unaffiliated_regions * region_size_bytes) {
    young_reserve_result = young_unaffiliated_regions * region_size_bytes;
  }
}

// Having placed all regions that have allocation capacity into the mutator set if they identify as is_young(),
// or into the old collector set if they identify as is_old(), move some of these regions from the mutator set
// into the collector set or old collector set in order to ensure that the memory available for allocations within
// the collector set is at least to_reserve, and the memory available for allocations within the old collector set
// is at least to_reserve_old.
void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old) {
  for (size_t i = _heap->num_regions(); i > 0; i--) {
    size_t idx = i - 1;
    ShenandoahHeapRegion* r = _heap->get_region(idx);
    if (!_free_sets.in_free_set(idx, Mutator)) {
      continue;
    }

    size_t ac = alloc_capacity(r);
    assert(ac > 0, "Membership in free set implies has capacity");
    assert(!r->is_old(), "mutator_is_free regions should not be affiliated OLD");

    bool move_to_old = _free_sets.capacity_of(OldCollector) < to_reserve_old;
    bool move_to_young = _free_sets.capacity_of(Collector) < to_reserve;

    if (!move_to_old && !move_to_young) {
      // We've satisfied both to_reserve and to_reserve_old
      break;
    }

    if (move_to_old) {
      if (r->is_trash() || !r->is_affiliated()) {
        // OLD regions that have available memory are already in the old_collector free set.
        _free_sets.move_to_set(idx, OldCollector, ac);
        log_debug(gc, free)("  Shifting region " SIZE_FORMAT " from mutator_free to old_collector_free", idx);
        continue;
      }
    }

    if (move_to_young) {
      // Note: in a previous implementation, regions were only placed into the survivor space (collector_is_free)
      // if they were entirely empty. The rationale for that is unclear: that alternative behavior would tend to
      // mix survivor objects with ephemeral objects, making it more difficult to reclaim the memory for the
      // ephemeral objects. It also delays aging of regions, causing promotion in place to be delayed.
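      //
      // Illustrative consequence (assuming 2 MB regions): a region still holding 1 MB of live ephemeral
      // data contributes ac == 1 MB toward to_reserve when moved below, so the reserve fills faster than
      // it would if only completely empty regions qualified.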
      _free_sets.move_to_set(idx, Collector, ac);
      log_debug(gc, free)("  Shifting region " SIZE_FORMAT " from mutator_free to collector_free", idx);
    }
  }

  if (LogTarget(Info, gc, free)::is_enabled()) {
    size_t old_reserve = _free_sets.capacity_of(OldCollector);
    if (old_reserve < to_reserve_old) {
      log_info(gc, free)("Wanted " PROPERFMT " for old reserve, but only reserved: " PROPERFMT,
                         PROPERFMTARGS(to_reserve_old), PROPERFMTARGS(old_reserve));
    }
    size_t young_reserve = _free_sets.capacity_of(Collector);
    if (young_reserve < to_reserve) {
      log_info(gc, free)("Wanted " PROPERFMT " for young reserve, but only reserved: " PROPERFMT,
                         PROPERFMTARGS(to_reserve), PROPERFMTARGS(young_reserve));
    }
  }
}

void ShenandoahFreeSet::log_status() {
  shenandoah_assert_heaplocked();

#ifdef ASSERT
  // Dump of the FreeSet details is only enabled if assertions are enabled
  if (LogTarget(Debug, gc, free)::is_enabled()) {
#define BUFFER_SIZE 80
    size_t retired_old = 0;
    size_t retired_old_humongous = 0;
    size_t retired_young = 0;
    size_t retired_young_humongous = 0;
    size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
    size_t retired_young_waste = 0;
    size_t retired_old_waste = 0;
    size_t consumed_collector = 0;
    size_t consumed_old_collector = 0;
    size_t consumed_mutator = 0;
    size_t available_old = 0;
    size_t available_young = 0;
    size_t available_mutator = 0;
    size_t available_collector = 0;
    size_t available_old_collector = 0;

    char buffer[BUFFER_SIZE];
    for (uint i = 0; i < BUFFER_SIZE; i++) {
      buffer[i] = '\0';
    }
    log_debug(gc, free)("FreeSet map legend:"
                        " M:mutator_free C:collector_free O:old_collector_free"
                        " H:humongous ~:retired old _:retired young");
    log_debug(gc, free)(" mutator free range [" SIZE_FORMAT ".." SIZE_FORMAT "], "
                        " collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "], "
                        "old collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "] allocates from %s",
                        _free_sets.leftmost(Mutator), _free_sets.rightmost(Mutator),
                        _free_sets.leftmost(Collector), _free_sets.rightmost(Collector),
                        _free_sets.leftmost(OldCollector), _free_sets.rightmost(OldCollector),
                        _free_sets.alloc_from_left_bias(OldCollector) ? "left to right" : "right to left");

    for (uint i = 0; i < _heap->num_regions(); i++) {
      ShenandoahHeapRegion *r = _heap->get_region(i);
      uint idx = i % 64;
      if ((i != 0) && (idx == 0)) {
        log_debug(gc, free)(" %6u: %s", i-64, buffer);
      }
      if (_free_sets.in_free_set(i, Mutator)) {
        assert(!r->is_old(), "Old regions should not be in mutator_free set");
        size_t capacity = alloc_capacity(r);
        available_mutator += capacity;
        consumed_mutator += region_size_bytes - capacity;
        buffer[idx] = (capacity == region_size_bytes) ? 'M' : 'm';
      } else if (_free_sets.in_free_set(i, Collector)) {
        assert(!r->is_old(), "Old regions should not be in collector_free set");
        size_t capacity = alloc_capacity(r);
        available_collector += capacity;
        consumed_collector += region_size_bytes - capacity;
        buffer[idx] = (capacity == region_size_bytes) ? 'C' : 'c';
      } else if (_free_sets.in_free_set(i, OldCollector)) {
        size_t capacity = alloc_capacity(r);
        available_old_collector += capacity;
        consumed_old_collector += region_size_bytes - capacity;
        buffer[idx] = (capacity == region_size_bytes) ? 'O' : 'o';
      } else if (r->is_humongous()) {
        if (r->is_old()) {
          buffer[idx] = 'H';
          retired_old_humongous += region_size_bytes;
        } else {
          buffer[idx] = 'h';
          retired_young_humongous += region_size_bytes;
        }
      } else {
        if (r->is_old()) {
          buffer[idx] = '~';
          retired_old_waste += alloc_capacity(r);
          retired_old += region_size_bytes;
        } else {
          buffer[idx] = '_';
          retired_young_waste += alloc_capacity(r);
          retired_young += region_size_bytes;
        }
      }
    }
    uint remnant = _heap->num_regions() % 64;
    if (remnant > 0) {
      buffer[remnant] = '\0';
    } else {
      remnant = 64;
    }
    log_debug(gc, free)(" %6u: %s", (uint) (_heap->num_regions() - remnant), buffer);
    size_t total_young = retired_young + retired_young_humongous;
    size_t total_old = retired_old + retired_old_humongous;
  }
#endif

  LogTarget(Info, gc, free) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);

    {
      size_t last_idx = 0;
      size_t max = 0;
      size_t max_contig = 0;
      size_t empty_contig = 0;

      size_t total_used = 0;
      size_t total_free = 0;
      size_t total_free_ext = 0;

      for (size_t idx = _free_sets.leftmost(Mutator); idx <= _free_sets.rightmost(Mutator); idx++) {
        if (_free_sets.in_free_set(idx, Mutator)) {
          ShenandoahHeapRegion *r = _heap->get_region(idx);
          size_t free = alloc_capacity(r);
          max = MAX2(max, free);
          if (r->is_empty()) {
            total_free_ext += free;
            if (last_idx + 1 == idx) {
              empty_contig++;
            } else {
              empty_contig = 1;
            }
          } else {
            empty_contig = 0;
          }
          total_used += r->used();
          total_free += free;
          max_contig = MAX2(max_contig, empty_contig);
          last_idx = idx;
        }
      }

      size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
      size_t free = capacity() - used();

      assert(free == total_free, "Sum of free within mutator regions (" SIZE_FORMAT
             ") should match mutator capacity (" SIZE_FORMAT ") minus mutator used (" SIZE_FORMAT ")",
             total_free, capacity(), used());

      ls.print("Free: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s regular, " SIZE_FORMAT "%s humongous, ",
               byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
               byte_size_in_proper_unit(max), proper_unit_for_byte_size(max),
               byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous));

      ls.print("Frag: ");
      size_t frag_ext;
      if (total_free_ext > 0) {
        frag_ext = 100 - (100 * max_humongous / total_free_ext);
      } else {
        frag_ext = 0;
      }
      ls.print(SIZE_FORMAT "%% external, ", frag_ext);

      size_t frag_int;
      if (_free_sets.count(Mutator) > 0) {
        frag_int = (100 * (total_used / _free_sets.count(Mutator)) / ShenandoahHeapRegion::region_size_bytes());
      } else {
        frag_int = 0;
      }
      ls.print(SIZE_FORMAT "%% internal; ", frag_int);
      ls.print("Used: " SIZE_FORMAT "%s, Mutator Free: " SIZE_FORMAT,
               byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used), _free_sets.count(Mutator));
    }

    {
      size_t max = 0;
      size_t total_free = 0;
      size_t total_used = 0;

      for (size_t idx = _free_sets.leftmost(Collector); idx <= _free_sets.rightmost(Collector); idx++) {
        if (_free_sets.in_free_set(idx, Collector)) {
          ShenandoahHeapRegion *r = _heap->get_region(idx);
          size_t free = alloc_capacity(r);
          max = MAX2(max, free);
          total_free += free;
          total_used += r->used();
        }
      }
      ls.print(" Collector Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s; Used: " SIZE_FORMAT "%s",
               byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
               byte_size_in_proper_unit(max), proper_unit_for_byte_size(max),
               byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used));
    }

    if (_heap->mode()->is_generational()) {
      size_t max = 0;
      size_t total_free = 0;
      size_t total_used = 0;

      for (size_t idx = _free_sets.leftmost(OldCollector); idx <= _free_sets.rightmost(OldCollector); idx++) {
        if (_free_sets.in_free_set(idx, OldCollector)) {
          ShenandoahHeapRegion *r = _heap->get_region(idx);
          size_t free = alloc_capacity(r);
          max = MAX2(max, free);
          total_free += free;
          total_used += r->used();
        }
      }
      ls.print_cr(" Old Collector Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s; Used: " SIZE_FORMAT "%s",
                  byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
                  byte_size_in_proper_unit(max), proper_unit_for_byte_size(max),
                  byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used));
    }
  }
}

HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
  shenandoah_assert_heaplocked();

  // Allocation request is known to satisfy all memory budgeting constraints.
  if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
    switch (req.type()) {
      case ShenandoahAllocRequest::_alloc_shared:
      case ShenandoahAllocRequest::_alloc_shared_gc:
        in_new_region = true;
        return allocate_contiguous(req);
      case ShenandoahAllocRequest::_alloc_plab:
      case ShenandoahAllocRequest::_alloc_gclab:
      case ShenandoahAllocRequest::_alloc_tlab:
        in_new_region = false;
        assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
               req.size(), ShenandoahHeapRegion::humongous_threshold_words());
        return nullptr;
      default:
        ShouldNotReachHere();
        return nullptr;
    }
  } else {
    return allocate_single(req, in_new_region);
  }
}

size_t ShenandoahFreeSet::unsafe_peek_free() const {
  // Deliberately not locked; this method is unsafe while the free set is being modified.
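  // A hypothetical interleaving that illustrates the hazard: after leftmost(Mutator) and
  // rightmost(Mutator) are read, a concurrent rebuild may shrink or clear the Mutator interval.
  // The index < max() guard below only keeps the probe within bounds; it does not make the scan
  // consistent, so the returned value is at best a hint.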

  for (size_t index = _free_sets.leftmost(Mutator); index <= _free_sets.rightmost(Mutator); index++) {
    if (index < _free_sets.max() && _free_sets.in_free_set(index, Mutator)) {
      ShenandoahHeapRegion* r = _heap->get_region(index);
      if (r->free() >= MinTLABSize) {
        return r->free();
      }
    }
  }

  // It appears that no regions are left
  return 0;
}

void ShenandoahFreeSet::print_on(outputStream* out) const {
  out->print_cr("Mutator Free Set: " SIZE_FORMAT, _free_sets.count(Mutator));
  for (size_t index = _free_sets.leftmost(Mutator); index <= _free_sets.rightmost(Mutator); index++) {
    if (_free_sets.in_free_set(index, Mutator)) {
      _heap->get_region(index)->print_on(out);
    }
  }
  out->print_cr("Collector Free Set: " SIZE_FORMAT, _free_sets.count(Collector));
  for (size_t index = _free_sets.leftmost(Collector); index <= _free_sets.rightmost(Collector); index++) {
    if (_free_sets.in_free_set(index, Collector)) {
      _heap->get_region(index)->print_on(out);
    }
  }
  if (_heap->mode()->is_generational()) {
    out->print_cr("Old Collector Free Set: " SIZE_FORMAT, _free_sets.count(OldCollector));
    for (size_t index = _free_sets.leftmost(OldCollector); index <= _free_sets.rightmost(OldCollector); index++) {
      if (_free_sets.in_free_set(index, OldCollector)) {
        _heap->get_region(index)->print_on(out);
      }
    }
  }
}

/*
 * Internal fragmentation metric: describes how fragmented the heap regions are.
 *
 * It is derived as:
 *
 *               sum(used[i]^2, i=0..k)
 *   IF = 1 - ------------------------------
 *              C * sum(used[i], i=0..k)
 *
 * ...where k is the number of regions in computation, C is the region capacity, and
 * used[i] is the used space in the region.
 *
 * The non-linearity causes IF to be lower for the cases where the same total heap
 * used is densely packed. For example:
 *   a) Heap is completely full                                     => IF = 0
 *   b) Heap is half full, first 50% regions are completely full    => IF = 0
 *   c) Heap is half full, each region is 50% full                  => IF = 1/2
 *   d) Heap is quarter full, first 25% regions are completely full => IF = 0
 *   e) Heap is quarter full, each region is 25% full               => IF = 3/4
 *   f) Heap has one small object per each region                   => IF =~ 1
 */
double ShenandoahFreeSet::internal_fragmentation() {
  double squared = 0;
  double linear = 0;
  int count = 0;

  for (size_t index = _free_sets.leftmost(Mutator); index <= _free_sets.rightmost(Mutator); index++) {
    if (_free_sets.in_free_set(index, Mutator)) {
      ShenandoahHeapRegion* r = _heap->get_region(index);
      size_t used = r->used();
      squared += used * used;
      linear += used;
      count++;
    }
  }

  if (count > 0) {
    double s = squared / (ShenandoahHeapRegion::region_size_bytes() * linear);
    return 1 - s;
  } else {
    return 0;
  }
}
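// A worked instance of the IF formula (illustrative numbers): two regions with C == 100 and
// used == {50, 50} give IF = 1 - (50^2 + 50^2) / (100 * (50 + 50)) = 1 - 5000/10000 = 1/2,
// matching case (c) in the comment above.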

/*
 * External fragmentation metric: describes how fragmented the heap is.
 *
 * It is derived as:
 *
 *   EF = 1 - largest_contiguous_free / total_free
 *
 * For example:
 *   a) Heap is completely empty => EF = 0
 *   b) Heap is completely full => EF = 0
 *   c) Heap is first-half full => EF = 1/2
 *   d) Heap is half full, full and empty regions interleave => EF =~ 1
 */
double ShenandoahFreeSet::external_fragmentation() {
  size_t last_idx = 0;
  size_t max_contig = 0;
  size_t empty_contig = 0;

  size_t free = 0;

  for (size_t index = _free_sets.leftmost(Mutator); index <= _free_sets.rightmost(Mutator); index++) {
    if (_free_sets.in_free_set(index, Mutator)) {
      ShenandoahHeapRegion* r = _heap->get_region(index);
      if (r->is_empty()) {
        free += ShenandoahHeapRegion::region_size_bytes();
        if (last_idx + 1 == index) {
          empty_contig++;
        } else {
          empty_contig = 1;
        }
      } else {
        empty_contig = 0;
      }

      max_contig = MAX2(max_contig, empty_contig);
      last_idx = index;
    }
  }

  if (free > 0) {
    return 1 - (1.0 * max_contig * ShenandoahHeapRegion::region_size_bytes() / free);
  } else {
    return 0;
  }
}
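// A worked instance of the EF formula (illustrative numbers): with 100 equally sized regions,
// 50 of them empty but no two adjacent, max_contig == 1 and free == 50 regions' worth, so
// EF = 1 - 1/50 = 0.98, approximating case (d) in the comment above.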