/*
 * Copyright (c) 2016, 2021, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahSimpleBitMap.hpp"
#include "gc/shenandoah/shenandoahSimpleBitMap.inline.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/orderAccess.hpp"

static const char* partition_name(ShenandoahFreeSetPartitionId t) {
  switch (t) {
    case ShenandoahFreeSetPartitionId::NotFree: return "NotFree";
    case ShenandoahFreeSetPartitionId::Mutator: return "Mutator";
    case ShenandoahFreeSetPartitionId::Collector: return "Collector";
    default:
      ShouldNotReachHere();
      return "Unrecognized";
  }
}

#ifndef PRODUCT
void ShenandoahRegionPartitions::dump_bitmap() const {
  log_debug(gc)("Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "], Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
                _leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)],
                _rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)],
                _leftmosts[int(ShenandoahFreeSetPartitionId::Collector)],
                _rightmosts[int(ShenandoahFreeSetPartitionId::Collector)]);
  log_debug(gc)("Empty Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT
                "], Empty Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
                _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
                _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
                _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
                _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)]);

  log_debug(gc)("%6s: %18s %18s %18s", "index", "Mutator Bits", "Collector Bits", "NotFree Bits");
  dump_bitmap_range(0, _max - 1);
}

void ShenandoahRegionPartitions::dump_bitmap_range(idx_t start_region_idx, idx_t end_region_idx) const {
  assert((start_region_idx >= 0) && (start_region_idx < (idx_t) _max), "precondition");
  assert((end_region_idx >= 0) && (end_region_idx < (idx_t) _max), "precondition");
  idx_t aligned_start = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(start_region_idx);
  idx_t aligned_end = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(end_region_idx);
  idx_t alignment = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].alignment();
  while (aligned_start <= aligned_end) {
    dump_bitmap_row(aligned_start);
    aligned_start += alignment;
  }
}

void ShenandoahRegionPartitions::dump_bitmap_row(idx_t region_idx) const {
  assert((region_idx >= 0) && (region_idx < (idx_t) _max), "precondition");
  idx_t aligned_idx = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(region_idx);
  uintx mutator_bits = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].bits_at(aligned_idx);
  uintx collector_bits = _membership[int(ShenandoahFreeSetPartitionId::Collector)].bits_at(aligned_idx);
  uintx free_bits = mutator_bits | collector_bits;
  uintx notfree_bits = ~free_bits;
  log_debug(gc)(SSIZE_FORMAT_W(6) ": " SIZE_FORMAT_X_0 " 0x" SIZE_FORMAT_X_0 " 0x" SIZE_FORMAT_X_0,
                aligned_idx, mutator_bits, collector_bits, notfree_bits);
}
#endif

ShenandoahRegionPartitions::ShenandoahRegionPartitions(size_t max_regions, ShenandoahFreeSet* free_set) :
    _max(max_regions),
    _region_size_bytes(ShenandoahHeapRegion::region_size_bytes()),
    _free_set(free_set),
    _membership{ ShenandoahSimpleBitMap(max_regions), ShenandoahSimpleBitMap(max_regions) }
{
  make_all_regions_unavailable();
}

inline bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) const {
  return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
}

inline bool ShenandoahFreeSet::can_allocate_from(size_t idx) const {
  ShenandoahHeapRegion* r = _heap->get_region(idx);
  return can_allocate_from(r);
}

inline size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) const {
  if (r->is_trash()) {
    // This would be recycled on allocation path
    return ShenandoahHeapRegion::region_size_bytes();
  } else {
    return r->free();
  }
}

inline size_t ShenandoahFreeSet::alloc_capacity(size_t idx) const {
  ShenandoahHeapRegion* r = _heap->get_region(idx);
  return alloc_capacity(r);
}

inline bool ShenandoahFreeSet::has_alloc_capacity(ShenandoahHeapRegion *r) const {
  return alloc_capacity(r) > 0;
}

inline idx_t ShenandoahRegionPartitions::leftmost(ShenandoahFreeSetPartitionId which_partition) const {
  assert (which_partition < NumPartitions, "selected free partition must be valid");
  idx_t idx = _leftmosts[int(which_partition)];
  if (idx >= _max) {
    return _max;
  } else {
    // Cannot assert that _membership[int(which_partition)].is_set(idx) because this helper method may be used
    // to query the original value of leftmost when leftmost must be adjusted because the interval representing
    // which_partition is shrinking after the region that used to be leftmost is retired.
    return idx;
  }
}

inline idx_t ShenandoahRegionPartitions::rightmost(ShenandoahFreeSetPartitionId which_partition) const {
  assert (which_partition < NumPartitions, "selected free partition must be valid");
  idx_t idx = _rightmosts[int(which_partition)];
  // Cannot assert that _membership[int(which_partition)].is_set(idx) because this helper method may be used
  // to query the original value of rightmost when rightmost must be adjusted because the interval representing
  // which_partition is shrinking after the region that used to be rightmost is retired.
  return idx;
}

void ShenandoahRegionPartitions::make_all_regions_unavailable() {
  for (size_t partition_id = 0; partition_id < IntNumPartitions; partition_id++) {
    _membership[partition_id].clear_all();
    _leftmosts[partition_id] = _max;
    _rightmosts[partition_id] = -1;
    _leftmosts_empty[partition_id] = _max;
    _rightmosts_empty[partition_id] = -1;
    _capacity[partition_id] = 0;
    _used[partition_id] = 0;
  }
  _region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = _region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
}

void ShenandoahRegionPartitions::establish_mutator_intervals(idx_t mutator_leftmost, idx_t mutator_rightmost,
                                                             idx_t mutator_leftmost_empty, idx_t mutator_rightmost_empty,
                                                             size_t mutator_region_count, size_t mutator_used) {
  _leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_leftmost;
  _rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_rightmost;
  _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_leftmost_empty;
  _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_rightmost_empty;

  _region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_region_count;
  _used[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_used;
  _capacity[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_region_count * _region_size_bytes;

  _leftmosts[int(ShenandoahFreeSetPartitionId::Collector)] = _max;
  _rightmosts[int(ShenandoahFreeSetPartitionId::Collector)] = -1;
  _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)] = _max;
  _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)] = -1;

  _region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
  _used[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
  _capacity[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
}

void ShenandoahRegionPartitions::increase_used(ShenandoahFreeSetPartitionId which_partition, size_t bytes) {
  assert (which_partition < NumPartitions, "Partition must be valid");
  _used[int(which_partition)] += bytes;
  assert (_used[int(which_partition)] <= _capacity[int(which_partition)],
          "Must not use (" SIZE_FORMAT ") more than capacity (" SIZE_FORMAT ") after increase by " SIZE_FORMAT,
          _used[int(which_partition)], _capacity[int(which_partition)], bytes);
}

inline void ShenandoahRegionPartitions::shrink_interval_if_range_modifies_either_boundary(
    ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx) {
  assert((low_idx <= high_idx) && (low_idx >= 0) && (high_idx < _max), "Range must span legal index values");
  if (low_idx == leftmost(partition)) {
    assert (!_membership[int(partition)].is_set(low_idx), "Do not shrink interval if region not removed");
    if (high_idx + 1 == _max) {
      _leftmosts[int(partition)] = _max;
    } else {
      _leftmosts[int(partition)] = find_index_of_next_available_region(partition, high_idx + 1);
    }
    if (_leftmosts_empty[int(partition)] < _leftmosts[int(partition)]) {
      // This gets us closer to where we need to be; we'll scan further when leftmosts_empty is requested.
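      // Note: leftmost(partition) returns the just-recomputed _leftmosts bound, so the cached empty
      // bound only tightens here; leftmost_empty() re-derives the exact value lazily on demand.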
      _leftmosts_empty[int(partition)] = leftmost(partition);
    }
  }
  if (high_idx == _rightmosts[int(partition)]) {
    assert (!_membership[int(partition)].is_set(high_idx), "Do not shrink interval if region not removed");
    if (low_idx == 0) {
      _rightmosts[int(partition)] = -1;
    } else {
      _rightmosts[int(partition)] = find_index_of_previous_available_region(partition, low_idx - 1);
    }
    if (_rightmosts_empty[int(partition)] > _rightmosts[int(partition)]) {
      // This gets us closer to where we need to be; we'll scan further when rightmosts_empty is requested.
      _rightmosts_empty[int(partition)] = _rightmosts[int(partition)];
    }
  }
  if (_leftmosts[int(partition)] > _rightmosts[int(partition)]) {
    _leftmosts[int(partition)] = _max;
    _rightmosts[int(partition)] = -1;
    _leftmosts_empty[int(partition)] = _max;
    _rightmosts_empty[int(partition)] = -1;
  }
}

inline void ShenandoahRegionPartitions::shrink_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, idx_t idx) {
  shrink_interval_if_range_modifies_either_boundary(partition, idx, idx);
}

inline void ShenandoahRegionPartitions::expand_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition,
                                                                             idx_t idx, size_t region_available) {
  if (_leftmosts[int(partition)] > idx) {
    _leftmosts[int(partition)] = idx;
  }
  if (_rightmosts[int(partition)] < idx) {
    _rightmosts[int(partition)] = idx;
  }
  if (region_available == _region_size_bytes) {
    if (_leftmosts_empty[int(partition)] > idx) {
      _leftmosts_empty[int(partition)] = idx;
    }
    if (_rightmosts_empty[int(partition)] < idx) {
      _rightmosts_empty[int(partition)] = idx;
    }
  }
}

void ShenandoahRegionPartitions::retire_range_from_partition(
    ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx) {

  // Note: we may remove from free partition even if region is not entirely full, such as when available < PLAB::min_size()
  assert ((low_idx < _max) && (high_idx < _max), "Both indices are sane: " SIZE_FORMAT " and " SIZE_FORMAT " < " SIZE_FORMAT,
          low_idx, high_idx, _max);
  assert (partition < NumPartitions, "Cannot remove from free partitions if not already free");

  for (idx_t idx = low_idx; idx <= high_idx; idx++) {
    assert (in_free_set(partition, idx), "Must be in partition to remove from partition");
    _membership[int(partition)].clear_bit(idx);
  }
  _region_counts[int(partition)] -= high_idx + 1 - low_idx;
  shrink_interval_if_range_modifies_either_boundary(partition, low_idx, high_idx);
}

void ShenandoahRegionPartitions::retire_from_partition(ShenandoahFreeSetPartitionId partition, idx_t idx, size_t used_bytes) {

  // Note: we may remove from free partition even if region is not entirely full, such as when available < PLAB::min_size()
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
  assert (partition < NumPartitions, "Cannot remove from free partitions if not already free");
  assert (in_free_set(partition, idx), "Must be in partition to remove from partition");

  if (used_bytes < _region_size_bytes) {
    // Count the alignment pad remnant of memory as used when we retire this region
    increase_used(partition, _region_size_bytes - used_bytes);
  }
  _membership[int(partition)].clear_bit(idx);
  shrink_interval_if_boundary_modified(partition, idx);
  _region_counts[int(partition)]--;
}

void ShenandoahRegionPartitions::make_free(idx_t idx, ShenandoahFreeSetPartitionId which_partition, size_t available) {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
  assert (membership(idx) == ShenandoahFreeSetPartitionId::NotFree, "Cannot make free if already free");
  assert (which_partition < NumPartitions, "selected free partition must be valid");
  assert (available <= _region_size_bytes, "Available cannot exceed region size");

  _membership[int(which_partition)].set_bit(idx);
  _capacity[int(which_partition)] += _region_size_bytes;
  _used[int(which_partition)] += _region_size_bytes - available;
  expand_interval_if_boundary_modified(which_partition, idx, available);

  _region_counts[int(which_partition)]++;
}

void ShenandoahRegionPartitions::move_from_partition_to_partition(idx_t idx, ShenandoahFreeSetPartitionId orig_partition,
                                                                  ShenandoahFreeSetPartitionId new_partition, size_t available) {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
  assert (orig_partition < NumPartitions, "Original partition must be valid");
  assert (new_partition < NumPartitions, "New partition must be valid");
  assert (available <= _region_size_bytes, "Available cannot exceed region size");

  // Expected transitions:
  //   During rebuild:           Mutator => Collector
  //   During flip_to_gc:        Mutator empty => Collector
  //   At start of update refs:  Collector => Mutator
  assert (((available <= _region_size_bytes) &&
           (((orig_partition == ShenandoahFreeSetPartitionId::Mutator)
             && (new_partition == ShenandoahFreeSetPartitionId::Collector)) ||
            ((orig_partition == ShenandoahFreeSetPartitionId::Collector)
             && (new_partition == ShenandoahFreeSetPartitionId::Mutator)))) ||
          ((available == _region_size_bytes) &&
           ((orig_partition == ShenandoahFreeSetPartitionId::Mutator)
            && (new_partition == ShenandoahFreeSetPartitionId::Collector))), "Unexpected movement between partitions");

  size_t used = _region_size_bytes - available;

  _membership[int(orig_partition)].clear_bit(idx);
  _membership[int(new_partition)].set_bit(idx);

  _capacity[int(orig_partition)] -= _region_size_bytes;
  _used[int(orig_partition)] -= used;
  shrink_interval_if_boundary_modified(orig_partition, idx);

  _capacity[int(new_partition)] += _region_size_bytes;
  _used[int(new_partition)] += used;
  expand_interval_if_boundary_modified(new_partition, idx, available);

  _region_counts[int(orig_partition)]--;
  _region_counts[int(new_partition)]++;
}

const char* ShenandoahRegionPartitions::partition_membership_name(idx_t idx) const {
  return partition_name(membership(idx));
}

inline ShenandoahFreeSetPartitionId ShenandoahRegionPartitions::membership(idx_t idx) const {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
  ShenandoahFreeSetPartitionId result = ShenandoahFreeSetPartitionId::NotFree;
  for (uint partition_id = 0; partition_id < UIntNumPartitions; partition_id++) {
    if (_membership[partition_id].is_set(idx)) {
      assert(result == ShenandoahFreeSetPartitionId::NotFree, "Region should reside in only one partition");
      result = (ShenandoahFreeSetPartitionId) partition_id;
    }
  }
  return result;
}

#ifdef ASSERT
inline bool ShenandoahRegionPartitions::partition_id_matches(idx_t idx, ShenandoahFreeSetPartitionId test_partition) const {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
  assert (test_partition < ShenandoahFreeSetPartitionId::NotFree, "must be a valid partition");

  return membership(idx) == test_partition;
}
#endif

inline bool ShenandoahRegionPartitions::is_empty(ShenandoahFreeSetPartitionId which_partition) const {
  assert (which_partition < NumPartitions, "selected free partition must be valid");
  return (leftmost(which_partition) > rightmost(which_partition));
}

inline idx_t ShenandoahRegionPartitions::find_index_of_next_available_region(
    ShenandoahFreeSetPartitionId which_partition, idx_t start_index) const {
  idx_t rightmost_idx = rightmost(which_partition);
  idx_t leftmost_idx = leftmost(which_partition);
  if ((rightmost_idx < leftmost_idx) || (start_index > rightmost_idx)) return _max;
  if (start_index < leftmost_idx) {
    start_index = leftmost_idx;
  }
  idx_t result = _membership[int(which_partition)].find_first_set_bit(start_index, rightmost_idx + 1);
  if (result > rightmost_idx) {
    result = _max;
  }
  assert (result >= start_index, "Requires progress");
  return result;
}

inline idx_t ShenandoahRegionPartitions::find_index_of_previous_available_region(
    ShenandoahFreeSetPartitionId which_partition, idx_t last_index) const {
  idx_t rightmost_idx = rightmost(which_partition);
  idx_t leftmost_idx = leftmost(which_partition);
  // if (leftmost_idx == max) then (last_index < leftmost_idx)
  if (last_index < leftmost_idx) return -1;
  if (last_index > rightmost_idx) {
    last_index = rightmost_idx;
  }
  idx_t result = _membership[int(which_partition)].find_last_set_bit(-1, last_index);
  if (result < leftmost_idx) {
    result = -1;
  }
  assert (result <= last_index, "Requires progress");
  return result;
}

inline idx_t ShenandoahRegionPartitions::find_index_of_next_available_cluster_of_regions(
    ShenandoahFreeSetPartitionId which_partition, idx_t start_index, size_t cluster_size) const {
  idx_t rightmost_idx = rightmost(which_partition);
  idx_t leftmost_idx = leftmost(which_partition);
  if ((rightmost_idx < leftmost_idx) || (start_index > rightmost_idx)) return _max;
  idx_t result = _membership[int(which_partition)].find_first_consecutive_set_bits(start_index, rightmost_idx + 1, cluster_size);
  if (result > rightmost_idx) {
    result = _max;
  }
  assert (result >= start_index, "Requires progress");
  return result;
}

inline idx_t ShenandoahRegionPartitions::find_index_of_previous_available_cluster_of_regions(
    ShenandoahFreeSetPartitionId which_partition, idx_t last_index, size_t cluster_size) const {
  idx_t leftmost_idx = leftmost(which_partition);
  // if (leftmost_idx == max) then (last_index < leftmost_idx)
  if (last_index < leftmost_idx) return -1;
  idx_t result = _membership[int(which_partition)].find_last_consecutive_set_bits(leftmost_idx - 1, last_index, cluster_size);
  if (result <= leftmost_idx) {
    result = -1;
  }
  assert (result <= last_index, "Requires progress");
  return result;
}

idx_t ShenandoahRegionPartitions::leftmost_empty(ShenandoahFreeSetPartitionId which_partition) {
  assert (which_partition < NumPartitions, "selected free partition must be valid");
  idx_t max_regions = _max;
  if (_leftmosts_empty[int(which_partition)] == _max) {
    return _max;
  }
  for (idx_t idx = find_index_of_next_available_region(which_partition, _leftmosts_empty[int(which_partition)]);
       idx < max_regions; ) {
    assert(in_free_set(which_partition, idx), "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, idx);
    if (_free_set->alloc_capacity(idx) == _region_size_bytes) {
      _leftmosts_empty[int(which_partition)] = idx;
      return idx;
    }
    idx = find_index_of_next_available_region(which_partition, idx + 1);
  }
  _leftmosts_empty[int(which_partition)] = _max;
  _rightmosts_empty[int(which_partition)] = -1;
  return _max;
}

idx_t ShenandoahRegionPartitions::rightmost_empty(ShenandoahFreeSetPartitionId which_partition) {
  assert (which_partition < NumPartitions, "selected free partition must be valid");
  if (_rightmosts_empty[int(which_partition)] < 0) {
    return -1;
  }
  for (idx_t idx = find_index_of_previous_available_region(which_partition, _rightmosts_empty[int(which_partition)]);
       idx >= 0; ) {
    assert(in_free_set(which_partition, idx), "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, idx);
    if (_free_set->alloc_capacity(idx) == _region_size_bytes) {
      _rightmosts_empty[int(which_partition)] = idx;
      return idx;
    }
    idx = find_index_of_previous_available_region(which_partition, idx - 1);
  }
  _leftmosts_empty[int(which_partition)] = _max;
  _rightmosts_empty[int(which_partition)] = -1;
  return -1;
}


#ifdef ASSERT
void ShenandoahRegionPartitions::assert_bounds() {

  idx_t leftmosts[UIntNumPartitions];
  idx_t rightmosts[UIntNumPartitions];
  idx_t empty_leftmosts[UIntNumPartitions];
  idx_t empty_rightmosts[UIntNumPartitions];

  for (uint i = 0; i < UIntNumPartitions; i++) {
    leftmosts[i] = _max;
    empty_leftmosts[i] = _max;
    rightmosts[i] = -1;
    empty_rightmosts[i] = -1;
  }

  for (idx_t i = 0; i < _max; i++) {
    ShenandoahFreeSetPartitionId partition = membership(i);
    switch (partition) {
      case ShenandoahFreeSetPartitionId::NotFree:
        break;

      case ShenandoahFreeSetPartitionId::Mutator:
      case ShenandoahFreeSetPartitionId::Collector:
      {
        size_t capacity = _free_set->alloc_capacity(i);
        bool is_empty = (capacity == _region_size_bytes);
        assert(capacity > 0, "free regions must have allocation capacity");
        if (i < leftmosts[int(partition)]) {
          leftmosts[int(partition)] = i;
        }
        if (is_empty && (i < empty_leftmosts[int(partition)])) {
          empty_leftmosts[int(partition)] = i;
        }
        if (i > rightmosts[int(partition)]) {
          rightmosts[int(partition)] = i;
        }
        if (is_empty && (i > empty_rightmosts[int(partition)])) {
          empty_rightmosts[int(partition)] = i;
        }
        break;
      }

      default:
        ShouldNotReachHere();
    }
  }

  // Performance invariants. Failing these would not break the free partition, but performance would suffer.
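  // The bounds recomputed above come from a full scan of the membership bitmaps; the asserts below
  // verify that the cached interval bounds never exclude any region that is actually free.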
  assert (leftmost(ShenandoahFreeSetPartitionId::Mutator) <= _max,
          "leftmost in bounds: " SSIZE_FORMAT " < " SSIZE_FORMAT, leftmost(ShenandoahFreeSetPartitionId::Mutator), _max);
  assert (rightmost(ShenandoahFreeSetPartitionId::Mutator) < _max,
          "rightmost in bounds: " SSIZE_FORMAT " < " SSIZE_FORMAT, rightmost(ShenandoahFreeSetPartitionId::Mutator), _max);

  assert (leftmost(ShenandoahFreeSetPartitionId::Mutator) == _max
          || partition_id_matches(leftmost(ShenandoahFreeSetPartitionId::Mutator), ShenandoahFreeSetPartitionId::Mutator),
          "leftmost region should be free: " SSIZE_FORMAT, leftmost(ShenandoahFreeSetPartitionId::Mutator));
  assert (leftmost(ShenandoahFreeSetPartitionId::Mutator) == _max
          || partition_id_matches(rightmost(ShenandoahFreeSetPartitionId::Mutator), ShenandoahFreeSetPartitionId::Mutator),
          "rightmost region should be free: " SSIZE_FORMAT, rightmost(ShenandoahFreeSetPartitionId::Mutator));

  // If the Mutator partition is empty, leftmosts will both equal max and rightmosts will both equal -1.
  // Likewise for empty region partitions.
  idx_t beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
  idx_t end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
  assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::Mutator),
          "free regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
          beg_off, leftmost(ShenandoahFreeSetPartitionId::Mutator));
  assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::Mutator),
          "free regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
          end_off, rightmost(ShenandoahFreeSetPartitionId::Mutator));

  beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
  end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
  assert (beg_off >= leftmost_empty(ShenandoahFreeSetPartitionId::Mutator),
          "free empty regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
          beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Mutator));
  assert (end_off <= rightmost_empty(ShenandoahFreeSetPartitionId::Mutator),
          "free empty regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
          end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Mutator));

  // Performance invariants. Failing these would not break the free partition, but performance would suffer.
  assert (leftmost(ShenandoahFreeSetPartitionId::Collector) <= _max, "leftmost in bounds: " SSIZE_FORMAT " < " SSIZE_FORMAT,
          leftmost(ShenandoahFreeSetPartitionId::Collector), _max);
  assert (rightmost(ShenandoahFreeSetPartitionId::Collector) < _max, "rightmost in bounds: " SSIZE_FORMAT " < " SSIZE_FORMAT,
          rightmost(ShenandoahFreeSetPartitionId::Collector), _max);

  assert (leftmost(ShenandoahFreeSetPartitionId::Collector) == _max
          || partition_id_matches(leftmost(ShenandoahFreeSetPartitionId::Collector), ShenandoahFreeSetPartitionId::Collector),
          "leftmost region should be free: " SSIZE_FORMAT, leftmost(ShenandoahFreeSetPartitionId::Collector));
  assert (leftmost(ShenandoahFreeSetPartitionId::Collector) == _max
          || partition_id_matches(rightmost(ShenandoahFreeSetPartitionId::Collector), ShenandoahFreeSetPartitionId::Collector),
          "rightmost region should be free: " SSIZE_FORMAT, rightmost(ShenandoahFreeSetPartitionId::Collector));

  // If the Collector partition is empty, leftmosts will both equal max and rightmosts will both equal -1.
  // Likewise for empty region partitions.
  beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
  end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
  assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::Collector),
          "free regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
          beg_off, leftmost(ShenandoahFreeSetPartitionId::Collector));
  assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::Collector),
          "free regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
          end_off, rightmost(ShenandoahFreeSetPartitionId::Collector));

  beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
  end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
  assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
          "free empty regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
          beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Collector));
  assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
          "free empty regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
          end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Collector));
}
#endif

ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
  _heap(heap),
  _partitions(max_regions, this),
  _trash_regions(NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, max_regions, mtGC)),
  _right_to_left_bias(false),
  _alloc_bias_weight(0)
{
  clear_internal();
}

HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
  shenandoah_assert_heaplocked();

  // Scan the bitmap looking for a first fit.
  //
  // Leftmost and rightmost bounds provide enough caching to quickly find a region from which to allocate.
  //
  // Allocations are biased: GC allocations are taken from the high end of the heap. Regular (and TLAB)
  // mutator allocations are taken from the middle of the heap, below the memory reserved for Collector.
  // Humongous mutator allocations are taken from the bottom of the heap.
  //
  // The free set maintains mutator and collector partitions. The mutator can only allocate from the
  // Mutator partition. The collector prefers to allocate from the Collector partition, but may steal
  // regions from the Mutator partition if the Collector partition has been depleted.

  switch (req.type()) {
    case ShenandoahAllocRequest::_alloc_tlab:
    case ShenandoahAllocRequest::_alloc_shared: {
      // Try to allocate in the mutator view
      if (_alloc_bias_weight-- <= 0) {
        // We have observed that regions not collected in the previous GC cycle tend to congregate at one end or the other
        // of the heap. Typically, these are the more recently engaged regions and the objects in these regions have not
        // yet had a chance to die (and/or are treated as floating garbage). If we use the same allocation bias on each
        // GC pass, these "most recently" engaged regions for GC pass N will also be the "most recently" engaged regions
        // for GC pass N+1, and the relatively large amount of live data and/or floating garbage introduced
        // during the most recent GC pass may once again prevent the region from being collected. We have found that
        // alternating the allocation behavior between GC passes improves evacuation performance by 3-7% on certain
        // benchmarks. In the best case, this has the effect of consuming these partially consumed regions before
        // the start of the next mark cycle so all of their garbage can be efficiently reclaimed.
        //
        // First, finish consuming regions that are already partially consumed so as to more tightly limit ranges of
        // available regions. Other potential benefits:
        //  1. Eventual collection set has fewer regions because we have packed newly allocated objects into fewer regions
        //  2. We preserve the "empty" regions longer into the GC cycle, reducing likelihood of allocation failures
        //     late in the GC cycle.
        idx_t non_empty_on_left = (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator)
                                   - _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator));
        idx_t non_empty_on_right = (_partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator)
                                    - _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator));
        _right_to_left_bias = (non_empty_on_right > non_empty_on_left);
        _alloc_bias_weight = _InitialAllocBiasWeight;
      }
      if (_right_to_left_bias) {
        // Allocate within mutator free set from high memory to low so as to preserve low memory for humongous allocations
        if (!_partitions.is_empty(ShenandoahFreeSetPartitionId::Mutator)) {
          // Use signed idx. Otherwise, the loop will never terminate.
          idx_t leftmost = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator);
          for (idx_t idx = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator); idx >= leftmost; ) {
            assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx),
                   "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, idx);
            ShenandoahHeapRegion* r = _heap->get_region(idx);
            // try_allocate_in() increases used if the allocation is successful.
            HeapWord* result;
            size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab) ? req.min_size() : req.size();
            if ((alloc_capacity(r) >= min_size) && ((result = try_allocate_in(r, req, in_new_region)) != nullptr)) {
              return result;
            }
            idx = _partitions.find_index_of_previous_available_region(ShenandoahFreeSetPartitionId::Mutator, idx - 1);
          }
        }
      } else {
        // Allocate from low to high memory. This keeps the range of fully empty regions more tightly packed.
        // Note that the most recently allocated regions tend not to be evacuated in a given GC cycle. So this
        // tends to accumulate "fragmented" uncollected regions in high memory.
        if (!_partitions.is_empty(ShenandoahFreeSetPartitionId::Mutator)) {
          // Use signed idx. Otherwise, the loop will never terminate.
          idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
          for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); idx <= rightmost; ) {
            assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx),
                   "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, idx);
            ShenandoahHeapRegion* r = _heap->get_region(idx);
            // try_allocate_in() increases used if the allocation is successful.
            HeapWord* result;
            size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab) ? req.min_size() : req.size();
            if ((alloc_capacity(r) >= min_size) && ((result = try_allocate_in(r, req, in_new_region)) != nullptr)) {
              return result;
            }
            idx = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, idx + 1);
          }
        }
      }
      // There is no recovery. The mutator does not touch the collector view at all.
      break;
    }
    case ShenandoahAllocRequest::_alloc_gclab:
      // GCLABs are for evacuation so we must be in evacuation phase.

    case ShenandoahAllocRequest::_alloc_shared_gc: {
      // Fast-path: try to allocate in the collector view first
      idx_t leftmost_collector = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector);
      for (idx_t idx = _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector); idx >= leftmost_collector; ) {
        assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, idx),
               "Boundaries or find_prev_last_bit failed: " SSIZE_FORMAT, idx);
        HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
        if (result != nullptr) {
          return result;
        }
        idx = _partitions.find_index_of_previous_available_region(ShenandoahFreeSetPartitionId::Collector, idx - 1);
      }

      // No dice. Can we borrow space from the mutator view?
      if (!ShenandoahEvacReserveOverflow) {
        return nullptr;
      }

      // Try to steal an empty region from the mutator view.
      idx_t leftmost_mutator_empty = _partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator);
      for (idx_t idx = _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator); idx >= leftmost_mutator_empty; ) {
        assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx),
               "Boundaries or find_prev_last_bit failed: " SSIZE_FORMAT, idx);
        ShenandoahHeapRegion* r = _heap->get_region(idx);
        if (can_allocate_from(r)) {
          flip_to_gc(r);
          HeapWord *result = try_allocate_in(r, req, in_new_region);
          if (result != nullptr) {
            log_debug(gc)("Flipped region " SIZE_FORMAT " to gc for request: " PTR_FORMAT, idx, p2i(&req));
            return result;
          }
        }
        idx = _partitions.find_index_of_previous_available_region(ShenandoahFreeSetPartitionId::Mutator, idx - 1);
      }

      // No dice. Do not try to mix mutator and GC allocations, because adjusting the region UWM
      // due to GC allocations would expose unparsable mutator allocations.
      break;
    }
    default:
      ShouldNotReachHere();
  }
  return nullptr;
}

HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
  assert (has_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());
  if (_heap->is_concurrent_weak_root_in_progress() && r->is_trash()) {
    return nullptr;
  }

  HeapWord* result = nullptr;
  try_recycle_trashed(r);
  in_new_region = r->is_empty();

  if (in_new_region) {
    log_debug(gc)("Using new region (" SIZE_FORMAT ") for %s (" PTR_FORMAT ").",
                  r->index(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(&req));
  }

  // req.size() is in words, r->free() is in bytes.
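  // Illustrative arithmetic (an assumption for illustration: 8-byte heap words, MinObjAlignment == 1):
  // if r->free() == 8000 bytes, then free == align_down(8000 >> 3, 1) == 1000 words, so a TLAB request
  // with size() == 1200 and min_size() <= 1000 is shrunk to 1000 words instead of failing over to
  // another region.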
  if (req.is_lab_alloc()) {
    // This is a GCLAB or a TLAB allocation
    size_t adjusted_size = req.size();
    size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
    if (adjusted_size > free) {
      adjusted_size = free;
    }
    if (adjusted_size >= req.min_size()) {
      result = r->allocate(adjusted_size, req.type());
      log_debug(gc)("Allocated " SIZE_FORMAT " words (adjusted from " SIZE_FORMAT ") for %s @" PTR_FORMAT
                    " from %s region " SIZE_FORMAT ", free bytes remaining: " SIZE_FORMAT,
                    adjusted_size, req.size(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(result),
                    _partitions.partition_membership_name(r->index()), r->index(), r->free());
      assert (result != nullptr, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, adjusted_size);
      req.set_actual_size(adjusted_size);
    } else {
      log_trace(gc, free)("Failed to shrink TLAB or GCLAB request (" SIZE_FORMAT ") in region " SIZE_FORMAT " to " SIZE_FORMAT
                          " because min_size() is " SIZE_FORMAT, req.size(), r->index(), adjusted_size, req.min_size());
    }
  } else {
    size_t size = req.size();
    result = r->allocate(size, req.type());
    if (result != nullptr) {
      // Record actual allocation size
      log_debug(gc)("Allocated " SIZE_FORMAT " words for %s @" PTR_FORMAT
                    " from %s region " SIZE_FORMAT ", free bytes remaining: " SIZE_FORMAT,
                    size, ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(result),
                    _partitions.partition_membership_name(r->index()), r->index(), r->free());
      req.set_actual_size(size);
    }
  }

  if (result != nullptr) {
    // Allocation successful, bump stats:
    if (req.is_mutator_alloc()) {
      _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, req.actual_size() * HeapWordSize);
    } else {
      assert(req.is_gc_alloc(), "Should be gc_alloc since req wasn't mutator alloc");

      // For GC allocations, we advance update_watermark because the objects relocated into this memory during
      // evacuation are not updated during evacuation.
      r->set_update_watermark(r->top());
    }
  }

  static const size_t min_capacity = (size_t) (ShenandoahHeapRegion::region_size_bytes() * (1.0 - 1.0 / ShenandoahEvacWaste));
  size_t ac = alloc_capacity(r);

  if (((result == nullptr) && (ac < min_capacity)) || (ac < PLAB::min_size() * HeapWordSize)) {
    // Regardless of whether this allocation succeeded, if the remaining memory is less than PLAB::min_size(), retire
    // this region. Note that retire_from_partition() increases used to account for waste.
    //
    // Also, if this allocation request failed and the memory consumed within this region times ShenandoahEvacWaste
    // exceeds the region size, then retire the region so that subsequent searches can find available memory more quickly.

    size_t idx = r->index();
    _partitions.retire_from_partition(req.is_mutator_alloc() ?
                                      ShenandoahFreeSetPartitionId::Mutator : ShenandoahFreeSetPartitionId::Collector,
                                      idx, r->used());
    _partitions.assert_bounds();
  }
  return result;
}

HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
  assert(req.is_mutator_alloc(), "All humongous allocations are performed by the mutator");
  shenandoah_assert_heaplocked();

  size_t words_size = req.size();
  idx_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

  // Check if there are enough regions left to satisfy the allocation.
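  // Illustrative arithmetic (an assumption for illustration: 2 MiB regions): a 5 MiB humongous request
  // yields num == 3; if fewer than 3 regions remain in the Mutator partition, the allocation fails fast
  // here, and otherwise the trailing (third) region is only partially used, with the remainder recorded
  // as waste below.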
  if (num > (idx_t) _partitions.count(ShenandoahFreeSetPartitionId::Mutator)) {
    return nullptr;
  }

  idx_t start_range = _partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator);
  idx_t end_range = _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator) + 1;
  idx_t last_possible_start = end_range - num;

  // Find the continuous interval of $num regions, starting from $beg and ending in $end,
  // inclusive. Contiguous allocations are biased to the beginning.
  idx_t beg = _partitions.find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId::Mutator,
                                                                          start_range, num);
  if (beg > last_possible_start) {
    // Hit the end, goodbye
    return nullptr;
  }
  idx_t end = beg;

  while (true) {
    // We've confirmed num contiguous regions belonging to the Mutator partition, so no need to confirm membership.
    // If a region is not completely free, the current [beg; end] is useless, and we may fast-forward. If we can extend
    // the existing range, we can exploit that certain regions are already known to be in the Mutator free set.
    while (!can_allocate_from(_heap->get_region(end))) {
      // region[end] is not empty, so we restart our search after region[end]
      idx_t slide_delta = end + 1 - beg;
      if (beg + slide_delta > last_possible_start) {
        // no room to slide
        return nullptr;
      }
      for (idx_t span_end = beg + num; slide_delta > 0; slide_delta--) {
        if (!_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, span_end)) {
          beg = _partitions.find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId::Mutator,
                                                                            span_end + 1, num);
          break;
        } else {
          beg++;
          span_end++;
        }
      }
      // Here, either beg identifies a range of num regions all of which are in the Mutator free set,
      // or beg > last_possible_start
      if (beg > last_possible_start) {
        // Hit the end, goodbye
        return nullptr;
      }
      end = beg;
    }

    if ((end - beg + 1) == num) {
      // found the match
      break;
    }

    end++;
  }

  size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
  // Initialize regions:
  for (idx_t i = beg; i <= end; i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    try_recycle_trashed(r);

    assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous");
    assert(r->is_empty(), "Should be empty");

    if (i == beg) {
      r->make_humongous_start();
    } else {
      r->make_humongous_cont();
    }

    // Trailing region may be non-full, record the remainder there
    size_t used_words;
    if ((i == end) && (remainder != 0)) {
      used_words = remainder;
    } else {
      used_words = ShenandoahHeapRegion::region_size_words();
    }

    r->set_top(r->bottom() + used_words);
  }

  if (remainder != 0) {
    // Record this remainder as allocation waste
    _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);
  }

  // retire_range_from_partition() will adjust bounds on the Mutator free set if appropriate
  _partitions.retire_range_from_partition(ShenandoahFreeSetPartitionId::Mutator, beg, end);

  size_t total_humongous_size = ShenandoahHeapRegion::region_size_bytes() * num;
  _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, total_humongous_size);
  _partitions.assert_bounds();
  req.set_actual_size(words_size);
  return _heap->get_region(beg)->bottom();
}
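
// Recycle a trashed region in place: return its bytes to the heap's global "used" accounting and reset
// the region to empty. A working assumption, consistent with the call sites below: callers hold the heap
// lock, either by assertion (allocation paths) or by taking it in batches (recycle_trash()).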
void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion* r) {
  if (r->is_trash()) {
    _heap->decrease_used(r->used());
    r->recycle();
  }
}

void ShenandoahFreeSet::recycle_trash() {
  // lock is not reentrant, check we don't have it
  shenandoah_assert_not_heaplocked();
  size_t count = 0;
  for (size_t i = 0; i < _heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    if (r->is_trash()) {
      _trash_regions[count++] = r;
    }
  }

  size_t total_batches = 0;
  jlong batch_start_time = 0;
  jlong recycle_trash_start_time = os::javaTimeNanos();  // This value will be treated as the initial batch_start_time
  jlong batch_end_time = recycle_trash_start_time;
  // Process as many batches as can be processed within 10 us.
  static constexpr jlong deadline_ns = 10000;  // 10 us
  size_t idx = 0;
  jlong predicted_next_batch_end_time;
  jlong batch_process_time_estimate = 0;
  while (idx < count) {
    if (idx > 0) {
      os::naked_yield();  // Yield to allow allocators to take the lock, except on the first iteration
    }
    // Avoid another call to javaTimeNanos() if we already know the time at which the last batch ended
    batch_start_time = batch_end_time;
    const jlong deadline = batch_start_time + deadline_ns;

    ShenandoahHeapLocker locker(_heap->lock());
    do {
      // Measurements on typical 2024 hardware suggest it typically requires between 1400 and 2000 ns to process a batch of
      // 32 regions, assuming low contention with other threads. Sometimes this goes higher, when mutator threads
      // are contending for CPU cores and/or the heap lock. On this hardware with a 10 us deadline, we expect 3-6 batches
      // to be processed between yields most of the time.
      //
      // Note that the deadline is enforced since the end of the previous batch. In the case that yield() or acquisition
      // of the heap lock takes a "long time", we will have less time to process regions, but we will always process at
      // least one batch between yields. Yielding more frequently when there is heavy contention for the heap lock or for
      // CPU cores is considered the right thing to do.
      const size_t REGIONS_PER_BATCH = 32;
      size_t max_idx = MIN2(count, idx + REGIONS_PER_BATCH);
      while (idx < max_idx) {
        try_recycle_trashed(_trash_regions[idx++]);
      }
      total_batches++;
      batch_end_time = os::javaTimeNanos();
      // Estimate includes historic combination of yield times and heap lock acquisition times.
      batch_process_time_estimate = (batch_end_time - recycle_trash_start_time) / total_batches;
      predicted_next_batch_end_time = batch_end_time + batch_process_time_estimate;
    } while ((idx < count) && (predicted_next_batch_end_time < deadline));
  }
}

void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
  size_t idx = r->index();

  assert(_partitions.partition_id_matches(idx, ShenandoahFreeSetPartitionId::Mutator), "Should be in mutator view");
  assert(can_allocate_from(r), "Should not be allocated");

  size_t ac = alloc_capacity(r);
  _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
                                               ShenandoahFreeSetPartitionId::Collector, ac);
  _partitions.assert_bounds();

  // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
  // to recycle trash before attempting to allocate anything in the region.
}

void ShenandoahFreeSet::clear() {
  shenandoah_assert_heaplocked();
  clear_internal();
}

void ShenandoahFreeSet::clear_internal() {
  _partitions.make_all_regions_unavailable();
}

void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &cset_regions) {

  cset_regions = 0;
  clear_internal();
  size_t region_size_bytes = _partitions.region_size_bytes();
  size_t max_regions = _partitions.max_regions();

  size_t mutator_leftmost = max_regions;
  size_t mutator_rightmost = 0;
  size_t mutator_leftmost_empty = max_regions;
  size_t mutator_rightmost_empty = 0;

  size_t mutator_regions = 0;
  size_t mutator_used = 0;

  for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
    ShenandoahHeapRegion* region = _heap->get_region(idx);
    if (region->is_trash()) {
      // Trashed regions represent regions that had been in the collection set but have not yet been "cleaned up".
      // The cset regions are not "trashed" until we have finished update refs.
      cset_regions++;
    }
    if (region->is_alloc_allowed() || region->is_trash()) {

      // Do not add regions that would almost surely fail allocation
      size_t ac = alloc_capacity(region);
      if (ac > PLAB::min_size() * HeapWordSize) {
        _partitions.raw_assign_membership(idx, ShenandoahFreeSetPartitionId::Mutator);

        if (idx < mutator_leftmost) {
          mutator_leftmost = idx;
        }
        if (idx > mutator_rightmost) {
          mutator_rightmost = idx;
        }
        if (ac == region_size_bytes) {
          if (idx < mutator_leftmost_empty) {
            mutator_leftmost_empty = idx;
          }
          if (idx > mutator_rightmost_empty) {
            mutator_rightmost_empty = idx;
          }
        }
        mutator_regions++;
        mutator_used += (region_size_bytes - ac);

        log_debug(gc)(
          "  Adding Region " SIZE_FORMAT " (Free: " SIZE_FORMAT "%s, Used: " SIZE_FORMAT "%s) to mutator partition",
          idx, byte_size_in_proper_unit(region->free()), proper_unit_for_byte_size(region->free()),
          byte_size_in_proper_unit(region->used()), proper_unit_for_byte_size(region->used()));
      }
    }
  }
  idx_t rightmost_idx = (mutator_leftmost == max_regions) ? -1 : (idx_t) mutator_rightmost;
  idx_t rightmost_empty_idx = (mutator_leftmost_empty == max_regions) ? -1 : (idx_t) mutator_rightmost_empty;
  _partitions.establish_mutator_intervals(mutator_leftmost, rightmost_idx, mutator_leftmost_empty, rightmost_empty_idx,
                                          mutator_regions, mutator_used);
}

void ShenandoahFreeSet::move_regions_from_collector_to_mutator(size_t max_xfer_regions) {
  size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
  size_t collector_empty_xfer = 0;
  size_t collector_not_empty_xfer = 0;

  // Process empty regions within the Collector free partition
  if ((max_xfer_regions > 0) &&
      (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Collector)
       <= _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Collector))) {
    ShenandoahHeapLocker locker(_heap->lock());
    idx_t rightmost = _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Collector);
    for (idx_t idx = _partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Collector);
         (max_xfer_regions > 0) && (idx <= rightmost); ) {
      assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, idx),
             "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, idx);
      // Note: can_allocate_from() denotes that the region is entirely empty
      if (can_allocate_from(idx)) {
        _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Collector,
                                                     ShenandoahFreeSetPartitionId::Mutator, region_size_bytes);
        max_xfer_regions--;
        collector_empty_xfer += region_size_bytes;
      }
      idx = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Collector, idx + 1);
    }
  }

  // If there are any non-empty regions within the Collector partition, we can also move them to the Mutator free partition
  if ((max_xfer_regions > 0) && (_partitions.leftmost(ShenandoahFreeSetPartitionId::Collector)
                                 <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector))) {
    ShenandoahHeapLocker locker(_heap->lock());
    idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector);
    for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector);
         (max_xfer_regions > 0) && (idx <= rightmost); ) {
      assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, idx),
             "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, idx);
      size_t ac = alloc_capacity(idx);
      if (ac > 0) {
        _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Collector,
                                                     ShenandoahFreeSetPartitionId::Mutator, ac);
        max_xfer_regions--;
        collector_not_empty_xfer += ac;
      }
      idx = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Collector, idx + 1);
    }
  }

  size_t collector_xfer = collector_empty_xfer + collector_not_empty_xfer;
  log_info(gc, ergo)("At start of update refs, moving " SIZE_FORMAT "%s to Mutator free partition from Collector Reserve",
                     byte_size_in_proper_unit(collector_xfer), proper_unit_for_byte_size(collector_xfer));
}

void ShenandoahFreeSet::prepare_to_rebuild(size_t &cset_regions) {
  shenandoah_assert_heaplocked();

  log_debug(gc)("Rebuilding FreeSet");

  // This places regions that have alloc_capacity into the mutator partition.
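  // The Collector partition is left empty at this point; finish_rebuild() subsequently calls
  // reserve_regions() to move regions back into the Collector reserve.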
1103 find_regions_with_alloc_capacity(cset_regions); 1104 } 1105 1106 void ShenandoahFreeSet::finish_rebuild(size_t cset_regions) { 1107 shenandoah_assert_heaplocked(); 1108 1109 // Our desire is to reserve this much memory for future evacuation. We may end up reserving less, if 1110 // memory is in short supply. 1111 1112 size_t reserve = _heap->max_capacity() * ShenandoahEvacReserve / 100; 1113 size_t available_in_collector_partition = (_partitions.capacity_of(ShenandoahFreeSetPartitionId::Collector) 1114 - _partitions.used_by(ShenandoahFreeSetPartitionId::Collector)); 1115 size_t additional_reserve; 1116 if (available_in_collector_partition < reserve) { 1117 additional_reserve = reserve - available_in_collector_partition; 1118 } else { 1119 additional_reserve = 0; 1120 } 1121 1122 reserve_regions(reserve); 1123 _partitions.assert_bounds(); 1124 log_status(); 1125 } 1126 1127 void ShenandoahFreeSet::rebuild() { 1128 size_t cset_regions; 1129 prepare_to_rebuild(cset_regions); 1130 finish_rebuild(cset_regions); 1131 } 1132 1133 void ShenandoahFreeSet::reserve_regions(size_t to_reserve) { 1134 for (size_t i = _heap->num_regions(); i > 0; i--) { 1135 size_t idx = i - 1; 1136 ShenandoahHeapRegion* r = _heap->get_region(idx); 1137 1138 if (!_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx)) { 1139 continue; 1140 } 1141 1142 size_t ac = alloc_capacity(r); 1143 assert (ac > 0, "Membership in free partition implies has capacity"); 1144 1145 bool move_to_collector = _partitions.available_in(ShenandoahFreeSetPartitionId::Collector) < to_reserve; 1146 if (!move_to_collector) { 1147 // We've satisfied to_reserve 1148 break; 1149 } 1150 1151 if (move_to_collector) { 1152 // Note: In a previous implementation, regions were only placed into the survivor space (collector_is_free) if 1153 // they were entirely empty. This has the effect of causing new Mutator allocation to reside next to objects 1154 // that have already survived at least one GC, mixing ephemeral with longer-lived objects in the same region. 1155 // Any objects that have survived a GC are less likely to immediately become garbage, so a region that contains 1156 // survivor objects is less likely to be selected for the collection set. This alternative implementation allows 1157 // survivor regions to continue accumulating other survivor objects, and makes it more likely that ephemeral objects 1158 // occupy regions comprised entirely of ephemeral objects. These regions are highly likely to be included in the next 1159 // collection set, and they are easily evacuated because they have low density of live objects. 
1160 _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator, 1161 ShenandoahFreeSetPartitionId::Collector, ac); 1162 log_debug(gc)(" Shifting region " SIZE_FORMAT " from mutator_free to collector_free", idx); 1163 } 1164 } 1165 1166 if (LogTarget(Info, gc, free)::is_enabled()) { 1167 size_t reserve = _partitions.available_in(ShenandoahFreeSetPartitionId::Collector); 1168 if (reserve < to_reserve) { 1169 log_debug(gc)("Wanted " PROPERFMT " for young reserve, but only reserved: " PROPERFMT, 1170 PROPERFMTARGS(to_reserve), PROPERFMTARGS(reserve)); 1171 } 1172 } 1173 } 1174 1175 void ShenandoahFreeSet::log_status_under_lock() { 1176 // Must not be heap locked, it acquires heap lock only when log is enabled 1177 shenandoah_assert_not_heaplocked(); 1178 if (LogTarget(Info, gc, free)::is_enabled() 1179 DEBUG_ONLY(|| LogTarget(Debug, gc, free)::is_enabled())) { 1180 ShenandoahHeapLocker locker(_heap->lock()); 1181 log_status(); 1182 } 1183 } 1184 1185 void ShenandoahFreeSet::log_status() { 1186 shenandoah_assert_heaplocked(); 1187 1188 #ifdef ASSERT 1189 // Dump of the FreeSet details is only enabled if assertions are enabled 1190 if (LogTarget(Debug, gc, free)::is_enabled()) { 1191 #define BUFFER_SIZE 80 1192 size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); 1193 size_t consumed_collector = 0; 1194 size_t available_collector = 0; 1195 size_t consumed_mutator = 0; 1196 size_t available_mutator = 0; 1197 1198 char buffer[BUFFER_SIZE]; 1199 for (uint i = 0; i < BUFFER_SIZE; i++) { 1200 buffer[i] = '\0'; 1201 } 1202 log_debug(gc)("FreeSet map legend: M:mutator_free C:collector_free H:humongous _:retired"); 1203 log_debug(gc)(" mutator free range [" SIZE_FORMAT ".." SIZE_FORMAT "]," 1204 " collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "]", 1205 _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator), 1206 _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator), 1207 _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector), 1208 _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector)); 1209 1210 for (uint i = 0; i < _heap->num_regions(); i++) { 1211 ShenandoahHeapRegion *r = _heap->get_region(i); 1212 uint idx = i % 64; 1213 if ((i != 0) && (idx == 0)) { 1214 log_debug(gc)(" %6u: %s", i-64, buffer); 1215 } 1216 if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, i)) { 1217 size_t capacity = alloc_capacity(r); 1218 available_mutator += capacity; 1219 consumed_mutator += region_size_bytes - capacity; 1220 buffer[idx] = (capacity == region_size_bytes)? 'M': 'm'; 1221 } else if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, i)) { 1222 size_t capacity = alloc_capacity(r); 1223 available_collector += capacity; 1224 consumed_collector += region_size_bytes - capacity; 1225 buffer[idx] = (capacity == region_size_bytes)? 
void ShenandoahFreeSet::log_status() {
  shenandoah_assert_heaplocked();

#ifdef ASSERT
  // A dump of the FreeSet details is only enabled if assertions are enabled
  if (LogTarget(Debug, gc, free)::is_enabled()) {
#define BUFFER_SIZE 80
    size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
    size_t consumed_collector = 0;
    size_t available_collector = 0;
    size_t consumed_mutator = 0;
    size_t available_mutator = 0;

    char buffer[BUFFER_SIZE];
    for (uint i = 0; i < BUFFER_SIZE; i++) {
      buffer[i] = '\0';
    }
    log_debug(gc)("FreeSet map legend: M/m:mutator_free C/c:collector_free H:humongous _:retired"
                  " (upper case indicates a fully empty region)");
    log_debug(gc)(" mutator free range [" SIZE_FORMAT ".." SIZE_FORMAT "],"
                  " collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "]",
                  _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
                  _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
                  _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector),
                  _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector));

    for (uint i = 0; i < _heap->num_regions(); i++) {
      ShenandoahHeapRegion *r = _heap->get_region(i);
      uint idx = i % 64;
      if ((i != 0) && (idx == 0)) {
        log_debug(gc)(" %6u: %s", i-64, buffer);
      }
      if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, i)) {
        size_t capacity = alloc_capacity(r);
        available_mutator += capacity;
        consumed_mutator += region_size_bytes - capacity;
        buffer[idx] = (capacity == region_size_bytes)? 'M': 'm';
      } else if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, i)) {
        size_t capacity = alloc_capacity(r);
        available_collector += capacity;
        consumed_collector += region_size_bytes - capacity;
        buffer[idx] = (capacity == region_size_bytes)? 'C': 'c';
      } else if (r->is_humongous()) {
        buffer[idx] = 'H';
      } else {
        buffer[idx] = '_';
      }
    }
    // Print the final, partially filled row of the map.
    uint remnant = _heap->num_regions() % 64;
    if (remnant > 0) {
      buffer[remnant] = '\0';
    } else {
      remnant = 64;
    }
    log_debug(gc)(" %6u: %s", (uint) (_heap->num_regions() - remnant), buffer);
  }
#endif
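
  // Illustrative arithmetic for the fragmentation percentages reported below (hypothetical
  // numbers, assuming 8 MiB regions): if the longest run of contiguous empty Mutator regions
  // could host max_humongous == 24 MiB while empty Mutator regions hold total_free_ext == 96 MiB
  // in all, then external fragmentation is 100 - (100 * 24 / 96) == 75%. If Mutator free regions
  // average 2 MiB used apiece, internal fragmentation is 100 * 2 / 8 == 25%.
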
  LogTarget(Info, gc, free) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);

    {
      idx_t last_idx = 0;
      size_t max = 0;
      size_t max_contig = 0;
      size_t empty_contig = 0;

      size_t total_used = 0;
      size_t total_free = 0;
      size_t total_free_ext = 0;

      for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator);
           idx <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator); idx++) {
        if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx)) {
          ShenandoahHeapRegion *r = _heap->get_region(idx);
          size_t free = alloc_capacity(r);
          max = MAX2(max, free);
          if (r->is_empty()) {
            total_free_ext += free;
            if (last_idx + 1 == idx) {
              empty_contig++;
            } else {
              empty_contig = 1;
            }
          } else {
            empty_contig = 0;
          }
          total_used += r->used();
          total_free += free;
          max_contig = MAX2(max_contig, empty_contig);
          last_idx = idx;
        }
      }

      size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
      size_t free = capacity() - used();

      // Certain regions that belonged to the Mutator free partition at the time of the most recent
      // rebuild may have since been retired, so the sum of used within regions still in the Mutator
      // free partition may not match the internally tracked value of used(). Free memory, however,
      // should agree.
      assert(free == total_free, "Free memory should match");

      ls.print("Free: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s regular, " SIZE_FORMAT "%s humongous, ",
               byte_size_in_proper_unit(free), proper_unit_for_byte_size(free),
               byte_size_in_proper_unit(max), proper_unit_for_byte_size(max),
               byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous));

      ls.print("Frag: ");
      size_t frag_ext;
      if (total_free_ext > 0) {
        frag_ext = 100 - (100 * max_humongous / total_free_ext);
      } else {
        frag_ext = 0;
      }
      ls.print(SIZE_FORMAT "%% external, ", frag_ext);

      size_t frag_int;
      if (_partitions.count(ShenandoahFreeSetPartitionId::Mutator) > 0) {
        frag_int = (100 * (total_used / _partitions.count(ShenandoahFreeSetPartitionId::Mutator))
                    / ShenandoahHeapRegion::region_size_bytes());
      } else {
        frag_int = 0;
      }
      ls.print(SIZE_FORMAT "%% internal; ", frag_int);
      ls.print("Used: " SIZE_FORMAT "%s, Mutator Free: " SIZE_FORMAT,
               byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used),
               _partitions.count(ShenandoahFreeSetPartitionId::Mutator));
    }

    {
      size_t max = 0;
      size_t total_free = 0;
      size_t total_used = 0;

      for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector);
           idx <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector); idx++) {
        if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, idx)) {
          ShenandoahHeapRegion *r = _heap->get_region(idx);
          size_t free = alloc_capacity(r);
          max = MAX2(max, free);
          total_free += free;
          total_used += r->used();
        }
      }
      ls.print(" Collector Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s; Used: " SIZE_FORMAT "%s",
               byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
               byte_size_in_proper_unit(max), proper_unit_for_byte_size(max),
               byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used));
    }
  }
}

HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
  shenandoah_assert_heaplocked();
  if (ShenandoahHeapRegion::requires_humongous(req.size())) {
    switch (req.type()) {
      case ShenandoahAllocRequest::_alloc_shared:
      case ShenandoahAllocRequest::_alloc_shared_gc:
        in_new_region = true;
        return allocate_contiguous(req);
      case ShenandoahAllocRequest::_alloc_gclab:
      case ShenandoahAllocRequest::_alloc_tlab:
        in_new_region = false;
        assert(false, "Trying to allocate TLAB or GCLAB in humongous region: " SIZE_FORMAT, req.size());
        return nullptr;
      default:
        ShouldNotReachHere();
        return nullptr;
    }
  } else {
    return allocate_single(req, in_new_region);
  }
}
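
// The walks below all use the same iteration idiom (a sketch; the terminal behavior of the helper
// is our assumption): start at leftmost(partition) and, after visiting index, advance with
//   index = _partitions.find_index_of_next_available_region(partition, index + 1);
// which is expected to return an index beyond rightmost(partition) once no members remain, so the
// `index <= rightmost` bound ends the loop.
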
void ShenandoahFreeSet::print_on(outputStream* out) const {
  out->print_cr("Mutator Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::Mutator));
  idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
  for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); index <= rightmost; ) {
    assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, index),
           "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
    _heap->get_region(index)->print_on(out);
    index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, index + 1);
  }
  out->print_cr("Collector Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::Collector));
  rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector);
  for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector); index <= rightmost; ) {
    assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, index),
           "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
    _heap->get_region(index)->print_on(out);
    index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Collector, index + 1);
  }
}

double ShenandoahFreeSet::internal_fragmentation() {
  double squared = 0;
  double linear = 0;

  idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
  for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); index <= rightmost; ) {
    assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, index),
           "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
    ShenandoahHeapRegion* r = _heap->get_region(index);
    size_t used = r->used();
    squared += used * used;
    linear += used;
    index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, index + 1);
  }

  if (linear > 0) {
    double s = squared / (ShenandoahHeapRegion::region_size_bytes() * linear);
    return 1 - s;
  } else {
    return 0;
  }
}

double ShenandoahFreeSet::external_fragmentation() {
  idx_t last_idx = 0;
  size_t max_contig = 0;
  size_t empty_contig = 0;

  size_t free = 0;

  idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
  for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); index <= rightmost; ) {
    assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, index),
           "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
    ShenandoahHeapRegion* r = _heap->get_region(index);
    if (r->is_empty()) {
      free += ShenandoahHeapRegion::region_size_bytes();
      if (last_idx + 1 == index) {
        empty_contig++;
      } else {
        empty_contig = 1;
      }
    } else {
      empty_contig = 0;
    }
    max_contig = MAX2(max_contig, empty_contig);
    last_idx = index;
    index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, index + 1);
  }

  if (free > 0) {
    return 1 - (1.0 * max_contig * ShenandoahHeapRegion::region_size_bytes() / free);
  } else {
    return 0;
  }
}
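
// Worked example for the two metrics above (hypothetical numbers, assuming 8 MiB regions):
//
//   internal_fragmentation() == 1 - (sum of used^2) / (region_size * sum of used)
//     Two Mutator regions with 2 MiB and 6 MiB used give 1 - (4 + 36) / (8 * 8) == 0.375.
//
//   external_fragmentation() == 1 - (largest contiguous empty run) / (total empty space)
//     Four empty regions, of which at most two are adjacent, give 1 - (2 * 8) / (4 * 8) == 0.5.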