src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp

   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "gc/shared/tlab_globals.hpp"
  28 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  29 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  30 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  31 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  32 #include "gc/shenandoah/shenandoahSimpleBitMap.hpp"
  33 #include "gc/shenandoah/shenandoahSimpleBitMap.inline.hpp"

  34 #include "logging/logStream.hpp"
  35 #include "memory/resourceArea.hpp"
  36 #include "runtime/orderAccess.hpp"
  37 
  38 static const char* partition_name(ShenandoahFreeSetPartitionId t) {
  39   switch (t) {
  40     case ShenandoahFreeSetPartitionId::NotFree: return "NotFree";
  41     case ShenandoahFreeSetPartitionId::Mutator: return "Mutator";
  42     case ShenandoahFreeSetPartitionId::Collector: return "Collector";
  43     default:
  44       ShouldNotReachHere();
  45       return "Unrecognized";
  46   }
  47 }
  48 
  49 #ifndef PRODUCT
  50 void ShenandoahRegionPartitions::dump_bitmap() const {
  51   log_info(gc)("Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "], Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
  52                _leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)],
  53                _rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)],
  54                _leftmosts[int(ShenandoahFreeSetPartitionId::Collector)],
  55                _rightmosts[int(ShenandoahFreeSetPartitionId::Collector)]);
  56   log_info(gc)("Empty Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT
  57                "], Empty Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
  58                _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
  59                _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
  60                _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
  61                _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)]);
  62 
  63   log_info(gc)("%6s: %18s %18s %18s", "index", "Mutator Bits", "Collector Bits", "NotFree Bits");
  64   dump_bitmap_range(0, _max-1);
  65 }
  66 
  67 void ShenandoahRegionPartitions::dump_bitmap_range(idx_t start_region_idx, idx_t end_region_idx) const {
  68   assert((start_region_idx >= 0) && (start_region_idx < (idx_t) _max), "precondition");
  69   assert((end_region_idx >= 0) && (end_region_idx < (idx_t) _max), "precondition");
  70   idx_t aligned_start = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(start_region_idx);
  71   idx_t aligned_end = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(end_region_idx);
  72   idx_t alignment = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].alignment();
  73   while (aligned_start <= aligned_end) {
  74     dump_bitmap_row(aligned_start);
  75     aligned_start += alignment;
  76   }
  77 }
  78 
  79 void ShenandoahRegionPartitions::dump_bitmap_row(idx_t region_idx) const {
  80   assert((region_idx >= 0) && (region_idx < (idx_t) _max), "precondition");
  81   idx_t aligned_idx = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(region_idx);
  82   uintx mutator_bits = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].bits_at(aligned_idx);
  83   uintx collector_bits = _membership[int(ShenandoahFreeSetPartitionId::Collector)].bits_at(aligned_idx);
  84   uintx free_bits = mutator_bits | collector_bits;
  85   uintx notfree_bits =  ~free_bits;
  86   log_info(gc)(SSIZE_FORMAT_W(6) ": " SIZE_FORMAT_X_0 " 0x" SIZE_FORMAT_X_0 " 0x" SIZE_FORMAT_X_0,
  87                aligned_idx, mutator_bits, collector_bits, notfree_bits);
  88 }
  89 #endif
  90 
  91 ShenandoahRegionPartitions::ShenandoahRegionPartitions(size_t max_regions, ShenandoahFreeSet* free_set) :
  92     _max(max_regions),
  93     _region_size_bytes(ShenandoahHeapRegion::region_size_bytes()),
  94     _free_set(free_set),
  95     _membership{ ShenandoahSimpleBitMap(max_regions), ShenandoahSimpleBitMap(max_regions) }
  96 {
  97   make_all_regions_unavailable();

  98 }
  99 
 100 inline bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) const {
 101   return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
 102 }
 103 
 104 inline bool ShenandoahFreeSet::can_allocate_from(size_t idx) const {
 105   ShenandoahHeapRegion* r = _heap->get_region(idx);
 106   return can_allocate_from(r);
 107 }
 108 
 109 inline size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) const {
 110   if (r->is_trash()) {
 111     // This would be recycled on allocation path
 112     return ShenandoahHeapRegion::region_size_bytes();
 113   } else {
 114     return r->free();
 115   }
 116 }
 117 
 118 inline size_t ShenandoahFreeSet::alloc_capacity(size_t idx) const {
 119   ShenandoahHeapRegion* r = _heap->get_region(idx);
 120   return alloc_capacity(r);
 121 }
 122 
 123 inline bool ShenandoahFreeSet::has_alloc_capacity(ShenandoahHeapRegion *r) const {
 124   return alloc_capacity(r) > 0;
 125 }
 126 
 127 inline idx_t ShenandoahRegionPartitions::leftmost(ShenandoahFreeSetPartitionId which_partition) const {
 128   assert (which_partition < NumPartitions, "selected free partition must be valid");
 129   idx_t idx = _leftmosts[int(which_partition)];
 130   if (idx >= _max) {
 131     return _max;
 132   } else {
  133     // Cannot assert that _membership[which_partition].is_set(idx) because this helper method may be used
  134     // to query the original value of leftmost when leftmost must be adjusted because the interval representing
  135     // which_partition is shrinking after the region that used to be leftmost is retired.
 136     return idx;
 137   }
 138 }
 139 
 140 inline idx_t ShenandoahRegionPartitions::rightmost(ShenandoahFreeSetPartitionId which_partition) const {
 141   assert (which_partition < NumPartitions, "selected free partition must be valid");
 142   idx_t idx = _rightmosts[int(which_partition)];
  143   // Cannot assert that _membership[which_partition].is_set(idx) because this helper method may be used
  144   // to query the original value of rightmost when rightmost must be adjusted because the interval representing
  145   // which_partition is shrinking after the region that used to be rightmost is retired.
 146   return idx;
 147 }
 148 
 149 void ShenandoahRegionPartitions::make_all_regions_unavailable() {
 150   for (size_t partition_id = 0; partition_id < IntNumPartitions; partition_id++) {
 151     _membership[partition_id].clear_all();
 152     _leftmosts[partition_id] = _max;
 153     _rightmosts[partition_id] = -1;
 154     _leftmosts_empty[partition_id] = _max;
  155     _rightmosts_empty[partition_id] = -1;
 156     _capacity[partition_id] = 0;
 157     _used[partition_id] = 0;
 158   }
 159   _region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = _region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
 160 }
 161 
 162 void ShenandoahRegionPartitions::establish_mutator_intervals(idx_t mutator_leftmost, idx_t mutator_rightmost,
 163                                                              idx_t mutator_leftmost_empty, idx_t mutator_rightmost_empty,
 164                                                              size_t mutator_region_count, size_t mutator_used) {
 165   _region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_region_count;
 166   _leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_leftmost;
 167   _rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_rightmost;
 168   _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_leftmost_empty;
 169   _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_rightmost_empty;
 170 
 171   _region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_region_count;
 172   _used[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_used;
 173   _capacity[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_region_count * _region_size_bytes;
 174 
 175   _leftmosts[int(ShenandoahFreeSetPartitionId::Collector)] = _max;
 176   _rightmosts[int(ShenandoahFreeSetPartitionId::Collector)] = -1;
 177   _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)] = _max;
 178   _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)] = -1;
 179 
 180   _region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
 181   _used[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
 182   _capacity[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
 183 }
 184 
 185 void ShenandoahRegionPartitions::increase_used(ShenandoahFreeSetPartitionId which_partition, size_t bytes) {
 186   assert (which_partition < NumPartitions, "Partition must be valid");
 187   _used[int(which_partition)] += bytes;
 188   assert (_used[int(which_partition)] <= _capacity[int(which_partition)],
 189           "Must not use (" SIZE_FORMAT ") more than capacity (" SIZE_FORMAT ") after increase by " SIZE_FORMAT,
 190           _used[int(which_partition)], _capacity[int(which_partition)], bytes);
 191 }
 192 
 193 inline void ShenandoahRegionPartitions::shrink_interval_if_range_modifies_either_boundary(
 194   ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx) {
 195   assert((low_idx <= high_idx) && (low_idx >= 0) && (high_idx < _max), "Range must span legal index values");
 196   if (low_idx == leftmost(partition)) {
 197     assert (!_membership[int(partition)].is_set(low_idx), "Do not shrink interval if region not removed");
 198     if (high_idx + 1 == _max) {
 199       _leftmosts[int(partition)] = _max;
 200     } else {
 201       _leftmosts[int(partition)] = find_index_of_next_available_region(partition, high_idx + 1);
 202     }
 203     if (_leftmosts_empty[int(partition)] < _leftmosts[int(partition)]) {
 204       // This gets us closer to where we need to be; we'll scan further when leftmosts_empty is requested.
 205       _leftmosts_empty[int(partition)] = leftmost(partition);
 206     }
 207   }
 208   if (high_idx == _rightmosts[int(partition)]) {
 209     assert (!_membership[int(partition)].is_set(high_idx), "Do not shrink interval if region not removed");
 210     if (low_idx == 0) {
 211       _rightmosts[int(partition)] = -1;
 212     } else {
 213       _rightmosts[int(partition)] = find_index_of_previous_available_region(partition, low_idx - 1);
 214     }
 215     if (_rightmosts_empty[int(partition)] > _rightmosts[int(partition)]) {
 216       // This gets us closer to where we need to be; we'll scan further when rightmosts_empty is requested.
 217       _rightmosts_empty[int(partition)] = _rightmosts[int(partition)];
 218     }
 219   }
 220   if (_leftmosts[int(partition)] > _rightmosts[int(partition)]) {
 221     _leftmosts[int(partition)] = _max;
 222     _rightmosts[int(partition)] = -1;
 223     _leftmosts_empty[int(partition)] = _max;
 224     _rightmosts_empty[int(partition)] = -1;
 225   }
 226 }
 227 
 228 inline void ShenandoahRegionPartitions::shrink_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, idx_t idx) {
 229   shrink_interval_if_range_modifies_either_boundary(partition, idx, idx);
 230 }
 231 
 232 inline void ShenandoahRegionPartitions::expand_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition,
 233                                                                              idx_t idx, size_t region_available) {
 234   if (_leftmosts[int(partition)] > idx) {
 235     _leftmosts[int(partition)] = idx;
 236   }
 237   if (_rightmosts[int(partition)] < idx) {
 238     _rightmosts[int(partition)] = idx;
 239   }
 240   if (region_available == _region_size_bytes) {
 241     if (_leftmosts_empty[int(partition)] > idx) {
 242       _leftmosts_empty[int(partition)] = idx;
 243     }
 244     if (_rightmosts_empty[int(partition)] < idx) {
 245       _rightmosts_empty[int(partition)] = idx;
 246     }
 247   }
 248 }
 249 
 250 void ShenandoahRegionPartitions::retire_range_from_partition(
 251   ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx) {
 252 
 253   // Note: we may remove from free partition even if region is not entirely full, such as when available < PLAB::min_size()
 254   assert ((low_idx < _max) && (high_idx < _max), "Both indices are sane: " SIZE_FORMAT " and " SIZE_FORMAT " < " SIZE_FORMAT,
 255           low_idx, high_idx, _max);
 256   assert (partition < NumPartitions, "Cannot remove from free partitions if not already free");
 257 
 258   for (idx_t idx = low_idx; idx <= high_idx; idx++) {
 259     assert (in_free_set(partition, idx), "Must be in partition to remove from partition");
 260     _membership[int(partition)].clear_bit(idx);
 261   }
 262   _region_counts[int(partition)] -= high_idx + 1 - low_idx;
 263   shrink_interval_if_range_modifies_either_boundary(partition, low_idx, high_idx);
 264 }
 265 
 266 void ShenandoahRegionPartitions::retire_from_partition(ShenandoahFreeSetPartitionId partition, idx_t idx, size_t used_bytes) {
 267 
 268   // Note: we may remove from free partition even if region is not entirely full, such as when available < PLAB::min_size()
 269   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 270   assert (partition < NumPartitions, "Cannot remove from free partitions if not already free");
 271   assert (in_free_set(partition, idx), "Must be in partition to remove from partition");
 272 
 273   if (used_bytes < _region_size_bytes) {
 274     // Count the alignment pad remnant of memory as used when we retire this region
 275     increase_used(partition, _region_size_bytes - used_bytes);
 276   }
 277   _membership[int(partition)].clear_bit(idx);
 278   shrink_interval_if_boundary_modified(partition, idx);
 279   _region_counts[int(partition)]--;
 280 }
 281 
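The waste accounting in retire_from_partition() above can be traced with a minimal standalone sketch. All sizes here are hypothetical (a 4 MB region, a partition of 8 regions), not the real ShenandoahHeapRegion geometry; the point is only that retiring a partially used region charges the unusable remnant to used so the used <= capacity invariant of increase_used() still holds.

#include <cassert>
#include <cstddef>

int main() {
  const size_t region_size_bytes = 4 * 1024 * 1024;          // hypothetical region size
  size_t capacity = 8 * region_size_bytes;                    // partition currently holds 8 regions
  size_t used     = 5 * region_size_bytes + 3 * 1024 * 1024;  // allocations were counted as they happened

  // Retire a region that has only 3 MB allocated: the 1 MB remnant is treated as
  // waste and added to used, mirroring retire_from_partition() / increase_used().
  size_t used_bytes = 3 * 1024 * 1024;
  if (used_bytes < region_size_bytes) {
    used += region_size_bytes - used_bytes;
  }
  assert(used <= capacity);   // the invariant increase_used() asserts
  return 0;
}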
 282 void ShenandoahRegionPartitions::make_free(idx_t idx, ShenandoahFreeSetPartitionId which_partition, size_t available) {
 283   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 284   assert (membership(idx) == ShenandoahFreeSetPartitionId::NotFree, "Cannot make free if already free");
 285   assert (which_partition < NumPartitions, "selected free partition must be valid");
 286   assert (available <= _region_size_bytes, "Available cannot exceed region size");
 287 
 288   _membership[int(which_partition)].set_bit(idx);
 289   _capacity[int(which_partition)] += _region_size_bytes;
 290   _used[int(which_partition)] += _region_size_bytes - available;
 291   expand_interval_if_boundary_modified(which_partition, idx, available);
 292 
 293   _region_counts[int(which_partition)]++;
 294 }
 295 
 296 void ShenandoahRegionPartitions::move_from_partition_to_partition(idx_t idx, ShenandoahFreeSetPartitionId orig_partition,
 297                                                                   ShenandoahFreeSetPartitionId new_partition, size_t available) {
 298   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 299   assert (orig_partition < NumPartitions, "Original partition must be valid");
 300   assert (new_partition < NumPartitions, "New partition must be valid");
 301   assert (available <= _region_size_bytes, "Available cannot exceed region size");
 302 
 303   // Expected transitions:
 304   //  During rebuild:         Mutator => Collector
 305   //  During flip_to_gc:      Mutator empty => Collector
 306   // At start of update refs: Collector => Mutator
 307   assert (((available <= _region_size_bytes) &&
 308            (((orig_partition == ShenandoahFreeSetPartitionId::Mutator)
 309              && (new_partition == ShenandoahFreeSetPartitionId::Collector)) ||
 310             ((orig_partition == ShenandoahFreeSetPartitionId::Collector)
 311              && (new_partition == ShenandoahFreeSetPartitionId::Mutator)))) ||
 312           ((available == _region_size_bytes) &&
 313            ((orig_partition == ShenandoahFreeSetPartitionId::Mutator)
 314             && (new_partition == ShenandoahFreeSetPartitionId::Collector))), "Unexpected movement between partitions");
 315 
 316   size_t used = _region_size_bytes - available;
 317 
 318   _membership[int(orig_partition)].clear_bit(idx);
 319   _membership[int(new_partition)].set_bit(idx);
 320 
 321   _capacity[int(orig_partition)] -= _region_size_bytes;
 322   _used[int(orig_partition)] -= used;
 323   shrink_interval_if_boundary_modified(orig_partition, idx);
 324 
  325   _capacity[int(new_partition)] += _region_size_bytes;
 326   _used[int(new_partition)] += used;
 327   expand_interval_if_boundary_modified(new_partition, idx, available);
 328 
 329   _region_counts[int(orig_partition)]--;
 330   _region_counts[int(new_partition)]++;
 331 }
 332 
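As a reading aid, the "Expected transitions" rule that the assert above effectively enforces can be restated as a tiny standalone predicate. This is a simplified sketch: the enum is a hypothetical stand-in for ShenandoahFreeSetPartitionId, and it captures only the effective rule (Mutator to Collector, or Collector to Mutator, with available never exceeding the region size), not the stricter "fully empty" expectation that flip_to_gc is described as satisfying.

#include <cassert>
#include <cstddef>

// Hypothetical stand-in for the real partition id enum.
enum class Partition { NotFree, Mutator, Collector };

static bool is_expected_transition(Partition from, Partition to,
                                   size_t available, size_t region_size_bytes) {
  bool mutator_to_collector = (from == Partition::Mutator)   && (to == Partition::Collector);
  bool collector_to_mutator = (from == Partition::Collector) && (to == Partition::Mutator);
  return (available <= region_size_bytes) && (mutator_to_collector || collector_to_mutator);
}

int main() {
  const size_t region = 4 * 1024 * 1024;  // hypothetical region size
  assert(is_expected_transition(Partition::Mutator, Partition::Collector, region, region));
  assert(is_expected_transition(Partition::Collector, Partition::Mutator, region / 2, region));
  assert(!is_expected_transition(Partition::Mutator, Partition::Mutator, region, region));
  return 0;
}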
 333 const char* ShenandoahRegionPartitions::partition_membership_name(idx_t idx) const {
 334   return partition_name(membership(idx));
 335 }
 336 
 337 inline ShenandoahFreeSetPartitionId ShenandoahRegionPartitions::membership(idx_t idx) const {
 338   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 339   ShenandoahFreeSetPartitionId result = ShenandoahFreeSetPartitionId::NotFree;
 340   for (uint partition_id = 0; partition_id < UIntNumPartitions; partition_id++) {
 341     if (_membership[partition_id].is_set(idx)) {
 342       assert(result == ShenandoahFreeSetPartitionId::NotFree, "Region should reside in only one partition");
 343       result = (ShenandoahFreeSetPartitionId) partition_id;
 344     }
 345   }
 346   return result;
 347 }
 348 
 349 #ifdef ASSERT
 350 inline bool ShenandoahRegionPartitions::partition_id_matches(idx_t idx, ShenandoahFreeSetPartitionId test_partition) const {

 351   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 352   assert (test_partition < ShenandoahFreeSetPartitionId::NotFree, "must be a valid partition");
 353 
 354   return membership(idx) == test_partition;
 355 }
 356 #endif
 357 
 358 inline bool ShenandoahRegionPartitions::is_empty(ShenandoahFreeSetPartitionId which_partition) const {
 359   assert (which_partition < NumPartitions, "selected free partition must be valid");
 360   return (leftmost(which_partition) > rightmost(which_partition));
 361 }
 362 
 363 inline idx_t ShenandoahRegionPartitions::find_index_of_next_available_region(
 364   ShenandoahFreeSetPartitionId which_partition, idx_t start_index) const {
 365   idx_t rightmost_idx = rightmost(which_partition);
 366   idx_t leftmost_idx = leftmost(which_partition);
 367   if ((rightmost_idx < leftmost_idx) || (start_index > rightmost_idx)) return _max;
 368   if (start_index < leftmost_idx) {
 369     start_index = leftmost_idx;
 370   }
 371   idx_t result = _membership[int(which_partition)].find_first_set_bit(start_index, rightmost_idx + 1);
 372   if (result > rightmost_idx) {
 373     result = _max;
 374   }
 375   assert (result >= start_index, "Requires progress");
 376   return result;
 377 }
 378 
 379 inline idx_t ShenandoahRegionPartitions::find_index_of_previous_available_region(
 380   ShenandoahFreeSetPartitionId which_partition, idx_t last_index) const {
 381   idx_t rightmost_idx = rightmost(which_partition);
 382   idx_t leftmost_idx = leftmost(which_partition);
 383   // if (leftmost_idx == max) then (last_index < leftmost_idx)
 384   if (last_index < leftmost_idx) return -1;
 385   if (last_index > rightmost_idx) {
 386     last_index = rightmost_idx;
 387   }
 388   idx_t result = _membership[int(which_partition)].find_last_set_bit(-1, last_index);
 389   if (result < leftmost_idx) {
 390     result = -1;
 391   }
 392   assert (result <= last_index, "Requires progress");
 393   return result;
 394 }
 395 
 396 inline idx_t ShenandoahRegionPartitions::find_index_of_next_available_cluster_of_regions(
 397   ShenandoahFreeSetPartitionId which_partition, idx_t start_index, size_t cluster_size) const {
 398   idx_t rightmost_idx = rightmost(which_partition);
 399   idx_t leftmost_idx = leftmost(which_partition);
 400   if ((rightmost_idx < leftmost_idx) || (start_index > rightmost_idx)) return _max;
 401   idx_t result = _membership[int(which_partition)].find_first_consecutive_set_bits(start_index, rightmost_idx + 1, cluster_size);
 402   if (result > rightmost_idx) {
 403     result = _max;
 404   }
 405   assert (result >= start_index, "Requires progress");
 406   return result;
 407 }
 408 
 409 inline idx_t ShenandoahRegionPartitions::find_index_of_previous_available_cluster_of_regions(
 410   ShenandoahFreeSetPartitionId which_partition, idx_t last_index, size_t cluster_size) const {
 411   idx_t leftmost_idx = leftmost(which_partition);
 412   // if (leftmost_idx == max) then (last_index < leftmost_idx)
 413   if (last_index < leftmost_idx) return -1;
 414   idx_t result = _membership[int(which_partition)].find_last_consecutive_set_bits(leftmost_idx - 1, last_index, cluster_size);
 415   if (result <= leftmost_idx) {
 416     result = -1;
 417   }
 418   assert (result <= last_index, "Requires progress");
 419   return result;
 420 }
 421 
 422 idx_t ShenandoahRegionPartitions::leftmost_empty(ShenandoahFreeSetPartitionId which_partition) {
 423   assert (which_partition < NumPartitions, "selected free partition must be valid");
 424   idx_t max_regions = _max;
 425   if (_leftmosts_empty[int(which_partition)] == _max) {
 426     return _max;
 427   }
 428   for (idx_t idx = find_index_of_next_available_region(which_partition, _leftmosts_empty[int(which_partition)]);
 429        idx < max_regions; ) {
 430     assert(in_free_set(which_partition, idx), "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, idx);
 431     if (_free_set->alloc_capacity(idx) == _region_size_bytes) {
 432       _leftmosts_empty[int(which_partition)] = idx;
 433       return idx;
 434     }
 435     idx = find_index_of_next_available_region(which_partition, idx + 1);
 436   }
 437   _leftmosts_empty[int(which_partition)] = _max;
 438   _rightmosts_empty[int(which_partition)] = -1;
 439   return _max;
 440 }
 441 
 442 idx_t ShenandoahRegionPartitions::rightmost_empty(ShenandoahFreeSetPartitionId which_partition) {
 443   assert (which_partition < NumPartitions, "selected free partition must be valid");
 444   if (_rightmosts_empty[int(which_partition)] < 0) {
 445     return -1;
 446   }
 447   for (idx_t idx = find_index_of_previous_available_region(which_partition, _rightmosts_empty[int(which_partition)]);
 448        idx >= 0; ) {
 449     assert(in_free_set(which_partition, idx), "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, idx);
 450     if (_free_set->alloc_capacity(idx) == _region_size_bytes) {
 451       _rightmosts_empty[int(which_partition)] = idx;
 452       return idx;
 453     }
 454     idx = find_index_of_previous_available_region(which_partition, idx - 1);
 455   }
 456   _leftmosts_empty[int(which_partition)] = _max;
 457   _rightmosts_empty[int(which_partition)] = -1;
 458   return -1;
 459 }
 460 
 461 
 462 #ifdef ASSERT
 463 void ShenandoahRegionPartitions::assert_bounds() {
 464 
 465   idx_t leftmosts[UIntNumPartitions];
 466   idx_t rightmosts[UIntNumPartitions];
 467   idx_t empty_leftmosts[UIntNumPartitions];
 468   idx_t empty_rightmosts[UIntNumPartitions];
 469 
 470   for (uint i = 0; i < UIntNumPartitions; i++) {
 471     leftmosts[i] = _max;
 472     empty_leftmosts[i] = _max;
 473     rightmosts[i] = -1;
 474     empty_rightmosts[i] = -1;
 475   }
 476 
 477   for (idx_t i = 0; i < _max; i++) {
 478     ShenandoahFreeSetPartitionId partition = membership(i);
 479     switch (partition) {
 480       case ShenandoahFreeSetPartitionId::NotFree:
 481         break;
 482 
 483       case ShenandoahFreeSetPartitionId::Mutator:
 484       case ShenandoahFreeSetPartitionId::Collector:

 485       {
 486         size_t capacity = _free_set->alloc_capacity(i);
 487         bool is_empty = (capacity == _region_size_bytes);
 488         assert(capacity > 0, "free regions must have allocation capacity");
 489         if (i < leftmosts[int(partition)]) {
 490           leftmosts[int(partition)] = i;
 491         }
 492         if (is_empty && (i < empty_leftmosts[int(partition)])) {
 493           empty_leftmosts[int(partition)] = i;
 494         }
 495         if (i > rightmosts[int(partition)]) {
 496           rightmosts[int(partition)] = i;
 497         }
 498         if (is_empty && (i > empty_rightmosts[int(partition)])) {
 499           empty_rightmosts[int(partition)] = i;
 500         }
 501         break;
 502       }
 503 

 504       default:
 505         ShouldNotReachHere();
 506     }
 507   }
 508 
 509   // Performance invariants. Failing these would not break the free partition, but performance would suffer.
 510   assert (leftmost(ShenandoahFreeSetPartitionId::Mutator) <= _max,
 511           "leftmost in bounds: "  SSIZE_FORMAT " < " SSIZE_FORMAT, leftmost(ShenandoahFreeSetPartitionId::Mutator),  _max);
 512   assert (rightmost(ShenandoahFreeSetPartitionId::Mutator) < _max,
 513           "rightmost in bounds: "  SSIZE_FORMAT " < " SSIZE_FORMAT, rightmost(ShenandoahFreeSetPartitionId::Mutator),  _max);
 514 
 515   assert (leftmost(ShenandoahFreeSetPartitionId::Mutator) == _max
 516           || partition_id_matches(leftmost(ShenandoahFreeSetPartitionId::Mutator), ShenandoahFreeSetPartitionId::Mutator),
 517           "leftmost region should be free: " SSIZE_FORMAT,  leftmost(ShenandoahFreeSetPartitionId::Mutator));
 518   assert (leftmost(ShenandoahFreeSetPartitionId::Mutator) == _max
 519           || partition_id_matches(rightmost(ShenandoahFreeSetPartitionId::Mutator), ShenandoahFreeSetPartitionId::Mutator),
 520           "rightmost region should be free: " SSIZE_FORMAT, rightmost(ShenandoahFreeSetPartitionId::Mutator));
 521 
  522   // If Mutator partition is empty, leftmosts will both equal max, rightmosts will both equal -1.
 523   // Likewise for empty region partitions.
 524   idx_t beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
 525   idx_t end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
 526   assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::Mutator),
 527           "free regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 528           beg_off, leftmost(ShenandoahFreeSetPartitionId::Mutator));
 529   assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::Mutator),
 530           "free regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 531           end_off, rightmost(ShenandoahFreeSetPartitionId::Mutator));
 532 
 533   beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
 534   end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
 535   assert (beg_off >= leftmost_empty(ShenandoahFreeSetPartitionId::Mutator),
 536           "free empty regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 537           beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Mutator));
 538   assert (end_off <= rightmost_empty(ShenandoahFreeSetPartitionId::Mutator),
 539           "free empty regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 540           end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Mutator));
 541 
 542   // Performance invariants. Failing these would not break the free partition, but performance would suffer.
 543   assert (leftmost(ShenandoahFreeSetPartitionId::Collector) <= _max, "leftmost in bounds: "  SSIZE_FORMAT " < " SSIZE_FORMAT,
 544           leftmost(ShenandoahFreeSetPartitionId::Collector),  _max);
 545   assert (rightmost(ShenandoahFreeSetPartitionId::Collector) < _max, "rightmost in bounds: "  SSIZE_FORMAT " < " SSIZE_FORMAT,
 546           rightmost(ShenandoahFreeSetPartitionId::Collector),  _max);
 547 
 548   assert (leftmost(ShenandoahFreeSetPartitionId::Collector) == _max
 549           || partition_id_matches(leftmost(ShenandoahFreeSetPartitionId::Collector), ShenandoahFreeSetPartitionId::Collector),
 550           "leftmost region should be free: " SSIZE_FORMAT,  leftmost(ShenandoahFreeSetPartitionId::Collector));
 551   assert (leftmost(ShenandoahFreeSetPartitionId::Collector) == _max
 552           || partition_id_matches(rightmost(ShenandoahFreeSetPartitionId::Collector), ShenandoahFreeSetPartitionId::Collector),
 553           "rightmost region should be free: " SSIZE_FORMAT, rightmost(ShenandoahFreeSetPartitionId::Collector));
 554 
  555   // If Collector partition is empty, leftmosts will both equal max, rightmosts will both equal -1.
 556   // Likewise for empty region partitions.
 557   beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
 558   end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
 559   assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::Collector),
 560           "free regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 561           beg_off, leftmost(ShenandoahFreeSetPartitionId::Collector));
 562   assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::Collector),
 563           "free regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 564           end_off, rightmost(ShenandoahFreeSetPartitionId::Collector));
 565 
 566   beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
 567   end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
 568   assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
 569           "free empty regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 570           beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Collector));
 571   assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
 572           "free empty regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 573           end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Collector));
 574 }
 575 #endif
 576 
 577 ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
 578   _heap(heap),
 579   _partitions(max_regions, this),
 580   _right_to_left_bias(false),
 581   _alloc_bias_weight(0)
 582 {
 583   clear_internal();
 584 }
 585 
 586 HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
 587   shenandoah_assert_heaplocked();
 588 
 589   // Scan the bitmap looking for a first fit.
 590   //
 591   // Leftmost and rightmost bounds provide enough caching to quickly find a region from which to allocate.
 592   //
 593   // Allocations are biased: GC allocations are taken from the high end of the heap.  Regular (and TLAB)
 594   // mutator allocations are taken from the middle of heap, below the memory reserved for Collector.
 595   // Humongous mutator allocations are taken from the bottom of the heap.
 596   //
 597   // Free set maintains mutator and collector partitions.  Mutator can only allocate from the
 598   // Mutator partition.  Collector prefers to allocate from the Collector partition, but may steal
 599   // regions from the Mutator partition if the Collector partition has been depleted.
 600 
 601   switch (req.type()) {
 602     case ShenandoahAllocRequest::_alloc_tlab:
 603     case ShenandoahAllocRequest::_alloc_shared: {
 604       // Try to allocate in the mutator view
 605       if (_alloc_bias_weight-- <= 0) {
  606         // We have observed that regions not collected in the previous GC cycle tend to congregate at one end or the other
  607         // of the heap.  Typically, these are the more recently engaged regions, and the objects in these regions have not
 608         // yet had a chance to die (and/or are treated as floating garbage).  If we use the same allocation bias on each
 609         // GC pass, these "most recently" engaged regions for GC pass N will also be the "most recently" engaged regions
 610         // for GC pass N+1, and the relatively large amount of live data and/or floating garbage introduced
 611         // during the most recent GC pass may once again prevent the region from being collected.  We have found that
 612         // alternating the allocation behavior between GC passes improves evacuation performance by 3-7% on certain
 613         // benchmarks.  In the best case, this has the effect of consuming these partially consumed regions before
 614         // the start of the next mark cycle so all of their garbage can be efficiently reclaimed.
 615         //
 616         // First, finish consuming regions that are already partially consumed so as to more tightly limit ranges of
 617         // available regions.  Other potential benefits:
 618         //  1. Eventual collection set has fewer regions because we have packed newly allocated objects into fewer regions
 619         //  2. We preserve the "empty" regions longer into the GC cycle, reducing likelihood of allocation failures
 620         //     late in the GC cycle.
 621         idx_t non_empty_on_left = (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator)
 622                                      - _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator));
 623         idx_t non_empty_on_right = (_partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator)
 624                                       - _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator));
 625         _right_to_left_bias = (non_empty_on_right > non_empty_on_left);
 626         _alloc_bias_weight = _InitialAllocBiasWeight;
 627       }
 628       if (_right_to_left_bias) {
 629         // Allocate within mutator free from high memory to low so as to preserve low memory for humongous allocations
 630         if (!_partitions.is_empty(ShenandoahFreeSetPartitionId::Mutator)) {
 631           // Use signed idx.  Otherwise, loop will never terminate.
 632           idx_t leftmost = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator);
 633           for (idx_t idx = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator); idx >= leftmost; ) {
 634             assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx),
 635                    "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, idx);
 636             ShenandoahHeapRegion* r = _heap->get_region(idx);
 637             // try_allocate_in() increases used if the allocation is successful.
 638             HeapWord* result;
 639             size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab)? req.min_size(): req.size();
 640             if ((alloc_capacity(r) >= min_size) && ((result = try_allocate_in(r, req, in_new_region)) != nullptr)) {
 641               return result;
 642             }
 643             idx = _partitions.find_index_of_previous_available_region(ShenandoahFreeSetPartitionId::Mutator, idx - 1);
 644           }
 645         }
 646       } else {
 647         // Allocate from low to high memory.  This keeps the range of fully empty regions more tightly packed.
 648         // Note that the most recently allocated regions tend not to be evacuated in a given GC cycle.  So this
 649         // tends to accumulate "fragmented" uncollected regions in high memory.
 650         if (!_partitions.is_empty(ShenandoahFreeSetPartitionId::Mutator)) {
 651           // Use signed idx.  Otherwise, loop will never terminate.
 652           idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
 653           for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); idx <= rightmost; ) {
 654             assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx),
 655                    "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, idx);
 656             ShenandoahHeapRegion* r = _heap->get_region(idx);
 657             // try_allocate_in() increases used if the allocation is successful.
 658             HeapWord* result;
 659             size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab)? req.min_size(): req.size();
 660             if ((alloc_capacity(r) >= min_size) && ((result = try_allocate_in(r, req, in_new_region)) != nullptr)) {
 661               return result;
 662             }
 663             idx = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, idx + 1);
 664           }
 665         }
 666       }
 667       // There is no recovery. Mutator does not touch collector view at all.
 668       break;
 669     }
 670     case ShenandoahAllocRequest::_alloc_gclab:
 671       // GCLABs are for evacuation so we must be in evacuation phase.
 672 
 673     case ShenandoahAllocRequest::_alloc_shared_gc: {
 674       // Fast-path: try to allocate in the collector view first
 675       idx_t leftmost_collector = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector);
 676       for (idx_t idx = _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector); idx >= leftmost_collector; ) {
 677         assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, idx),
 678                "Boundaries or find_prev_last_bit failed: " SSIZE_FORMAT, idx);
 679         HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
 680         if (result != nullptr) {
 681           return result;
 682         }
 683         idx = _partitions.find_index_of_previous_available_region(ShenandoahFreeSetPartitionId::Collector, idx - 1);
 684       }
 685 
 686       // No dice. Can we borrow space from mutator view?
 687       if (!ShenandoahEvacReserveOverflow) {
 688         return nullptr;
 689       }
 690 
 691       // Try to steal an empty region from the mutator view.
 692       idx_t leftmost_mutator_empty = _partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator);
 693       for (idx_t idx = _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator); idx >= leftmost_mutator_empty; ) {
 694         assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx),
 695                "Boundaries or find_prev_last_bit failed: " SSIZE_FORMAT, idx);
 696         ShenandoahHeapRegion* r = _heap->get_region(idx);
 697         if (can_allocate_from(r)) {
 698           flip_to_gc(r);
 699           HeapWord *result = try_allocate_in(r, req, in_new_region);
 700           if (result != nullptr) {
 701             log_debug(gc)("Flipped region " SIZE_FORMAT " to gc for request: " PTR_FORMAT, idx, p2i(&req));
 702             return result;
 703           }
 704         }
 705         idx = _partitions.find_index_of_previous_available_region(ShenandoahFreeSetPartitionId::Mutator, idx - 1);
 706       }
 707 
  708       // No dice. Do not try to mix mutator and GC allocations, because adjusting the region's update watermark (UWM)
  709       // due to GC allocations would expose unparsable mutator allocations.

 710       break;
 711     }
 712     default:
 713       ShouldNotReachHere();
 714   }
 715   return nullptr;
 716 }
 717 
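The allocation-bias decision in the mutator path above compares how many non-empty regions sit at each end of the Mutator interval. A minimal standalone sketch of that arithmetic, using hypothetical region indexes rather than values from a real heap:

#include <cassert>
#include <cstdint>

int main() {
  // Hypothetical Mutator interval bounds, in region indexes.
  int64_t leftmost        = 5;    // leftmost region still in the Mutator partition
  int64_t leftmost_empty  = 12;   // leftmost fully empty Mutator region
  int64_t rightmost_empty = 90;   // rightmost fully empty Mutator region
  int64_t rightmost       = 100;  // rightmost region still in the Mutator partition

  // Partially used regions cluster outside the empty core of the interval.
  int64_t non_empty_on_left  = leftmost_empty - leftmost;    // 7
  int64_t non_empty_on_right = rightmost - rightmost_empty;  // 10

  // More partially used regions on the right: allocate right-to-left so they are
  // consumed first, mirroring the _right_to_left_bias computation above.
  bool right_to_left_bias = (non_empty_on_right > non_empty_on_left);
  assert(right_to_left_bias);
  return 0;
}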
 718 HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
 719   assert (has_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());
 720   if (_heap->is_concurrent_weak_root_in_progress() && r->is_trash()) {
 721     return nullptr;
 722   }
 723 
 724   HeapWord* result = nullptr;
 725   try_recycle_trashed(r);
 726   in_new_region = r->is_empty();

 727 
 728   if (in_new_region) {
 729     log_debug(gc)("Using new region (" SIZE_FORMAT ") for %s (" PTR_FORMAT ").",
 730                        r->index(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(&req));
 731   }
 732 
 733   // req.size() is in words, r->free() is in bytes.
 734   if (req.is_lab_alloc()) {
 735     // This is a GCLAB or a TLAB allocation
 736     size_t adjusted_size = req.size();
 737     size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
 738     if (adjusted_size > free) {
 739       adjusted_size = free;
 740     }
 741     if (adjusted_size >= req.min_size()) {
 742       result = r->allocate(adjusted_size, req.type());
 743       log_debug(gc)("Allocated " SIZE_FORMAT " words (adjusted from " SIZE_FORMAT ") for %s @" PTR_FORMAT
 744                           " from %s region " SIZE_FORMAT ", free bytes remaining: " SIZE_FORMAT,
 745                           adjusted_size, req.size(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(result),
 746                           _partitions.partition_membership_name(r->index()), r->index(), r->free());
 747       assert (result != nullptr, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, adjusted_size);
 748       req.set_actual_size(adjusted_size);
 749     } else {
 750       log_trace(gc, free)("Failed to shrink TLAB or GCLAB request (" SIZE_FORMAT ") in region " SIZE_FORMAT " to " SIZE_FORMAT
 751                           " because min_size() is " SIZE_FORMAT, req.size(), r->index(), adjusted_size, req.min_size());
 752     }
 753   } else {
 754     size_t size = req.size();
 755     result = r->allocate(size, req.type());
 756     if (result != nullptr) {
 757       // Record actual allocation size
 758       log_debug(gc)("Allocated " SIZE_FORMAT " words for %s @" PTR_FORMAT
 759                           " from %s region " SIZE_FORMAT ", free bytes remaining: " SIZE_FORMAT,
 760                           size, ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(result),
 761                           _partitions.partition_membership_name(r->index()),  r->index(), r->free());
 762       req.set_actual_size(size);
 763     }
 764   }
 765 

 766   if (result != nullptr) {
 767     // Allocation successful, bump stats:
 768     if (req.is_mutator_alloc()) {
 769       _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, req.actual_size() * HeapWordSize);

 770     } else {
 771       assert(req.is_gc_alloc(), "Should be gc_alloc since req wasn't mutator alloc");
 772 
 773       // For GC allocations, we advance update_watermark because the objects relocated into this memory during
 774       // evacuation are not updated during evacuation.
 775       r->set_update_watermark(r->top());
 776     }
 777   }
 778 
 779   static const size_t min_capacity = (size_t) (ShenandoahHeapRegion::region_size_bytes() * (1.0 - 1.0 / ShenandoahEvacWaste));
 780   size_t ac = alloc_capacity(r);
 781 
 782   if (((result == nullptr) && (ac < min_capacity)) || (alloc_capacity(r) < PLAB::min_size() * HeapWordSize)) {
  783     // Regardless of whether this allocation succeeded, if the remaining memory is less than PLAB::min_size(), retire this region.
 784     // Note that retire_from_partition() increases used to account for waste.
 785 
  786     // Also, if this allocation request failed and the memory consumed within this region * ShenandoahEvacWaste > region size,
 787     // then retire the region so that subsequent searches can find available memory more quickly.
 788 

 789     size_t idx = r->index();
 790     _partitions.retire_from_partition(req.is_mutator_alloc()?
 791                                       ShenandoahFreeSetPartitionId::Mutator: ShenandoahFreeSetPartitionId::Collector,
 792                                       idx, r->used());
 793     _partitions.assert_bounds();
 794   }
 795   return result;
 796 }
 797 
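The LAB sizing in try_allocate_in() above mixes units (the request is in words, r->free() is in bytes), shrinks the request to what the region can hold, and gives up only when the shrunk size falls below the request's minimum. A minimal standalone sketch with hypothetical sizes (LogHeapWordSize and MinObjAlignment values here are illustrative assumptions):

#include <cassert>
#include <cstddef>

int main() {
  const size_t LogHeapWordSize = 3;      // 8-byte heap words (64-bit), for illustration
  const size_t MinObjAlignment = 1;      // hypothetical alignment, in words

  size_t requested_words   = 64 * 1024;  // TLAB request size (words)
  size_t min_words         = 2 * 1024;   // the request's min_size() (words)
  size_t region_free_bytes = 96 * 1024;  // bytes left in the candidate region

  // Convert free bytes to words and align down, as try_allocate_in() does.
  size_t free_words = (region_free_bytes >> LogHeapWordSize) / MinObjAlignment * MinObjAlignment;

  size_t adjusted_words = requested_words;
  if (adjusted_words > free_words) {
    adjusted_words = free_words;         // shrink the LAB to what the region can hold
  }
  bool can_allocate = (adjusted_words >= min_words);   // otherwise the region is skipped
  assert(free_words == 12 * 1024);
  assert(can_allocate && adjusted_words == 12 * 1024);
  return 0;
}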
 798 HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
 799   assert(req.is_mutator_alloc(), "All humongous allocations are performed by mutator");
 800   shenandoah_assert_heaplocked();
 801 
 802   size_t words_size = req.size();
 803   idx_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
 804 
 805   // Check if there are enough regions left to satisfy allocation.
 806   if (num > (idx_t) _partitions.count(ShenandoahFreeSetPartitionId::Mutator)) {
 807     return nullptr;
 808   }
 809 
 810   idx_t start_range = _partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator);
 811   idx_t end_range = _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator) + 1;
 812   idx_t last_possible_start = end_range - num;
 813 
  814   // Find the contiguous interval of $num regions, starting from $beg and ending in $end,
 815   // inclusive. Contiguous allocations are biased to the beginning.
 816   idx_t beg = _partitions.find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId::Mutator,
 817                                                                             start_range, num);
 818   if (beg > last_possible_start) {
 819     // Hit the end, goodbye
 820     return nullptr;
 821   }
 822   idx_t end = beg;
 823 
 824   while (true) {
 825     // We've confirmed num contiguous regions belonging to Mutator partition, so no need to confirm membership.
 826     // If region is not completely free, the current [beg; end] is useless, and we may fast-forward.  If we can extend
 827     // the existing range, we can exploit that certain regions are already known to be in the Mutator free set.
 828     while (!can_allocate_from(_heap->get_region(end))) {
 829       // region[end] is not empty, so we restart our search after region[end]
 830       idx_t slide_delta = end + 1 - beg;
 831       if (beg + slide_delta > last_possible_start) {
 832         // no room to slide
 833         return nullptr;
 834       }
 835       for (idx_t span_end = beg + num; slide_delta > 0; slide_delta--) {
 836         if (!_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, span_end)) {
 837           beg = _partitions.find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId::Mutator,
 838                                                                             span_end + 1, num);
 839           break;
 840         } else {
 841           beg++;
 842           span_end++;
 843         }
 844       }
 845       // Here, either beg identifies a range of num regions all of which are in the Mutator free set, or beg > last_possible_start
 846       if (beg > last_possible_start) {
 847         // Hit the end, goodbye
 848         return nullptr;
 849       }
 850       end = beg;
 851     }
 852 
 853     if ((end - beg + 1) == num) {
 854       // found the match
 855       break;
 856     }
 857 
 858     end++;
 859   }
 860 
 861   size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
 862   // Initialize regions:
 863   for (idx_t i = beg; i <= end; i++) {
 864     ShenandoahHeapRegion* r = _heap->get_region(i);
 865     try_recycle_trashed(r);
 866 
 867     assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous");
 868     assert(r->is_empty(), "Should be empty");
 869 
 870     if (i == beg) {
 871       r->make_humongous_start();
 872     } else {
 873       r->make_humongous_cont();
 874     }
 875 
 876     // Trailing region may be non-full, record the remainder there
 877     size_t used_words;
 878     if ((i == end) && (remainder != 0)) {
 879       used_words = remainder;
 880     } else {
 881       used_words = ShenandoahHeapRegion::region_size_words();
 882     }
 883 
 884     r->set_top(r->bottom() + used_words);
 885   }
 886 
 887   if (remainder != 0) {
 888     // Record this remainder as allocation waste
 889     _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);
 890   }
 891 
 892   // retire_range_from_partition() will adjust bounds on Mutator free set if appropriate
 893   _partitions.retire_range_from_partition(ShenandoahFreeSetPartitionId::Mutator, beg, end);
 894 
 895   size_t total_humongous_size = ShenandoahHeapRegion::region_size_bytes() * num;
 896   _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, total_humongous_size);
 897   _partitions.assert_bounds();
 898   req.set_actual_size(words_size);
 899   return _heap->get_region(beg)->bottom();
 900 }
 901 
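The humongous sizing above (regions required for the request, plus the remainder recorded in the trailing region and reported as waste) reduces to simple arithmetic. A standalone sketch with hypothetical region geometry, not the real ShenandoahHeapRegion sizes:

#include <cassert>
#include <cstddef>

int main() {
  // Hypothetical geometry: 512K-word regions (4 MB with 8-byte words).
  const size_t region_size_words      = 512 * 1024;
  const size_t region_size_words_mask = region_size_words - 1;  // region size is a power of two

  size_t words_size = 1100 * 1024;   // humongous request, in words

  // Number of regions required, rounding up (the role of required_regions()).
  size_t num = (words_size + region_size_words - 1) / region_size_words;   // 3 regions

  // Only the trailing region is partially used; the remainder becomes its top offset,
  // and the rest of that region is reported as mutator allocation waste.
  size_t remainder   = words_size & region_size_words_mask;                // 76K words
  size_t waste_words = (remainder == 0) ? 0 : region_size_words - remainder;

  assert(num == 3);
  assert(remainder == 76 * 1024);
  assert(waste_words == 436 * 1024);
  return 0;
}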
 902 void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
 903   if (r->is_trash()) {
 904     _heap->decrease_used(r->used());
 905     r->recycle();
 906   }
 907 }
 908 
 909 void ShenandoahFreeSet::recycle_trash() {
  910   // lock is not reentrant, check we don't have it
 911   shenandoah_assert_not_heaplocked();
 912 
 913   for (size_t i = 0; i < _heap->num_regions(); i++) {
 914     ShenandoahHeapRegion* r = _heap->get_region(i);
 915     if (r->is_trash()) {
 916       ShenandoahHeapLocker locker(_heap->lock());
 917       try_recycle_trashed(r);
 918     }
 919     SpinPause(); // allow allocators to take the lock
 920   }
 921 }
 922 
 923 void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
 924   size_t idx = r->index();
 925 
 926   assert(_partitions.partition_id_matches(idx, ShenandoahFreeSetPartitionId::Mutator), "Should be in mutator view");
 927   assert(can_allocate_from(r), "Should not be allocated");
 928 
 929   size_t ac = alloc_capacity(r);
 930   _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
 931                                                ShenandoahFreeSetPartitionId::Collector, ac);
 932   _partitions.assert_bounds();
 933 
 934   // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
 935   // to recycle trash before attempting to allocate anything in the region.
 936 }
 937 
 938 void ShenandoahFreeSet::clear() {
 939   shenandoah_assert_heaplocked();
 940   clear_internal();
 941 }
 942 
 943 void ShenandoahFreeSet::clear_internal() {
 944   _partitions.make_all_regions_unavailable();
 945 }
 946 
 947 void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &cset_regions) {
 948 
 949   cset_regions = 0;
 950   clear_internal();
 951   size_t region_size_bytes = _partitions.region_size_bytes();
 952   size_t max_regions = _partitions.max_regions();
 953 
 954   size_t mutator_leftmost = max_regions;
 955   size_t mutator_rightmost = 0;
 956   size_t mutator_leftmost_empty = max_regions;
 957   size_t mutator_rightmost_empty = 0;
 958 
 959   size_t mutator_regions = 0;
 960   size_t mutator_used = 0;
 961 
 962   for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
 963     ShenandoahHeapRegion* region = _heap->get_region(idx);
 964     if (region->is_trash()) {
  965       // Trashed regions represent regions that had been in the collection set but have not yet been "cleaned up".
 966       // The cset regions are not "trashed" until we have finished update refs.
 967       cset_regions++;
 968     }
 969     if (region->is_alloc_allowed() || region->is_trash()) {
 970 
 971       // Do not add regions that would almost surely fail allocation
 972       size_t ac = alloc_capacity(region);
 973       if (ac > PLAB::min_size() * HeapWordSize) {
 974         _partitions.raw_assign_membership(idx, ShenandoahFreeSetPartitionId::Mutator);
 975 
 976         if (idx < mutator_leftmost) {
 977           mutator_leftmost = idx;
 978         }
 979         if (idx > mutator_rightmost) {
 980           mutator_rightmost = idx;
 981         }
 982         if (ac == region_size_bytes) {
 983           if (idx < mutator_leftmost_empty) {
 984             mutator_leftmost_empty = idx;
 985           }
 986           if (idx > mutator_rightmost_empty) {
 987             mutator_rightmost_empty = idx;
 988           }
 989         }
 990         mutator_regions++;
 991         mutator_used += (region_size_bytes - ac);
 992 
 993         log_debug(gc)(
 994           "  Adding Region " SIZE_FORMAT " (Free: " SIZE_FORMAT "%s, Used: " SIZE_FORMAT "%s) to mutator partition",
 995           idx, byte_size_in_proper_unit(region->free()), proper_unit_for_byte_size(region->free()),
 996           byte_size_in_proper_unit(region->used()), proper_unit_for_byte_size(region->used()));
 997       }
 998     }
 999   }
1000   _partitions.establish_mutator_intervals(mutator_leftmost, mutator_rightmost, mutator_leftmost_empty, mutator_rightmost_empty,
1001                                           mutator_regions, mutator_used);
1002 }
1003 
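The filter above only admits regions whose allocation capacity exceeds PLAB::min_size() in bytes, so nearly full regions never enter the Mutator partition. A minimal sketch of that gate, with a hypothetical word size and a hypothetical minimum PLAB size (the real value is platform-dependent):

#include <cassert>
#include <cstddef>

int main() {
  const size_t HeapWordSize        = 8;     // 64-bit heap words, for illustration
  const size_t plab_min_size_words = 258;   // hypothetical PLAB::min_size(), in words

  size_t region_size_bytes = 4 * 1024 * 1024;
  size_t region_used_bytes = region_size_bytes - 1024;   // nearly full region
  size_t alloc_capacity    = region_size_bytes - region_used_bytes;

  // Regions whose remaining capacity could not even hold a minimal PLAB are left out
  // of the Mutator partition, mirroring find_regions_with_alloc_capacity().
  bool admit = alloc_capacity > plab_min_size_words * HeapWordSize;
  assert(!admit);   // 1024 bytes < 258 * 8 = 2064 bytes, so this region is skipped
  return 0;
}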
1004 void ShenandoahFreeSet::move_regions_from_collector_to_mutator(size_t max_xfer_regions) {
1005   size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1006   size_t collector_empty_xfer = 0;
1007   size_t collector_not_empty_xfer = 0;

1008 
1009   // Process empty regions within the Collector free partition
1010   if ((max_xfer_regions > 0) &&
1011       (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Collector)
1012        <= _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Collector))) {
1013     ShenandoahHeapLocker locker(_heap->lock());
1014     idx_t rightmost = _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Collector);
1015     for (idx_t idx = _partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Collector);
1016          (max_xfer_regions > 0) && (idx <= rightmost); ) {
1017       assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, idx),
1018              "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, idx);
1019       // Note: can_allocate_from() is true only for regions that are entirely empty (or trash that can be recycled immediately)
1020       if (can_allocate_from(idx)) {
1021         _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Collector,
1022                                                      ShenandoahFreeSetPartitionId::Mutator, region_size_bytes);
1023         max_xfer_regions--;
1024         collector_empty_xfer += region_size_bytes;
1025       }
1026       idx = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Collector, idx + 1);
1027     }
1028   }
1029 
1030   // If there are any non-empty regions within Collector partition, we can also move them to the Mutator free partition
1031   if ((max_xfer_regions > 0) && (_partitions.leftmost(ShenandoahFreeSetPartitionId::Collector)
1032                                  <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector))) {
1033     ShenandoahHeapLocker locker(_heap->lock());
1034     idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector);
1035     for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector);
1036          (max_xfer_regions > 0) && (idx <= rightmost); ) {
1037       assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, idx),
1038              "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, idx);
1039       size_t ac = alloc_capacity(idx);
1040       if (ac > 0) {
1041         _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Collector,
1042                                                      ShenandoahFreeSetPartitionId::Mutator, ac);
1043         max_xfer_regions--;
1044         collector_not_empty_xfer += ac;
1045       }
1046       idx = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Collector, idx + 1);
1047     }
1048   }
1049 
1050   size_t collector_xfer = collector_empty_xfer + collector_not_empty_xfer;
1051   log_info(gc)("At start of update refs, moving " SIZE_FORMAT "%s to Mutator free partition from Collector Reserve",
1052                byte_size_in_proper_unit(collector_xfer), proper_unit_for_byte_size(collector_xfer));
1053 }
1054 
1055 void ShenandoahFreeSet::prepare_to_rebuild(size_t &cset_regions) {
1056   shenandoah_assert_heaplocked();
1057 
1058   log_debug(gc)("Rebuilding FreeSet");






1059 
1060   // This places regions that have alloc_capacity into the mutator partition.
1061   find_regions_with_alloc_capacity(cset_regions);

1062 }
1063 
1064 void ShenandoahFreeSet::finish_rebuild(size_t cset_regions) {
1065   shenandoah_assert_heaplocked();

1066 
1067   // Our desire is to reserve this much memory for future evacuation.  We may end up reserving less, if
1068   // memory is in short supply.
1069 
1070   size_t reserve = _heap->max_capacity() * ShenandoahEvacReserve / 100;
1071   size_t available_in_collector_partition = (_partitions.capacity_of(ShenandoahFreeSetPartitionId::Collector)
1072                                              - _partitions.used_by(ShenandoahFreeSetPartitionId::Collector));
1073   size_t additional_reserve;
1074   if (available_in_collector_partition < reserve) {
1075     additional_reserve = reserve - available_in_collector_partition;
1076   } else {
1077     additional_reserve = 0;


1078   }
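  // Worked example (assumed values): with max_capacity() == 1024 MiB and ShenandoahEvacReserve == 5,
  // reserve == 1024 MiB * 5 / 100 == ~51 MiB.  If the Collector partition currently exposes 20 MiB of
  // unused capacity, additional_reserve == ~31 MiB; if it already exposes 60 MiB, additional_reserve == 0.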
1079 
1080   reserve_regions(reserve);
1081   _partitions.assert_bounds();

1082   log_status();
1083 }
1084 
1085 void ShenandoahFreeSet::rebuild() {
1086   size_t cset_regions;
1087   prepare_to_rebuild(cset_regions);
1088   finish_rebuild(cset_regions);































































1089 }
1090 
1091 void ShenandoahFreeSet::reserve_regions(size_t to_reserve) {





1092   for (size_t i = _heap->num_regions(); i > 0; i--) {
1093     size_t idx = i - 1;
1094     ShenandoahHeapRegion* r = _heap->get_region(idx);
1095 
1096     if (!_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx)) {
1097       continue;
1098     }
1099 
1100     size_t ac = alloc_capacity(r);
1101     assert (ac > 0, "Membership in free partition implies allocation capacity");




1102 
1103     bool move_to_collector = _partitions.available_in(ShenandoahFreeSetPartitionId::Collector) < to_reserve;
1104     if (!move_to_collector) {
1105       // We've satisfied to_reserve
1106       break;
1107     }
1108 
1109     if (move_to_collector) {









1110       // Note: In a previous implementation, regions were only placed into the survivor space (collector_is_free) if
1111       // they were entirely empty.  This has the effect of causing new Mutator allocations to reside next to objects
1112       // that have already survived at least one GC, mixing ephemeral with longer-lived objects in the same region.
1113       // Any objects that have survived a GC are less likely to immediately become garbage, so a region that contains
1114       // survivor objects is less likely to be selected for the collection set.  This alternative implementation allows
1115       // survivor regions to continue accumulating other survivor objects, and makes it more likely that ephemeral objects
1116       // occupy regions composed entirely of ephemeral objects.  These regions are highly likely to be included in the next
1117       // collection set, and they are easily evacuated because they have a low density of live objects.
1118       _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
1119                                                    ShenandoahFreeSetPartitionId::Collector, ac);
1120       log_debug(gc)("  Shifting region " SIZE_FORMAT " from mutator_free to collector_free", idx);
1121     }
1122   }
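  // Illustrative walk (assumed values): with to_reserve == 51 MiB and 4 MiB regions, the loop above
  // starts at the highest-indexed Mutator region and shifts regions into the Collector partition until
  // the Collector partition's available memory reaches 51 MiB (roughly 13 fully empty regions), or
  // until the Mutator candidates are exhausted.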
1123 
1124   if (LogTarget(Info, gc, free)::is_enabled()) {
1125     size_t reserve = _partitions.capacity_of(ShenandoahFreeSetPartitionId::Collector);
1126     if (reserve < to_reserve) {
1127       log_debug(gc)("Wanted " PROPERFMT " for young reserve, but only reserved: " PROPERFMT,
1128                     PROPERFMTARGS(to_reserve), PROPERFMTARGS(reserve));





1129     }
1130   }
1131 }
1132 
1133 void ShenandoahFreeSet::log_status() {
1134   shenandoah_assert_heaplocked();
1135 
1136 #ifdef ASSERT
1137   // Dump of the FreeSet details is only enabled if assertions are enabled
1138   if (LogTarget(Debug, gc, free)::is_enabled()) {
1139 #define BUFFER_SIZE 80




1140     size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();


1141     size_t consumed_collector = 0;
1142     size_t available_collector = 0;
1143     size_t consumed_mutator = 0;


1144     size_t available_mutator = 0;


1145 
1146     char buffer[BUFFER_SIZE];
1147     for (uint i = 0; i < BUFFER_SIZE; i++) {
1148       buffer[i] = '\0';
1149     }
1150     log_debug(gc)("FreeSet map legend: M/m:mutator_free (whole/partial) C/c:collector_free (whole/partial) h:humongous _:retired");
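    // A hypothetical line of the map emitted below might look like:
    //      0: MMMMmm__hhhh____CCcc
    // meaning regions 0-3 are fully free for the mutator, 4-5 partially used by the mutator,
    // 8-11 humongous, 16-17 fully free for the collector, 18-19 partially used by the collector,
    // and the remaining positions retired.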
1151     log_debug(gc)(" mutator free range [" SIZE_FORMAT ".." SIZE_FORMAT "],"
1152                   " collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "]",
1153                   _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
1154                   _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
1155                   _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector),
1156                   _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector));



1157 
1158     for (uint i = 0; i < _heap->num_regions(); i++) {
1159       ShenandoahHeapRegion *r = _heap->get_region(i);
1160       uint idx = i % 64;
1161       if ((i != 0) && (idx == 0)) {
1162         log_debug(gc)(" %6u: %s", i-64, buffer);
1163       }
1164       if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, i)) {

1165         size_t capacity = alloc_capacity(r);
1166         available_mutator += capacity;
1167         consumed_mutator += region_size_bytes - capacity;
1168         buffer[idx] = (capacity == region_size_bytes)? 'M': 'm';
1169       } else if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, i)) {

1170         size_t capacity = alloc_capacity(r);
1171         available_collector += capacity;
1172         consumed_collector += region_size_bytes - capacity;
1173         buffer[idx] = (capacity == region_size_bytes)? 'C': 'c';





1174       } else if (r->is_humongous()) {
1175         buffer[idx] = 'h';






1176       } else {
1177         buffer[idx] = '_';








1178       }
1179     }
1180     uint remnant = _heap->num_regions() % 64;
1181     if (remnant > 0) {
1182       buffer[remnant] = '\0';
1183     } else {
1184       remnant = 64;
1185     }
1186     log_debug(gc)(" %6u: %s", (uint) (_heap->num_regions() - remnant), buffer);


1187   }
1188 #endif
1189 
1190   LogTarget(Info, gc, free) lt;
1191   if (lt.is_enabled()) {
1192     ResourceMark rm;
1193     LogStream ls(lt);
1194 
1195     {
1196       idx_t last_idx = 0;
1197       size_t max = 0;
1198       size_t max_contig = 0;
1199       size_t empty_contig = 0;
1200 
1201       size_t total_used = 0;
1202       size_t total_free = 0;
1203       size_t total_free_ext = 0;
1204 
1205       for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator);
1206            idx <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator); idx++) {
1207         if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx)) {
1208           ShenandoahHeapRegion *r = _heap->get_region(idx);
1209           size_t free = alloc_capacity(r);
1210           max = MAX2(max, free);
1211           if (r->is_empty()) {
1212             total_free_ext += free;
1213             if (last_idx + 1 == idx) {
1214               empty_contig++;
1215             } else {
1216               empty_contig = 1;
1217             }
1218           } else {
1219             empty_contig = 0;
1220           }
1221           total_used += r->used();
1222           total_free += free;
1223           max_contig = MAX2(max_contig, empty_contig);
1224           last_idx = idx;
1225         }
1226       }
1227 
1228       size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
1229       size_t free = capacity() - used();
1230 
1231       // Since certain regions that belonged to the Mutator free partition at the time of the most recent rebuild may have
1232       // been retired, the sum of used and capacities within regions that are still in the Mutator free partition may not
1233       // match the internally tracked values of used() and free().
1234       assert(free == total_free, "Free memory should match");
1235 
1236       ls.print("Free: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s regular, " SIZE_FORMAT "%s humongous, ",
1237                byte_size_in_proper_unit(free),          proper_unit_for_byte_size(free),
1238                byte_size_in_proper_unit(max),           proper_unit_for_byte_size(max),
1239                byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous)
1240       );
1241 
1242       ls.print("Frag: ");
1243       size_t frag_ext;
1244       if (total_free_ext > 0) {
1245         frag_ext = 100 - (100 * max_humongous / total_free_ext);
1246       } else {
1247         frag_ext = 0;
1248       }
1249       ls.print(SIZE_FORMAT "%% external, ", frag_ext);
1250 
1251       size_t frag_int;
1252       if (_partitions.count(ShenandoahFreeSetPartitionId::Mutator) > 0) {
1253         frag_int = (100 * (total_used / _partitions.count(ShenandoahFreeSetPartitionId::Mutator))
1254                     / ShenandoahHeapRegion::region_size_bytes());
1255       } else {
1256         frag_int = 0;
1257       }
1258       ls.print(SIZE_FORMAT "%% internal; ", frag_int);
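      // Worked example (assumed values): with 4 MiB regions, 10 empty Mutator regions
      // (total_free_ext == 40 MiB) whose largest contiguous run spans 6 regions
      // (max_humongous == 24 MiB), frag_ext == 100 - (100 * 24 / 40) == 40%.  If the Mutator
      // partition holds 20 regions with total_used == 20 MiB, the average used per region is
      // 1 MiB and frag_int == (100 * 1 MiB) / 4 MiB == 25%.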
1259       ls.print("Used: " SIZE_FORMAT "%s, Mutator Free: " SIZE_FORMAT,
1260                byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used),
1261                _partitions.count(ShenandoahFreeSetPartitionId::Mutator));
1262     }
1263 
1264     {
1265       size_t max = 0;
1266       size_t total_free = 0;
1267       size_t total_used = 0;
1268 
1269       for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector);
1270            idx <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector); idx++) {
1271         if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, idx)) {
1272           ShenandoahHeapRegion *r = _heap->get_region(idx);
1273           size_t free = alloc_capacity(r);
1274           max = MAX2(max, free);
1275           total_free += free;
1276           total_used += r->used();
1277         }
1278       }
1279       ls.print(" Collector Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s; Used: " SIZE_FORMAT "%s",
1280                byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
1281                byte_size_in_proper_unit(max),        proper_unit_for_byte_size(max),
1282                byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used));
1283     }




















1284   }
1285 }
1286 
1287 HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
1288   shenandoah_assert_heaplocked();


1289   if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
1290     switch (req.type()) {
1291       case ShenandoahAllocRequest::_alloc_shared:
1292       case ShenandoahAllocRequest::_alloc_shared_gc:
1293         in_new_region = true;
1294         return allocate_contiguous(req);

1295       case ShenandoahAllocRequest::_alloc_gclab:
1296       case ShenandoahAllocRequest::_alloc_tlab:
1297         in_new_region = false;
1298         assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
1299                req.size(), ShenandoahHeapRegion::humongous_threshold_words());
1300         return nullptr;
1301       default:
1302         ShouldNotReachHere();
1303         return nullptr;
1304     }
1305   } else {
1306     return allocate_single(req, in_new_region);
1307   }
1308 }
1309 
















1310 void ShenandoahFreeSet::print_on(outputStream* out) const {
1311   out->print_cr("Mutator Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::Mutator));
1312   idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
1313   for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); index <= rightmost; ) {
1314     assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, index),
1315            "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
1316     _heap->get_region(index)->print_on(out);
1317     index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, index + 1);




1318   }
1319   out->print_cr("Collector Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::Collector));
1320   rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector);
1321   for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector); index <= rightmost; ) {
1322     assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, index),
1323            "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
1324     _heap->get_region(index)->print_on(out);
1325     index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Collector, index + 1);
1326   }
1327 }
1328 





















1329 double ShenandoahFreeSet::internal_fragmentation() {
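  // An illustrative way to read this computation (not an authoritative definition):
  //
  //   internal fragmentation ~= 1 - sum(used[i]^2) / (region_size_bytes * sum(used[i]))
  //
  // taken over the regions currently in the Mutator partition.  The value approaches 0 when every
  // in-use region is nearly full, and approaches 1 when usage is spread thinly across many regions.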
1330   double squared = 0;
1331   double linear = 0;
1332   int count = 0;
1333 
1334   idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
1335   for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); index <= rightmost; ) {
1336     assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, index),
1337            "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
1338     ShenandoahHeapRegion* r = _heap->get_region(index);
1339     size_t used = r->used();
1340     squared += used * used;
1341     linear += used;
1342     count++;
1343     index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, index + 1);
1344   }
1345 
1346   if (count > 0) {
1347     double s = squared / (ShenandoahHeapRegion::region_size_bytes() * linear);
1348     return 1 - s;
1349   } else {
1350     return 0;
1351   }
1352 }
1353 













1354 double ShenandoahFreeSet::external_fragmentation() {
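  // An illustrative way to read this computation (not an authoritative definition):
  //
  //   external fragmentation ~= 1 - (largest_contiguous_run_of_empty_regions * region_size_bytes) / free
  //
  // where free counts only the bytes in fully empty Mutator regions.  The value is 0 when all empty
  // regions form a single contiguous run (ideal for humongous allocation) and approaches 1 as the
  // empty regions become scattered across the heap.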
1355   idx_t last_idx = 0;
1356   size_t max_contig = 0;
1357   size_t empty_contig = 0;
1358 
1359   size_t free = 0;
1360 
1361   idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
1362   for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); index <= rightmost; ) {
1363     assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, index),
1364            "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
1365     ShenandoahHeapRegion* r = _heap->get_region(index);
1366     if (r->is_empty()) {
1367       free += ShenandoahHeapRegion::region_size_bytes();
1368       if (last_idx + 1 == index) {
1369         empty_contig++;

1370       } else {
1371         empty_contig = 1;
1372       }
1373     } else {
1374       empty_contig = 0;

1375     }
1376     max_contig = MAX2(max_contig, empty_contig);
1377     last_idx = index;
1378     index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, index + 1);
1379   }
1380 
1381   if (free > 0) {
1382     return 1 - (1.0 * max_contig * ShenandoahHeapRegion::region_size_bytes() / free);
1383   } else {
1384     return 0;
1385   }
1386 }
1387 

   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "gc/shared/tlab_globals.hpp"
  28 #include "gc/shenandoah/shenandoahAffiliation.hpp"
  29 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  30 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  31 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  32 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  33 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  34 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  35 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
  36 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  37 #include "logging/logStream.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "runtime/orderAccess.hpp"
  40 
  41 ShenandoahSetsOfFree::ShenandoahSetsOfFree(size_t max_regions, ShenandoahFreeSet* free_set) :





















































  42     _max(max_regions),

  43     _free_set(free_set),
  44     _region_size_bytes(ShenandoahHeapRegion::region_size_bytes())
  45 {
  46   _membership = NEW_C_HEAP_ARRAY(ShenandoahFreeMemoryType, max_regions, mtGC);
  47   clear_internal();
  48 }
  49 
  50 ShenandoahSetsOfFree::~ShenandoahSetsOfFree() {
  51   FREE_C_HEAP_ARRAY(ShenandoahFreeMemoryType, _membership);
  52 }
  53 




  54 
  55 void ShenandoahSetsOfFree::clear_internal() {
  56   for (size_t idx = 0; idx < _max; idx++) {
  57     _membership[idx] = NotFree;



  58   }






  59 
  60   for (size_t idx = 0; idx < NumFreeSets; idx++) {
  61     _leftmosts[idx] = _max;
  62     _rightmosts[idx] = 0;
  63     _leftmosts_empty[idx] = _max;
  64     _rightmosts_empty[idx] = 0;
  65     _capacity_of[idx] = 0;
  66     _used_by[idx] = 0;







  67   }

  68 
  69   _left_to_right_bias[Mutator] = true;
  70   _left_to_right_bias[Collector] = false;
  71   _left_to_right_bias[OldCollector] = false;





  72 
  73   _region_counts[Mutator] = 0;
  74   _region_counts[Collector] = 0;
  75   _region_counts[OldCollector] = 0;
  76   _region_counts[NotFree] = _max;







  77 }
  78 
  79 void ShenandoahSetsOfFree::clear_all() {
  80   clear_internal();



















  81 }
  82 
  83 void ShenandoahSetsOfFree::increase_used(ShenandoahFreeMemoryType which_set, size_t bytes) {
  84   assert (which_set > NotFree && which_set < NumFreeSets, "Set must correspond to a valid freeset");
  85   _used_by[which_set] += bytes;
  86   assert (_used_by[which_set] <= _capacity_of[which_set],
  87           "Must not use (" SIZE_FORMAT ") more than capacity (" SIZE_FORMAT ") after increase by " SIZE_FORMAT,
  88           _used_by[which_set], _capacity_of[which_set], bytes);
  89 }
  90 
  91 inline void ShenandoahSetsOfFree::shrink_bounds_if_touched(ShenandoahFreeMemoryType set, size_t idx) {
  92   if (idx == _leftmosts[set]) {
  93     while ((_leftmosts[set] < _max) && !in_free_set(_leftmosts[set], set)) {
  94       _leftmosts[set]++;





  95     }
  96     if (_leftmosts_empty[set] < _leftmosts[set]) {
  97       // This gets us closer to where we need to be; we'll scan further when leftmosts_empty is requested.
  98       _leftmosts_empty[set] = _leftmosts[set];
  99     }
 100   }
 101   if (idx == _rightmosts[set]) {
 102     while (_rightmosts[set] > 0 && !in_free_set(_rightmosts[set], set)) {
 103       _rightmosts[set]--;



 104     }
 105     if (_rightmosts_empty[set] > _rightmosts[set]) {
 106       // This gets us closer to where we need to be; we'll scan further when rightmosts_empty is requested.
 107       _rightmosts_empty[set] = _rightmosts[set];
 108     }
 109   }










 110 }
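// Illustration (assumed state): if _leftmosts[Mutator] == 5 and region 5 is removed from the Mutator
// set, the scan in shrink_bounds_if_touched() advances _leftmosts[Mutator] to the next member region
// (or to _max if none remain).  Removing an interior region leaves the bounds untouched, so they may
// be wider than the true span of members; they are only tightened when a boundary region itself is removed.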
 111 
 112 inline void ShenandoahSetsOfFree::expand_bounds_maybe(ShenandoahFreeMemoryType set, size_t idx, size_t region_capacity) {
 113   if (region_capacity == _region_size_bytes) {
 114     if (_leftmosts_empty[set] > idx) {
 115       _leftmosts_empty[set] = idx;







 116     }
 117     if (_rightmosts_empty[set] < idx) {
 118       _rightmosts_empty[set] = idx;
 119     }
 120   }
 121   if (_leftmosts[set] > idx) {
 122     _leftmosts[set] = idx;
 123   }
 124   if (_rightmosts[set] < idx) {
 125     _rightmosts[set] = idx;








 126   }


 127 }
 128 
 129 void ShenandoahSetsOfFree::remove_from_free_sets(size_t idx) {


 130   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 131   ShenandoahFreeMemoryType orig_set = membership(idx);
 132   assert (orig_set > NotFree && orig_set < NumFreeSets, "Cannot remove from free sets if not already free");
 133   _membership[idx] = NotFree;
 134   shrink_bounds_if_touched(orig_set, idx);
 135 
 136   _region_counts[orig_set]--;
 137   _region_counts[NotFree]++;





 138 }
 139 










 140 
 141 void ShenandoahSetsOfFree::make_free(size_t idx, ShenandoahFreeMemoryType which_set, size_t region_capacity) {
 142   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 143   assert (_membership[idx] == NotFree, "Cannot make free if already free");
 144   assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
 145   _membership[idx] = which_set;
 146   _capacity_of[which_set] += region_capacity;
 147   expand_bounds_maybe(which_set, idx, region_capacity);
 148 
 149   _region_counts[NotFree]--;
 150   _region_counts[which_set]++;
 151 }
 152 
 153 void ShenandoahSetsOfFree::move_to_set(size_t idx, ShenandoahFreeMemoryType new_set, size_t region_capacity) {

 154   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 155   assert ((new_set > NotFree) && (new_set < NumFreeSets), "New set must be valid");
 156   ShenandoahFreeMemoryType orig_set = _membership[idx];
 157   assert ((orig_set > NotFree) && (orig_set < NumFreeSets), "Cannot move free unless already free");

 158   // Expected transitions:
 159   //  During rebuild: Mutator => Collector
 160   //                  Mutator empty => Collector
 161   //  During flip_to_gc:
 162   //                  Mutator empty => Collector
 163   //                  Mutator empty => Old Collector
 164   // At start of update refs:
 165   //                  Collector => Mutator
 166   //                  OldCollector Empty => Mutator
 167   assert((region_capacity <= _region_size_bytes && ((orig_set == Mutator && new_set == Collector) || (orig_set == Collector && new_set == Mutator)))
 168       || (region_capacity == _region_size_bytes && ((orig_set == Mutator && new_set == Collector) || (orig_set == OldCollector && new_set == Mutator) || new_set == OldCollector)),
 169       "Unexpected movement between sets");
 170 
 171   _membership[idx] = new_set;
 172   _capacity_of[orig_set] -= region_capacity;
 173   shrink_bounds_if_touched(orig_set, idx);
 174 
 175   _capacity_of[new_set] += region_capacity;
 176   expand_bounds_maybe(new_set, idx, region_capacity);
 177 
 178   _region_counts[orig_set]--;
 179   _region_counts[new_set]++;










 180 }
 181 
 182 inline ShenandoahFreeMemoryType ShenandoahSetsOfFree::membership(size_t idx) const {
 183   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 184   return _membership[idx];







 185 }
 186 
 187   // Returns true iff region idx is a member of the test_set free set.  Before returning true, asserts that any
 188   // region reported as free (i.e. when test_set is not NotFree) still has allocation capacity.
 189 inline bool ShenandoahSetsOfFree::in_free_set(size_t idx, ShenandoahFreeMemoryType test_set) const {
 190   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 191   if (_membership[idx] == test_set) {
 192     assert (test_set == NotFree || _free_set->alloc_capacity(idx) > 0, "Free regions must have alloc capacity");
 193     return true;
 194   } else {
 195     return false;

















 196   }


 197 }
 198 
 199 inline size_t ShenandoahSetsOfFree::leftmost(ShenandoahFreeMemoryType which_set) const {
 200   assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
 201   size_t idx = _leftmosts[which_set];
 202   if (idx >= _max) {
 203     return _max;
 204   } else {
 205     assert (in_free_set(idx, which_set), "left-most region must be free");
 206     return idx;




 207   }


 208 }
 209 
 210 inline size_t ShenandoahSetsOfFree::rightmost(ShenandoahFreeMemoryType which_set) const {
 211   assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
 212   size_t idx = _rightmosts[which_set];
 213   assert ((_leftmosts[which_set] == _max) || in_free_set(idx, which_set), "right-most region must be free");
 214   return idx;






 215 }
 216 
 217 inline bool ShenandoahSetsOfFree::is_empty(ShenandoahFreeMemoryType which_set) const {
 218   assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
 219   return (leftmost(which_set) > rightmost(which_set));








 220 }
 221 
 222 size_t ShenandoahSetsOfFree::leftmost_empty(ShenandoahFreeMemoryType which_set) {
 223   assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
 224   for (size_t idx = _leftmosts_empty[which_set]; idx < _max; idx++) {
 225     if ((membership(idx) == which_set) && (_free_set->alloc_capacity(idx) == _region_size_bytes)) {
 226       _leftmosts_empty[which_set] = idx;






 227       return idx;
 228     }

 229   }
 230   _leftmosts_empty[which_set] = _max;
 231   _rightmosts_empty[which_set] = 0;
 232   return _max;
 233 }
 234 
 235 inline size_t ShenandoahSetsOfFree::rightmost_empty(ShenandoahFreeMemoryType which_set) {
 236   assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
 237   for (intptr_t idx = _rightmosts_empty[which_set]; idx >= 0; idx--) {
 238     if ((membership(idx) == which_set) && (_free_set->alloc_capacity(idx) == _region_size_bytes)) {
 239       _rightmosts_empty[which_set] = idx;





 240       return idx;
 241     }

 242   }
 243   _leftmosts_empty[which_set] = _max;
 244   _rightmosts_empty[which_set] = 0;
 245   return 0;
 246 }
 247 
 248 inline bool ShenandoahSetsOfFree::alloc_from_left_bias(ShenandoahFreeMemoryType which_set) {
 249   assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
 250   return _left_to_right_bias[which_set];
 251 }
 252 
 253 void ShenandoahSetsOfFree::establish_alloc_bias(ShenandoahFreeMemoryType which_set) {
 254   ShenandoahHeap* heap = ShenandoahHeap::heap();
 255   shenandoah_assert_heaplocked();
 256   assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
 257 
 258   size_t middle = (_leftmosts[which_set] + _rightmosts[which_set]) / 2;
 259   size_t available_in_first_half = 0;
 260   size_t available_in_second_half = 0;
 261 
 262   for (size_t index = _leftmosts[which_set]; index < middle; index++) {
 263     if (in_free_set(index, which_set)) {
 264       ShenandoahHeapRegion* r = heap->get_region(index);
 265       available_in_first_half += r->free();
 266     }
 267   }
 268   for (size_t index = middle; index <= _rightmosts[which_set]; index++) {
 269     if (in_free_set(index, which_set)) {
 270       ShenandoahHeapRegion* r = heap->get_region(index);
 271       available_in_second_half += r->free();
 272     }
 273   }
 274 
 275   // We desire to first consume the sparsely distributed regions in order that the remaining regions are densely packed.
 276   // Densely packing regions reduces the effort to search for a region that has sufficient memory to satisfy a new allocation
 277   // request.  Regions become sparsely distributed following a Full GC, which tends to slide all live objects to the front of the
 278   // heap rather than allowing survivor regions to remain at the high end of the heap where we intend for them to congregate.
 279 
 280   // TODO: In the future, we may modify Full GC so that it slides old objects to the end of the heap and young objects to the
 281   // front of the heap. If this is done, we can always search survivor Collector and OldCollector regions right to left.
 282   _left_to_right_bias[which_set] = (available_in_second_half > available_in_first_half);
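  // Worked example (assumed values): with members spanning indices 10..29, middle == 19.  If regions
  // in [10, 19) expose 6 MiB of free memory while regions in [19, 29] expose 20 MiB, then
  // available_in_second_half > available_in_first_half, so the bias becomes left-to-right and the
  // sparse low end of the range is consumed first, preserving the densely packed high end.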
 283 }
 284 
 285 #ifdef ASSERT
 286 void ShenandoahSetsOfFree::assert_bounds() {
 287 
 288   size_t leftmosts[NumFreeSets];
 289   size_t rightmosts[NumFreeSets];
 290   size_t empty_leftmosts[NumFreeSets];
 291   size_t empty_rightmosts[NumFreeSets];
 292 
 293   for (int i = 0; i < NumFreeSets; i++) {
 294     leftmosts[i] = _max;
 295     empty_leftmosts[i] = _max;
 296     rightmosts[i] = 0;
 297     empty_rightmosts[i] = 0;
 298   }
 299 
 300   for (size_t i = 0; i < _max; i++) {
 301     ShenandoahFreeMemoryType set = membership(i);
 302     switch (set) {
 303       case NotFree:
 304         break;
 305 
 306       case Mutator:
 307       case Collector:
 308       case OldCollector:
 309       {
 310         size_t capacity = _free_set->alloc_capacity(i);
 311         bool is_empty = (capacity == _region_size_bytes);
 312         assert(capacity > 0, "free regions must have allocation capacity");
 313         if (i < leftmosts[set]) {
 314           leftmosts[set] = i;
 315         }
 316         if (is_empty && (i < empty_leftmosts[set])) {
 317           empty_leftmosts[set] = i;
 318         }
 319         if (i > rightmosts[set]) {
 320           rightmosts[set] = i;
 321         }
 322         if (is_empty && (i > empty_rightmosts[set])) {
 323           empty_rightmosts[set] = i;
 324         }
 325         break;
 326       }
 327 
 328       case NumFreeSets:
 329       default:
 330         ShouldNotReachHere();
 331     }
 332   }
 333 
 334   // Performance invariants. Failing these would not break the free set, but performance would suffer.
 335   assert (leftmost(Mutator) <= _max, "leftmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, leftmost(Mutator),  _max);
 336   assert (rightmost(Mutator) < _max, "rightmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, rightmost(Mutator),  _max);
 337 
 338   assert (leftmost(Mutator) == _max || in_free_set(leftmost(Mutator), Mutator),
 339           "leftmost region should be free: " SIZE_FORMAT,  leftmost(Mutator));
 340   assert (leftmost(Mutator) == _max || in_free_set(rightmost(Mutator), Mutator),
 341           "rightmost region should be free: " SIZE_FORMAT, rightmost(Mutator));
 342 
 343   // If Mutator set is empty, leftmosts will both equal max, rightmosts will both equal zero.  Likewise for empty region sets.
 344   size_t beg_off = leftmosts[Mutator];
 345   size_t end_off = rightmosts[Mutator];
 346   assert (beg_off >= leftmost(Mutator),
 347           "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost(Mutator));
 348   assert (end_off <= rightmost(Mutator),
 349           "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, rightmost(Mutator));
 350 
 351   beg_off = empty_leftmosts[Mutator];
 352   end_off = empty_rightmosts[Mutator];
 353   assert (beg_off >= leftmost_empty(Mutator),
 354           "free empty regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost_empty(Mutator));
 355   assert (end_off <= rightmost_empty(Mutator),
 356           "free empty regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, rightmost_empty(Mutator));
 357 
 358   // Performance invariants. Failing these would not break the free set, but performance would suffer.
 359   assert (leftmost(Collector) <= _max, "leftmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, leftmost(Collector),  _max);
 360   assert (rightmost(Collector) < _max, "rightmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, rightmost(Collector),  _max);
 361 
 362   assert (leftmost(Collector) == _max || in_free_set(leftmost(Collector), Collector),
 363           "leftmost region should be free: " SIZE_FORMAT,  leftmost(Collector));
 364   assert (leftmost(Collector) == _max || in_free_set(rightmost(Collector), Collector),
 365           "rightmost region should be free: " SIZE_FORMAT, rightmost(Collector));
 366 
 367   // If Collector set is empty, leftmosts will both equal max, rightmosts will both equal zero.  Likewise for empty region sets.
 368   beg_off = leftmosts[Collector];
 369   end_off = rightmosts[Collector];
 370   assert (beg_off >= leftmost(Collector),
 371           "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost(Collector));
 372   assert (end_off <= rightmost(Collector),
 373           "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, rightmost(Collector));
 374 
 375   beg_off = empty_leftmosts[Collector];
 376   end_off = empty_rightmosts[Collector];
 377   assert (beg_off >= leftmost_empty(Collector),
 378           "free empty regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost_empty(Collector));
 379   assert (end_off <= rightmost_empty(Collector),
 380           "free empty regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, rightmost_empty(Collector));
 381 
 382   // Performance invariants. Failing these would not break the free set, but performance would suffer.
 383   assert (leftmost(OldCollector) <= _max, "leftmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, leftmost(OldCollector),  _max);
 384   assert (rightmost(OldCollector) < _max, "rightmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, rightmost(OldCollector),  _max);
 385 
 386   assert (leftmost(OldCollector) == _max || in_free_set(leftmost(OldCollector), OldCollector),
 387           "leftmost region should be free: " SIZE_FORMAT,  leftmost(OldCollector));
 388   assert (leftmost(OldCollector) == _max || in_free_set(rightmost(OldCollector), OldCollector),
 389           "rightmost region should be free: " SIZE_FORMAT, rightmost(OldCollector));
 390 
 391   // If OldCollector set is empty, leftmosts will both equal max, rightmosts will both equal zero.  Likewise for empty region sets.
 392   beg_off = leftmosts[OldCollector];
 393   end_off = rightmosts[OldCollector];
 394   assert (beg_off >= leftmost(OldCollector),
 395           "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost(OldCollector));
 396   assert (end_off <= rightmost(OldCollector),
 397           "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, rightmost(OldCollector));
 398 
 399   beg_off = empty_leftmosts[OldCollector];
 400   end_off = empty_rightmosts[OldCollector];
 401   assert (beg_off >= leftmost_empty(OldCollector),
 402           "free empty regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost_empty(OldCollector));
 403   assert (end_off <= rightmost_empty(OldCollector),
 404           "free empty regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, rightmost_empty(OldCollector));
 405 }
 406 #endif
 407 
 408 ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
 409   _heap(heap),
 410   _free_sets(max_regions, this)


 411 {
 412   clear_internal();
 413 }
 414 
 415 // This allocates from a region within the old_collector_set.  If affiliation equals OLD, the allocation must be taken
 416 // from a region that is_old().  Otherwise, affiliation should be FREE, in which case this will put a previously unaffiliated
 417 // region into service.
 418 HeapWord* ShenandoahFreeSet::allocate_old_with_affiliation(ShenandoahAffiliation affiliation,
 419                                                            ShenandoahAllocRequest& req, bool& in_new_region) {
 420   shenandoah_assert_heaplocked();
 421 
 422   size_t rightmost =
 423     (affiliation == ShenandoahAffiliation::FREE)? _free_sets.rightmost_empty(OldCollector): _free_sets.rightmost(OldCollector);
 424   size_t leftmost =
 425     (affiliation == ShenandoahAffiliation::FREE)? _free_sets.leftmost_empty(OldCollector): _free_sets.leftmost(OldCollector);
 426   if (_free_sets.alloc_from_left_bias(OldCollector)) {
 427     // This mode picks up stragglers left by a full GC
 428     for (size_t idx = leftmost; idx <= rightmost; idx++) {
 429       if (_free_sets.in_free_set(idx, OldCollector)) {
 430         ShenandoahHeapRegion* r = _heap->get_region(idx);
 431         assert(r->is_trash() || !r->is_affiliated() || r->is_old(), "old_collector_set region has bad affiliation");
 432         if (r->affiliation() == affiliation) {
 433           HeapWord* result = try_allocate_in(r, req, in_new_region);
 434           if (result != nullptr) {
 435             return result;
 436           }
 437         }
 438       }
 439     }
 440   } else {
 441     // This mode picks up stragglers left by a previous concurrent GC
 442     for (size_t count = rightmost + 1; count > leftmost; count--) {
 443       // size_t is unsigned, need to dodge underflow when _leftmost = 0
 444       size_t idx = count - 1;
 445       if (_free_sets.in_free_set(idx, OldCollector)) {
 446         ShenandoahHeapRegion* r = _heap->get_region(idx);
 447         assert(r->is_trash() || !r->is_affiliated() || r->is_old(), "old_collector_set region has bad affiliation");
 448         if (r->affiliation() == affiliation) {
 449           HeapWord* result = try_allocate_in(r, req, in_new_region);
 450           if (result != nullptr) {
 451             return result;
 452           }
 453         }
 454       }
 455     }
 456   }
 457   return nullptr;
 458 }
 459 
 460 void ShenandoahFreeSet::add_old_collector_free_region(ShenandoahHeapRegion* region) {
 461   shenandoah_assert_heaplocked();
 462   size_t plab_min_size_in_bytes = ShenandoahGenerationalHeap::heap()->plab_min_size() * HeapWordSize;
 463   size_t idx = region->index();
 464   size_t capacity = alloc_capacity(region);
 465   assert(_free_sets.membership(idx) == NotFree, "Regions promoted in place should not be in any free set");
 466   if (capacity >= plab_min_size_in_bytes) {
 467     _free_sets.make_free(idx, OldCollector, capacity);
 468     _heap->old_generation()->augment_promoted_reserve(capacity);
 469   }
 470 }
 471 
 472 HeapWord* ShenandoahFreeSet::allocate_with_affiliation(ShenandoahAffiliation affiliation,
 473                                                        ShenandoahAllocRequest& req, bool& in_new_region) {
 474   shenandoah_assert_heaplocked();
 475   size_t rightmost =
 476     (affiliation == ShenandoahAffiliation::FREE)? _free_sets.rightmost_empty(Collector): _free_sets.rightmost(Collector);
 477   size_t leftmost =
 478     (affiliation == ShenandoahAffiliation::FREE)? _free_sets.leftmost_empty(Collector): _free_sets.leftmost(Collector);
 479   for (size_t c = rightmost + 1; c > leftmost; c--) {
 480     // size_t is unsigned, need to dodge underflow when _leftmost = 0
 481     size_t idx = c - 1;
 482     if (_free_sets.in_free_set(idx, Collector)) {
 483       ShenandoahHeapRegion* r = _heap->get_region(idx);
 484       if (r->affiliation() == affiliation) {
 485         HeapWord* result = try_allocate_in(r, req, in_new_region);
 486         if (result != nullptr) {
 487           return result;
 488         }
 489       }
 490     }
 491   }
 492   log_debug(gc, free)("Could not allocate collector region with affiliation: %s for request " PTR_FORMAT,
 493                       shenandoah_affiliation_name(affiliation), p2i(&req));
 494   return nullptr;
 495 }
 496 
 497 HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
 498   shenandoah_assert_heaplocked();
 499 
 500   // Scan the bitmap looking for a first fit.
 501   //
 502   // Leftmost and rightmost bounds provide enough caching to walk bitmap efficiently. Normally,
 503   // we would find the region to allocate in right away.
 504   //
 505   // Allocations are biased: new application allocs go to beginning of the heap, and GC allocs
 506   // go to the end. This makes application allocation faster, because we would clear lots
 507   // of regions from the beginning most of the time.
 508   //
 509   // Free set maintains mutator and collector views, and normally they allocate in their views only,
 510   // except in special cases of stealing and mixed allocations.
 511 
 512   // Overwrite with non-zero (non-NULL) values only if necessary for allocation bookkeeping.
 513   bool allow_new_region = true;
 514   if (_heap->mode()->is_generational()) {
 515     switch (req.affiliation()) {
 516       case ShenandoahAffiliation::OLD_GENERATION:
 517         // Note: unsigned result from free_unaffiliated_regions() will never be less than zero, but it may equal zero.
 518         if (_heap->old_generation()->free_unaffiliated_regions() <= 0) {
 519           allow_new_region = false;
 520         }
 521         break;
 522 
 523       case ShenandoahAffiliation::YOUNG_GENERATION:
 524         // Note: unsigned result from free_unaffiliated_regions() will never be less than zero, but it may equal zero.
 525         if (_heap->young_generation()->free_unaffiliated_regions() <= 0) {
 526           allow_new_region = false;
 527         }
 528         break;
 529 
 530       case ShenandoahAffiliation::FREE:
 531         fatal("Should request affiliation");
 532 
 533       default:
 534         ShouldNotReachHere();
 535         break;
 536     }
 537   }
 538   switch (req.type()) {
 539     case ShenandoahAllocRequest::_alloc_tlab:
 540     case ShenandoahAllocRequest::_alloc_shared: {
 541       // Try to allocate in the mutator view
 542       // Allocate within mutator free from high memory to low so as to preserve low memory for humongous allocations
 543       if (!_free_sets.is_empty(Mutator)) {
 544         // Use signed idx.  Otherwise, loop will never terminate.
 545         int leftmost = (int) _free_sets.leftmost(Mutator);
 546         for (int idx = (int) _free_sets.rightmost(Mutator); idx >= leftmost; idx--) {
 547           ShenandoahHeapRegion* r = _heap->get_region(idx);
 548           if (_free_sets.in_free_set(idx, Mutator) && (allow_new_region || r->is_affiliated())) {

























 549             // try_allocate_in() increases used if the allocation is successful.
 550             HeapWord* result;
 551             size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab)? req.min_size(): req.size();
 552             if ((alloc_capacity(r) >= min_size) && ((result = try_allocate_in(r, req, in_new_region)) != nullptr)) {
 553               return result;
 554             }





















 555           }
 556         }
 557       }
 558       // There is no recovery. Mutator does not touch collector view at all.
 559       break;
 560     }
 561     case ShenandoahAllocRequest::_alloc_gclab:
 562       // GCLABs are for evacuation so we must be in evacuation phase.  If this allocation is successful, increment
 563       // the relevant evac_expended rather than used value.
 564 
 565     case ShenandoahAllocRequest::_alloc_plab:
 566       // PLABs always reside in old-gen and are only allocated during evacuation phase.
 567 
 568     case ShenandoahAllocRequest::_alloc_shared_gc: {
 569       if (!_heap->mode()->is_generational()) {
 570         // size_t is unsigned, need to dodge underflow when _leftmost = 0
 571         // Fast-path: try to allocate in the collector view first
 572         for (size_t c = _free_sets.rightmost(Collector) + 1; c > _free_sets.leftmost(Collector); c--) {
 573           size_t idx = c - 1;
 574           if (_free_sets.in_free_set(idx, Collector)) {
 575             HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
 576             if (result != nullptr) {
 577               return result;
 578             }
 579           }
 580         }
 581       } else {
 582         // First try to fit into a region that is already in use in the same generation.
 583         HeapWord* result;
 584         if (req.is_old()) {
 585           result = allocate_old_with_affiliation(req.affiliation(), req, in_new_region);
 586         } else {
 587           result = allocate_with_affiliation(req.affiliation(), req, in_new_region);
 588         }
 589         if (result != nullptr) {
 590           return result;
 591         }
 592         if (allow_new_region) {
 593           // Then try a free region that is dedicated to GC allocations.
 594           if (req.is_old()) {
 595             result = allocate_old_with_affiliation(FREE, req, in_new_region);
 596           } else {
 597             result = allocate_with_affiliation(FREE, req, in_new_region);
 598           }
 599           if (result != nullptr) {
 600             return result;
 601           }
 602         }
 603       }

 604       // No dice. Can we borrow space from mutator view?
 605       if (!ShenandoahEvacReserveOverflow) {
 606         return nullptr;
 607       }
 608 
 609       if (!allow_new_region && req.is_old() && (_heap->young_generation()->free_unaffiliated_regions() > 0)) {
 610         // This allows us to flip a mutator region to old_collector
 611         allow_new_region = true;
 612       }
 613 
 614       // We should expand old-gen if this can prevent an old-gen evacuation failure.  We don't care so much about
 615       // promotion failures since they can be mitigated in a subsequent GC pass.  Would be nice to know if this
 616       // allocation request is for evacuation or promotion.  Individual threads limit their use of PLAB memory for
 617       // promotions, so we already have an assurance that any additional memory set aside for old-gen will be used
 618       // only for old-gen evacuations.
 619 
 620       // Also TODO:
 621       // if (GC is idle (out of cycle) and mutator allocation fails and there is memory reserved in Collector
 622       // or OldCollector sets, transfer a region of memory so that we can satisfy the allocation request, and
 623       // immediately trigger the start of GC.  Is better to satisfy the allocation than to trigger out-of-cycle
 624       // allocation failure (even if this means we have a little less memory to handle evacuations during the
 625       // subsequent GC pass).
 626 
 627       if (allow_new_region) {
 628         // Try to steal an empty region from the mutator view.
 629         for (size_t c = _free_sets.rightmost_empty(Mutator) + 1; c > _free_sets.leftmost_empty(Mutator); c--) {
 630           size_t idx = c - 1;
 631           if (_free_sets.in_free_set(idx, Mutator)) {
 632             ShenandoahHeapRegion* r = _heap->get_region(idx);
 633             if (can_allocate_from(r)) {
 634               if (req.is_old()) {
 635                 flip_to_old_gc(r);
 636               } else {
 637                 flip_to_gc(r);
 638               }
 639               HeapWord *result = try_allocate_in(r, req, in_new_region);
 640               if (result != nullptr) {
 641                 log_debug(gc, free)("Flipped region " SIZE_FORMAT " to gc for request: " PTR_FORMAT, idx, p2i(&req));
 642                 return result;
 643               }
 644             }
 645           }
 646         }

 647       }
 648 
 649       // No dice. Do not try to mix mutator and GC allocations, because
 650       // URWM moves due to GC allocations would expose unparsable mutator
 651       // allocations.
 652       break;
 653     }
 654     default:
 655       ShouldNotReachHere();
 656   }
 657   return nullptr;
 658 }
 659 
 660 // This work method takes an argument corresponding to the number of bytes
 661 // free in a region, and returns the largest amount in heapwords that can be allocated
 662 // such that both of the following conditions are satisfied:
 663 //
 664 // 1. it is a multiple of card size
 665 // 2. any remaining shard may be filled with a filler object
 666 //
 667 // The idea is that the allocation starts and ends at card boundaries. Because
 668 // a region ('s end) is card-aligned, the remainder shard that must be filled is
 669 // at the start of the free space.
 670 //
 671 // This is merely a helper method to use for the purpose of such a calculation.
 672 size_t get_usable_free_words(size_t free_bytes) {
 673   // e.g. card_size is 512, card_shift is 9, min_fill_size() is 8
 674   //      free is 514
 675   //      usable_free is 512, which is decreased to 0
 676   size_t usable_free = (free_bytes / CardTable::card_size()) << CardTable::card_shift();
 677   assert(usable_free <= free_bytes, "Sanity check");
 678   if ((free_bytes != usable_free) && (free_bytes - usable_free < ShenandoahHeap::min_fill_size() * HeapWordSize)) {
 679     // After aligning to card multiples, the remainder would be smaller than
 680     // the minimum filler object, so we'll need to take away another card's
 681     // worth to construct a filler object.
 682     if (usable_free >= CardTable::card_size()) {
 683       usable_free -= CardTable::card_size();
 684     } else {
 685       assert(usable_free == 0, "usable_free is a multiple of card_size and card_size > min_fill_size");
 686     }
 687   }
 688 
 689   return usable_free / HeapWordSize;
 690 }
 691 
 692 // Given a size argument, which is a multiple of card size, a request struct
 693 // for a PLAB, and an old region, return a pointer to the allocated space for
 694 // a PLAB which is card-aligned and where any remaining shard in the region
 695 // has been suitably filled by a filler object.
 696 // It is assumed (and assertion-checked) that such an allocation is always possible.
 697 HeapWord* ShenandoahFreeSet::allocate_aligned_plab(size_t size, ShenandoahAllocRequest& req, ShenandoahHeapRegion* r) {
 698   assert(_heap->mode()->is_generational(), "PLABs are only for generational mode");
 699   assert(r->is_old(), "All PLABs reside in old-gen");
 700   assert(!req.is_mutator_alloc(), "PLABs should not be allocated by mutators.");
 701   assert(is_aligned(size, CardTable::card_size_in_words()), "Align by design");
 702 
 703   HeapWord* result = r->allocate_aligned(size, req, CardTable::card_size());
 704   assert(result != nullptr, "Allocation cannot fail");
 705   assert(r->top() <= r->end(), "Allocation cannot span end of region");
 706   assert(req.actual_size() == size, "Should not have needed to adjust size for PLAB.");
 707   assert(is_aligned(result, CardTable::card_size_in_words()), "Align by design");
 708 
 709   return result;
 710 }
 711 
 712 HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
 713   assert (has_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());
 714   if (_heap->is_concurrent_weak_root_in_progress() && r->is_trash()) {
 715     return nullptr;
 716   }
 717 

 718   try_recycle_trashed(r);
 719   if (!r->is_affiliated()) {
 720     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
 721     r->set_affiliation(req.affiliation());
 722     if (r->is_old()) {
 723       // Any OLD region allocated during concurrent coalesce-and-fill does not need to be coalesced and filled because
 724       // all objects allocated within this region are above TAMS (and thus are implicitly marked).  In case this is an
 725       // OLD region and concurrent preparation for mixed evacuations visits this region before the start of the next
 726       // old-gen concurrent mark (i.e. this region is allocated following the start of old-gen concurrent mark but before
 727       // concurrent preparations for mixed evacuations are completed), we mark this region as not requiring any
 728       // coalesce-and-fill processing.
 729       r->end_preemptible_coalesce_and_fill();
 730       _heap->old_generation()->clear_cards_for(r);
 731     }
 732     _heap->generation_for(r->affiliation())->increment_affiliated_region_count();
 733 
 734     assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom");
 735     assert(ctx->is_bitmap_clear_range(ctx->top_bitmap(r), r->end()), "Bitmap above top_bitmap() must be clear");
 736   } else if (r->affiliation() != req.affiliation()) {
 737     assert(_heap->mode()->is_generational(), "Request for %s from %s region should only happen in generational mode.",
 738            req.affiliation_name(), r->affiliation_name());
 739     return nullptr;
 740   }
 741 
 742   in_new_region = r->is_empty();
 743   HeapWord* result = nullptr;
 744 
 745   if (in_new_region) {
 746     log_debug(gc, free)("Using new region (" SIZE_FORMAT ") for %s (" PTR_FORMAT ").",
 747                        r->index(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(&req));
 748   }
 749 
 750   // req.size() is in words, r->free() is in bytes.
 751   if (req.is_lab_alloc()) {
 752     if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
 753       assert(_heap->mode()->is_generational(), "PLABs are only for generational mode");
 754       assert(_free_sets.in_free_set(r->index(), OldCollector), "PLABS must be allocated in old_collector_free regions");
 755       // Need to ensure that PLABs are aligned on a multiple of the card size.
 756       // Since we have Elastic TLABs, align sizes up. They may be decreased to fit in the usable
 757       // memory remaining in the region (which will also be aligned to cards).
 758       size_t adjusted_size = align_up(req.size(), CardTable::card_size_in_words());
 759       size_t adjusted_min_size = align_up(req.min_size(), CardTable::card_size_in_words());
 760       size_t usable_free = get_usable_free_words(r->free());
 761 
 762       if (adjusted_size > usable_free) {
 763         adjusted_size = usable_free;
 764       }
 765 
 766       if (adjusted_size >= adjusted_min_size) {
 767         result = allocate_aligned_plab(adjusted_size, req, r);
 768       }
 769       // Otherwise, leave result == nullptr because the adjusted size is smaller than min size.
 770     } else {
 771       // This is a GCLAB or a TLAB allocation
 772       size_t adjusted_size = req.size();
 773       size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
 774       if (adjusted_size > free) {
 775         adjusted_size = free;
 776       }
 777       if (adjusted_size >= req.min_size()) {
 778         result = r->allocate(adjusted_size, req);
 779         assert (result != nullptr, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, adjusted_size);
 780         req.set_actual_size(adjusted_size);
 781       } else {
 782         log_trace(gc, free)("Failed to shrink TLAB or GCLAB request (" SIZE_FORMAT ") in region " SIZE_FORMAT " to " SIZE_FORMAT
 783                            " because min_size() is " SIZE_FORMAT, req.size(), r->index(), adjusted_size, req.min_size());
 784       }
 785     }
 786   } else {
 787     size_t size = req.size();
 788     result = r->allocate(size, req);
 789     if (result != nullptr) {
 790       // Record actual allocation size




 791       req.set_actual_size(size);
 792     }
 793   }
 794 
 795   ShenandoahGeneration* generation = _heap->generation_for(req.affiliation());
 796   if (result != nullptr) {
 797     // Allocation successful, bump stats:
 798     if (req.is_mutator_alloc()) {
 799       assert(req.is_young(), "Mutator allocations always come from young generation.");
 800       _free_sets.increase_used(Mutator, req.actual_size() * HeapWordSize);
 801     } else {
 802       assert(req.is_gc_alloc(), "Should be gc_alloc since req wasn't mutator alloc");
 803 
 804       // For GC allocations, we advance update_watermark because the objects relocated into this memory during
 805       // evacuation are not updated during evacuation.  For both young and old regions r, it is essential that all
 806       // PLABs be made parsable at the end of evacuation.  This is enabled by retiring all plabs at end of evacuation.
 807       // TODO: Making a PLAB parsable involves placing a filler object in its remnant memory but does not require
 808       // that the PLAB be disabled for all future purposes.  We may want to introduce a new service to make the
 809       // PLABs parsable while still allowing the PLAB to serve future allocation requests that arise during the
 810       // next evacuation pass.
 811       r->set_update_watermark(r->top());
 812       if (r->is_old()) {
 813         assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "old-gen allocations use PLAB or shared allocation");
 814         // for plabs, we'll sort the difference between evac and promotion usage when we retire the plab
 815       }
 816     }
 817   }
 818 
 819   if (result == nullptr || alloc_capacity(r) < PLAB::min_size() * HeapWordSize) {
 820     // Region cannot afford this and is likely to not afford future allocations. Retire it.
 821     //
 822     // While this seems a bit harsh, especially when this large allocation does not
 823     // fit but the next small one would, we risk inflating scan times when lots of
 824     // almost-full regions precede the fully-empty region where we want to allocate the entire TLAB.
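         // Note that alloc_capacity(r) is in bytes while PLAB::min_size() is in words, hence the HeapWordSize
         // factor in the retirement threshold above.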



 825 
 826     // Record the remainder as allocation waste
 827     size_t idx = r->index();
 828     if (req.is_mutator_alloc()) {
 829       size_t waste = r->free();
 830       if (waste > 0) {
 831         _free_sets.increase_used(Mutator, waste);
 832         // This one request could cause several regions to be "retired", so we must accumulate the waste
 833         req.set_waste((waste >> LogHeapWordSize) + req.waste());
 834       }
 835       assert(_free_sets.membership(idx) == Mutator, "Must be mutator free: " SIZE_FORMAT, idx);
 836     } else {
 837       assert(_free_sets.membership(idx) == Collector || _free_sets.membership(idx) == OldCollector,
 838              "Must be collector or old-collector free: " SIZE_FORMAT, idx);
 839     }
 840     // This region is no longer considered free (in any set)
 841     _free_sets.remove_from_free_sets(idx);
 842     _free_sets.assert_bounds();
 843   }
 844   return result;
 845 }
 846 
 847 HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {

 848   shenandoah_assert_heaplocked();
 849 
 850   size_t words_size = req.size();
 851   size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
 852 
 853   assert(req.is_young(), "Humongous regions always allocated in YOUNG");
 854   ShenandoahGeneration* generation = _heap->generation_for(req.affiliation());
 855 
 856   // Check if there are enough regions left to satisfy allocation.
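       // In generational mode, the humongous object will be young, so the request must also fit within the
       // young generation's unaffiliated (completely empty) regions, not just within the Mutator free set.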
 857   if (_heap->mode()->is_generational()) {
 858     size_t avail_young_regions = generation->free_unaffiliated_regions();
 859     if (num > _free_sets.count(Mutator) || (num > avail_young_regions)) {
 860       return nullptr;
 861     }
 862   } else {
 863     if (num > _free_sets.count(Mutator)) {
 864       return nullptr;
 865     }
 866   }
 867 




 868   // Find the contiguous interval of $num regions, starting at $beg and ending at $end,
 869   // inclusive. Contiguous allocations are biased toward the beginning of the heap.
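       // The scan below grows a candidate window one region at a time: any region that is not mutator-free or
       // not completely free resets the window to begin just past it, until the window spans exactly $num regions.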
 870 
 871   size_t beg = _free_sets.leftmost(Mutator);
 872   size_t end = beg;




 873 
 874   while (true) {
 875     if (end >= _free_sets.max()) {
 876       // Hit the end, goodbye
 877       return nullptr;
 878     }
 879 
 880     // If the next region is not free for mutator allocations, or is not completely free, then the
 881     // current [beg; end] interval is useless, and we may fast-forward past it.
 882     if (!_free_sets.in_free_set(end, Mutator) || !can_allocate_from(_heap->get_region(end))) {
 883       end++;
 884       beg = end;
 885       continue;















 886     }
 887 
 888     if ((end - beg + 1) == num) {
 889       // found the match
 890       break;
 891     }
 892 
 893     end++;
 894   }
 895 
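       // remainder is the number of words the request occupies in its last region; zero means the request
       // exactly fills whole regions and the trailing region is completely full.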
 896   size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
 897   ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
 898 
 899   // Initialize regions:
 900   for (size_t i = beg; i <= end; i++) {
 901     ShenandoahHeapRegion* r = _heap->get_region(i);
 902     try_recycle_trashed(r);
 903 
 904     assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous");
 905     assert(r->is_empty(), "Should be empty");
 906 
 907     if (i == beg) {
 908       r->make_humongous_start();
 909     } else {
 910       r->make_humongous_cont();
 911     }
 912 
 913     // Trailing region may be non-full, record the remainder there
 914     size_t used_words;
 915     if ((i == end) && (remainder != 0)) {
 916       used_words = remainder;
 917     } else {
 918       used_words = ShenandoahHeapRegion::region_size_words();
 919     }
 920 
 921     r->set_affiliation(req.affiliation());
 922     r->set_update_watermark(r->bottom());
 923     r->set_top(r->bottom() + used_words);

 924 
 925     // While individual regions report their true use, all humongous regions are accounted as fully used in the free set.
 926     _free_sets.remove_from_free_sets(r->index());

 927   }
 928   generation->increase_affiliated_region_count(num);


 929 
 930   size_t total_humongous_size = ShenandoahHeapRegion::region_size_bytes() * num;
 931   _free_sets.increase_used(Mutator, total_humongous_size);
 932   _free_sets.assert_bounds();
 933   req.set_actual_size(words_size);
 934   if (remainder != 0) {
 935     req.set_waste(ShenandoahHeapRegion::region_size_words() - remainder);
 936   }
 937   return _heap->get_region(beg)->bottom();
 938 }
 939 
 940 // Returns true iff this region is entirely available, either because it is empty() or because it has been found to represent
 941 // immediate trash and we'll be able to immediately recycle it.  Note that we cannot recycle immediate trash if
 942 // concurrent weak root processing is in progress.
 943 bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) const {
 944   return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
 945 }
 946 
 947 bool ShenandoahFreeSet::can_allocate_from(size_t idx) const {
 948   ShenandoahHeapRegion* r = _heap->get_region(idx);
 949   return can_allocate_from(r);
 950 }
 951 
 952 size_t ShenandoahFreeSet::alloc_capacity(size_t idx) const {
 953   ShenandoahHeapRegion* r = _heap->get_region(idx);
 954   return alloc_capacity(r);
 955 }
 956 
 957 size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) const {
 958   if (r->is_trash()) {
 959     // This would be recycled on allocation path
 960     return ShenandoahHeapRegion::region_size_bytes();
 961   } else {
 962     return r->free();
 963   }
 964 }
 965 
 966 bool ShenandoahFreeSet::has_alloc_capacity(ShenandoahHeapRegion *r) const {
 967   return alloc_capacity(r) > 0;
 968 }
 969 
 970 void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
 971   if (r->is_trash()) {

 972     r->recycle();
 973   }
 974 }
 975 
 976 void ShenandoahFreeSet::recycle_trash() {
 977   // The heap lock is not reentrant; check that we do not already hold it
 978   shenandoah_assert_not_heaplocked();
 979 
 980   for (size_t i = 0; i < _heap->num_regions(); i++) {
 981     ShenandoahHeapRegion* r = _heap->get_region(i);
 982     if (r->is_trash()) {
 983       ShenandoahHeapLocker locker(_heap->lock());
 984       try_recycle_trashed(r);
 985     }
 986     SpinPause(); // allow allocators to take the lock
 987   }
 988 }
 989 
 990 void ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) {
 991   size_t idx = r->index();
 992 
 993   assert(_free_sets.in_free_set(idx, Mutator), "Should be in mutator view");
 994   // Note: can_allocate_from(r) means r is entirely empty
 995   assert(can_allocate_from(r), "Should not be allocated");
 996 
 997   ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::cast(_heap);
 998   size_t region_capacity = alloc_capacity(r);
 999   _free_sets.move_to_set(idx, OldCollector, region_capacity);
1000   _free_sets.assert_bounds();
1001   _heap->old_generation()->augment_evacuation_reserve(region_capacity);
1002   bool transferred = gen_heap->generation_sizer()->transfer_to_old(1);
1003   if (!transferred) {
1004     log_warning(gc, free)("Forcing transfer of " SIZE_FORMAT " to old reserve.", idx);
1005     gen_heap->generation_sizer()->force_transfer_to_old(1);
1006   }
1007   // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
1008   // to recycle trash before attempting to allocate anything in the region.
1009 }
1010 
1011 void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
1012   size_t idx = r->index();
1013 
1014   assert(_free_sets.in_free_set(idx, Mutator), "Should be in mutator view");
1015   assert(can_allocate_from(r), "Should not be allocated");
1016 
1017   size_t region_capacity = alloc_capacity(r);
1018   _free_sets.move_to_set(idx, Collector, region_capacity);
1019   _free_sets.assert_bounds();

1020 
1021   // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
1022   // to recycle trash before attempting to allocate anything in the region.
1023 }
1024 
1025 void ShenandoahFreeSet::clear() {
1026   shenandoah_assert_heaplocked();
1027   clear_internal();
1028 }
1029 
1030 void ShenandoahFreeSet::clear_internal() {
1031   _free_sets.clear_all();
1032 }
1033 
1034 // This function places all is_old() regions that have allocation capacity into the old_collector set.  It places
1035 // all other regions (not is_old()) that have allocation capacity into the mutator_set.  Subsequently, we will
1036 // move some of the mutator regions into the collector set or old_collector set with the intent of packing
1037 // old_collector memory into the highest (rightmost) addresses of the heap and the collector memory into the
1038 // next highest addresses of the heap, with mutator memory consuming the lowest addresses of the heap.
1039 void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regions, size_t &old_cset_regions,
1040                                                          size_t &first_old_region, size_t &last_old_region,
1041                                                          size_t &old_region_count) {
1042   first_old_region = _heap->num_regions();
1043   last_old_region = 0;
1044   old_region_count = 0;
1045   old_cset_regions = 0;
1046   young_cset_regions = 0;


1047   for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
1048     ShenandoahHeapRegion* region = _heap->get_region(idx);
1049     if (region->is_trash()) {
1050       // Trashed regions represent regions that had been in the collection set but have not yet been "cleaned up".
1051       if (region->is_old()) {
1052         old_cset_regions++;
1053       } else {
1054         assert(region->is_young(), "Trashed region should be old or young");
1055         young_cset_regions++;
1056       }
1057     } else if (region->is_old() && region->is_regular()) {
1058       old_region_count++;
1059       if (first_old_region > idx) {
1060         first_old_region = idx;
1061       }
1062       last_old_region = idx;
1063     }
1064     if (region->is_alloc_allowed() || region->is_trash()) {
1065       assert(!region->is_cset(), "Shouldn't be adding cset regions to the free set");
1066       assert(_free_sets.in_free_set(idx, NotFree), "We are about to make region free; it should not be free already");
1067 
1068       // Do not add regions that would almost surely fail allocation.  Note that PLAB::min_size() is typically less than ShenandoahGenerationalHeap::plab_min_size()
1069       if (alloc_capacity(region) < PLAB::min_size() * HeapWordSize) continue;



















1070 
1071       if (region->is_old()) {
1072         _free_sets.make_free(idx, OldCollector, alloc_capacity(region));
1073         log_debug(gc, free)(
1074           "  Adding Region " SIZE_FORMAT  " (Free: " SIZE_FORMAT "%s, Used: " SIZE_FORMAT "%s) to old collector set",
1075           idx, byte_size_in_proper_unit(region->free()), proper_unit_for_byte_size(region->free()),
1076           byte_size_in_proper_unit(region->used()), proper_unit_for_byte_size(region->used()));
1077       } else {
1078         _free_sets.make_free(idx, Mutator, alloc_capacity(region));
1079         log_debug(gc, free)(
1080           "  Adding Region " SIZE_FORMAT " (Free: " SIZE_FORMAT "%s, Used: " SIZE_FORMAT "%s) to mutator set",
1081           idx, byte_size_in_proper_unit(region->free()), proper_unit_for_byte_size(region->free()),
1082           byte_size_in_proper_unit(region->used()), proper_unit_for_byte_size(region->used()));
1083       }
1084     }
1085   }


1086 }
1087 
1088 // Move no more than max_xfer_regions regions from the existing Collector and OldCollector free sets to the Mutator free set.
1089 // This is called from outside the heap lock.
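     // Each of the three passes below (empty Collector regions, empty OldCollector regions, then non-empty
     // Collector regions) acquires the heap lock only for its own scan, so mutator allocations can interleave.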
1090 void ShenandoahFreeSet::move_collector_sets_to_mutator(size_t max_xfer_regions) {
1091   size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1092   size_t collector_empty_xfer = 0;
1093   size_t collector_not_empty_xfer = 0;
1094   size_t old_collector_empty_xfer = 0;
1095 
1096   // Process empty regions within the Collector free set
1097   if ((max_xfer_regions > 0) && (_free_sets.leftmost_empty(Collector) <= _free_sets.rightmost_empty(Collector))) {


1098     ShenandoahHeapLocker locker(_heap->lock());
1099     for (size_t idx = _free_sets.leftmost_empty(Collector);
1100          (max_xfer_regions > 0) && (idx <= _free_sets.rightmost_empty(Collector)); idx++) {
1101       if (_free_sets.in_free_set(idx, Collector) && can_allocate_from(idx)) {
1102         _free_sets.move_to_set(idx, Mutator, region_size_bytes);





1103         max_xfer_regions--;
1104         collector_empty_xfer += region_size_bytes;
1105       }

1106     }
1107   }
1108 
1109   // Process empty regions within the OldCollector free set
1110   size_t old_collector_regions = 0;
1111   if ((max_xfer_regions > 0) && (_free_sets.leftmost_empty(OldCollector) <= _free_sets.rightmost_empty(OldCollector))) {
1112     ShenandoahHeapLocker locker(_heap->lock());
1113     for (size_t idx = _free_sets.leftmost_empty(OldCollector);
1114          (max_xfer_regions > 0) && (idx <= _free_sets.rightmost_empty(OldCollector)); idx++) {
1115       if (_free_sets.in_free_set(idx, OldCollector) && can_allocate_from(idx)) {
1116         _free_sets.move_to_set(idx, Mutator, region_size_bytes);





1117         max_xfer_regions--;
1118         old_collector_empty_xfer += region_size_bytes;
1119         old_collector_regions++;
1120       }
1121     }
1122     if (old_collector_regions > 0) {
1123       ShenandoahGenerationalHeap::cast(_heap)->generation_sizer()->transfer_to_young(old_collector_regions);
1124     }
1125   }
1126 
1127   // If there are any non-empty regions within the Collector set, we can also move them to the Mutator free set
1128   if ((max_xfer_regions > 0) && (_free_sets.leftmost(Collector) <= _free_sets.rightmost(Collector))) {
1129     ShenandoahHeapLocker locker(_heap->lock());
1130     for (size_t idx = _free_sets.leftmost(Collector); (max_xfer_regions > 0) && (idx <= _free_sets.rightmost(Collector)); idx++) {
1131       size_t alloc_capacity = this->alloc_capacity(idx);
1132       if (_free_sets.in_free_set(idx, Collector) && (alloc_capacity > 0)) {
1133         _free_sets.move_to_set(idx, Mutator, alloc_capacity);
1134         max_xfer_regions--;
1135         collector_not_empty_xfer += alloc_capacity;
1136       }

1137     }
1138   }
1139 
1140   size_t collector_xfer = collector_empty_xfer + collector_not_empty_xfer;
1141   size_t total_xfer = collector_xfer + old_collector_empty_xfer;
1142   log_info(gc, free)("At start of update refs, moving " SIZE_FORMAT "%s to Mutator free set from Collector Reserve ("
1143                      SIZE_FORMAT "%s) and from Old Collector Reserve (" SIZE_FORMAT "%s)",
1144                      byte_size_in_proper_unit(total_xfer), proper_unit_for_byte_size(total_xfer),
1145                      byte_size_in_proper_unit(collector_xfer), proper_unit_for_byte_size(collector_xfer),
1146                      byte_size_in_proper_unit(old_collector_empty_xfer), proper_unit_for_byte_size(old_collector_empty_xfer));
1147 }
1148 


1149 
1150 // Overwrite arguments to represent the amount of memory in each generation that is about to be recycled
1151 void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_cset_regions, size_t &old_cset_regions,
1152                                            size_t &first_old_region, size_t &last_old_region, size_t &old_region_count) {
1153   shenandoah_assert_heaplocked();
1154   // This resets all state information, removing all regions from all sets.
1155   clear();
1156   log_debug(gc, free)("Rebuilding FreeSet");
1157 
1158   // This places regions that have alloc_capacity into the old_collector set if they identify as is_old() or the
1159   // mutator set otherwise.
1160   find_regions_with_alloc_capacity(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
1161 }
1162 
1163 void ShenandoahFreeSet::rebuild(size_t young_cset_regions, size_t old_cset_regions, bool have_evacuation_reserves) {
1164   shenandoah_assert_heaplocked();
1165   size_t young_reserve(0), old_reserve(0);
1166 
1167   if (!_heap->mode()->is_generational()) {
1168     young_reserve = (_heap->max_capacity() / 100) * ShenandoahEvacReserve;
1169     old_reserve = 0;






1170   } else {
1171     compute_young_and_old_reserves(young_cset_regions, old_cset_regions, have_evacuation_reserves,
1172                                    young_reserve, old_reserve);
1173 
1174   }
1175 
1176   reserve_regions(young_reserve, old_reserve);
1177   _free_sets.establish_alloc_bias(OldCollector);
1178   _free_sets.assert_bounds();
1179   log_status();
1180 }
1181 
1182 void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions, size_t old_cset_regions, bool have_evacuation_reserves,
1183                                                        size_t& young_reserve_result, size_t& old_reserve_result) const {
1184   shenandoah_assert_generational();
1185   const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1186 
1187   ShenandoahOldGeneration* const old_generation = _heap->old_generation();
1188   size_t old_available = old_generation->available();
1189   size_t old_unaffiliated_regions = old_generation->free_unaffiliated_regions();
1190   ShenandoahYoungGeneration* const young_generation = _heap->young_generation();
1191   size_t young_capacity = young_generation->max_capacity();
1192   size_t young_unaffiliated_regions = young_generation->free_unaffiliated_regions();
1193 
1194   // Add in the regions we anticipate to be freed by evacuation of the collection set
1195   old_unaffiliated_regions += old_cset_regions;
1196   old_available += old_cset_regions * region_size_bytes;
1197   young_unaffiliated_regions += young_cset_regions;
1198 
1199   // Consult old-region balance to make adjustments to current generation capacities and availability.
1200   // The generation region transfers take place after we rebuild.
1201   const ssize_t old_region_balance = old_generation->get_region_balance();
1202   if (old_region_balance != 0) {
1203     if (old_region_balance > 0) {
1204       assert(old_region_balance <= checked_cast<ssize_t>(old_unaffiliated_regions), "Cannot transfer regions that are affiliated");
1205     } else {
1206       assert(0 - old_region_balance <= checked_cast<ssize_t>(young_unaffiliated_regions), "Cannot transfer regions that are affiliated");
1207     }
1208 
1209     ssize_t xfer_bytes = old_region_balance * checked_cast<ssize_t>(region_size_bytes);
1210     old_available -= xfer_bytes;
1211     old_unaffiliated_regions -= old_region_balance;
1212     young_capacity += xfer_bytes;
1213     young_unaffiliated_regions += old_region_balance;
1214   }
1215 
1216   // All allocations taken from the old collector set are performed by GC, generally using PLABs for both
1217   // promotions and evacuations.  The split between old memory reserved for evacuation and old memory
1218   // reserved for promotion is enforced using thread-local variables that prescribe intentions for
1219   // each PLAB's available memory.
1220   if (have_evacuation_reserves) {
1221     // We are rebuilding at the end of final mark, having already established evacuation budgets for this GC pass.
1222     const size_t promoted_reserve = old_generation->get_promoted_reserve();
1223     const size_t old_evac_reserve = old_generation->get_evacuation_reserve();
1224     young_reserve_result = young_generation->get_evacuation_reserve();
1225     old_reserve_result = promoted_reserve + old_evac_reserve;
1226     assert(old_reserve_result <= old_available,
1227            "Cannot reserve (" SIZE_FORMAT " + " SIZE_FORMAT") more OLD than is available: " SIZE_FORMAT,
1228            promoted_reserve, old_evac_reserve, old_available);
1229   } else {
1230     // We are rebuilding at end of GC, so we set aside budgets specified on command line (or defaults)
1231     young_reserve_result = (young_capacity * ShenandoahEvacReserve) / 100;
1232     // The auto-sizer has already made old-gen large enough to hold all anticipated evacuations and promotions.
1233     // Affiliated old-gen regions are already in the OldCollector free set.  Add in the relevant number of
1234     // unaffiliated regions.
1235     old_reserve_result = old_available;
1236   }
1237 
1238   // Old available regions that have less than PLAB::min_size() of available memory are not placed into the OldCollector
1239   // free set.  Because of this, old_available may not have enough memory to represent the intended reserve.  Adjust
1240   // the reserve downward to account for this possibility. This loss is part of the reason why the original budget
1241   // was adjusted with ShenandoahOldEvacWaste and ShenandoahOldPromoWaste multipliers.
1242   if (old_reserve_result > _free_sets.capacity_of(OldCollector) + old_unaffiliated_regions * region_size_bytes) {
1243     old_reserve_result = _free_sets.capacity_of(OldCollector) + old_unaffiliated_regions * region_size_bytes;
1244   }
1245 
       // The young reserve cannot exceed the memory available in unaffiliated young regions.
1246   if (young_reserve_result > young_unaffiliated_regions * region_size_bytes) {
1247     young_reserve_result = young_unaffiliated_regions * region_size_bytes;
1248   }
1249 }
1250 
1251 // Having placed all regions that have allocation capacity into the mutator set if they identify as is_young()
1252 // or into the old collector set if they identify as is_old(), move some of these regions from the mutator set
1253 // into the collector set or old collector set to ensure that the memory available for allocations within
1254 // the collector set is at least to_reserve, and the memory available for allocations within the old collector set
1255 // is at least to_reserve_old.
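     // Regions are visited from the highest index downward so that the Collector and OldCollector reserves end
     // up at the high end of the heap, consistent with the packing strategy described above for
     // find_regions_with_alloc_capacity().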
1256 void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old) {
1257   for (size_t i = _heap->num_regions(); i > 0; i--) {
1258     size_t idx = i - 1;
1259     ShenandoahHeapRegion* r = _heap->get_region(idx);
1260     if (!_free_sets.in_free_set(idx, Mutator)) {

1261       continue;
1262     }
1263 
1264     size_t ac = alloc_capacity(r);
1265     assert (ac > 0, "Membership in free set implies has capacity");
1266     assert (!r->is_old(), "mutator_is_free regions should not be affiliated OLD");
1267 
1268     bool move_to_old = _free_sets.capacity_of(OldCollector) < to_reserve_old;
1269     bool move_to_young = _free_sets.capacity_of(Collector) < to_reserve;
1270 
1271     if (!move_to_old && !move_to_young) {
1272       // We've satisfied both to_reserve and to_reserve_old

1273       break;
1274     }
1275 
1276     if (move_to_old) {
1277       if (r->is_trash() || !r->is_affiliated()) {
1278         // OLD regions that have available memory are already in the old_collector free set
1279         _free_sets.move_to_set(idx, OldCollector, ac);
1280         log_debug(gc, free)("  Shifting region " SIZE_FORMAT " from mutator_free to old_collector_free", idx);
1281         continue;
1282       }
1283     }
1284 
1285     if (move_to_young) {
1286       // Note: In a previous implementation, regions were only placed into the survivor space (collector_is_free) if
1287       // they were entirely empty.  I'm not sure I understand the rationale for that.  That alternative behavior would
1288       // tend to mix survivor objects with ephemeral objects, making it more difficult to reclaim the memory for the
1289       // ephemeral objects.  It also delays aging of regions, causing promotion in place to be delayed.
1290       _free_sets.move_to_set(idx, Collector, ac);





1291       log_debug(gc)("  Shifting region " SIZE_FORMAT " from mutator_free to collector_free", idx);
1292     }
1293   }
1294 
1295   if (LogTarget(Info, gc, free)::is_enabled()) {
1296     size_t old_reserve = _free_sets.capacity_of(OldCollector);
1297     if (old_reserve < to_reserve_old) {
1298       log_info(gc, free)("Wanted " PROPERFMT " for old reserve, but only reserved: " PROPERFMT,
1299                          PROPERFMTARGS(to_reserve_old), PROPERFMTARGS(old_reserve));
1300     }
1301     size_t young_reserve = _free_sets.capacity_of(Collector);
1302     if (young_reserve < to_reserve) {
1303       log_info(gc, free)("Wanted " PROPERFMT " for young reserve, but only reserved: " PROPERFMT,
1304                          PROPERFMTARGS(to_reserve), PROPERFMTARGS(young_reserve));
1305     }
1306   }
1307 }
1308 
1309 void ShenandoahFreeSet::log_status() {
1310   shenandoah_assert_heaplocked();
1311 
1312 #ifdef ASSERT
1313   // Dump of the FreeSet details is only enabled if assertions are enabled
1314   if (LogTarget(Debug, gc, free)::is_enabled()) {
1315 #define BUFFER_SIZE 80
1316     size_t retired_old = 0;
1317     size_t retired_old_humongous = 0;
1318     size_t retired_young = 0;
1319     size_t retired_young_humongous = 0;
1320     size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1321     size_t retired_young_waste = 0;
1322     size_t retired_old_waste = 0;
1323     size_t consumed_collector = 0;
1324     size_t consumed_old_collector = 0;
1325     size_t consumed_mutator = 0;
1326     size_t available_old = 0;
1327     size_t available_young = 0;
1328     size_t available_mutator = 0;
1329     size_t available_collector = 0;
1330     size_t available_old_collector = 0;
1331 
1332     char buffer[BUFFER_SIZE];
1333     for (uint i = 0; i < BUFFER_SIZE; i++) {
1334       buffer[i] = '\0';
1335     }
1336     log_debug(gc, free)("FreeSet map legend:"
1337                        " M:mutator_free C:collector_free O:old_collector_free"
1338                        " H:humongous ~:retired old _:retired young");
1339     log_debug(gc, free)(" mutator free range [" SIZE_FORMAT ".." SIZE_FORMAT "], "
1340                        " collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "], "
1341                        "old collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "] allocates from %s",
1342                        _free_sets.leftmost(Mutator), _free_sets.rightmost(Mutator),
1343                        _free_sets.leftmost(Collector), _free_sets.rightmost(Collector),
1344                        _free_sets.leftmost(OldCollector), _free_sets.rightmost(OldCollector),
1345                        _free_sets.alloc_from_left_bias(OldCollector)? "left to right": "right to left");
1346 
1347     for (uint i = 0; i < _heap->num_regions(); i++) {
1348       ShenandoahHeapRegion *r = _heap->get_region(i);
1349       uint idx = i % 64;
1350       if ((i != 0) && (idx == 0)) {
1351         log_debug(gc, free)(" %6u: %s", i-64, buffer);
1352       }
1353       if (_free_sets.in_free_set(i, Mutator)) {
1354         assert(!r->is_old(), "Old regions should not be in mutator_free set");
1355         size_t capacity = alloc_capacity(r);
1356         available_mutator += capacity;
1357         consumed_mutator += region_size_bytes - capacity;
1358         buffer[idx] = (capacity == region_size_bytes)? 'M': 'm';
1359       } else if (_free_sets.in_free_set(i, Collector)) {
1360         assert(!r->is_old(), "Old regions should not be in collector_free set");
1361         size_t capacity = alloc_capacity(r);
1362         available_collector += capacity;
1363         consumed_collector += region_size_bytes - capacity;
1364         buffer[idx] = (capacity == region_size_bytes)? 'C': 'c';
1365       } else if (_free_sets.in_free_set(i, OldCollector)) {
1366         size_t capacity = alloc_capacity(r);
1367         available_old_collector += capacity;
1368         consumed_old_collector += region_size_bytes - capacity;
1369         buffer[idx] = (capacity == region_size_bytes)? 'O': 'o';
1370       } else if (r->is_humongous()) {
1371         if (r->is_old()) {
1372           buffer[idx] = 'H';
1373           retired_old_humongous += region_size_bytes;
1374         } else {
1375           buffer[idx] = 'h';
1376           retired_young_humongous += region_size_bytes;
1377         }
1378       } else {
1379         if (r->is_old()) {
1380           buffer[idx] = '~';
1381           retired_old_waste += alloc_capacity(r);
1382           retired_old += region_size_bytes;
1383         } else {
1384           buffer[idx] = '_';
1385           retired_young_waste += alloc_capacity(r);
1386           retired_young += region_size_bytes;
1387         }
1388       }
1389     }
1390     uint remnant = _heap->num_regions() % 64;
1391     if (remnant > 0) {
1392       buffer[remnant] = '\0';
1393     } else {
1394       remnant = 64;
1395     }
1396     log_debug(gc, free)(" %6u: %s", (uint) (_heap->num_regions() - remnant), buffer);
1397     size_t total_young = retired_young + retired_young_humongous;
1398     size_t total_old = retired_old + retired_old_humongous;
1399   }
1400 #endif
1401 
1402   LogTarget(Info, gc, free) lt;
1403   if (lt.is_enabled()) {
1404     ResourceMark rm;
1405     LogStream ls(lt);
1406 
1407     {
1408       size_t last_idx = 0;
1409       size_t max = 0;
1410       size_t max_contig = 0;
1411       size_t empty_contig = 0;
1412 
1413       size_t total_used = 0;
1414       size_t total_free = 0;
1415       size_t total_free_ext = 0;
1416 
1417       for (size_t idx = _free_sets.leftmost(Mutator); idx <= _free_sets.rightmost(Mutator); idx++) {
1418         if (_free_sets.in_free_set(idx, Mutator)) {

1419           ShenandoahHeapRegion *r = _heap->get_region(idx);
1420           size_t free = alloc_capacity(r);
1421           max = MAX2(max, free);
1422           if (r->is_empty()) {
1423             total_free_ext += free;
1424             if (last_idx + 1 == idx) {
1425               empty_contig++;
1426             } else {
1427               empty_contig = 1;
1428             }
1429           } else {
1430             empty_contig = 0;
1431           }
1432           total_used += r->used();
1433           total_free += free;
1434           max_contig = MAX2(max_contig, empty_contig);
1435           last_idx = idx;
1436         }
1437       }
1438 
1439       size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
1440       size_t free = capacity() - used();
1441 
1442       assert(free == total_free, "Sum of free within mutator regions (" SIZE_FORMAT
1443              ") should match mutator capacity (" SIZE_FORMAT ") minus mutator used (" SIZE_FORMAT ")",
1444              total_free, capacity(), used());

1445 
1446       ls.print("Free: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s regular, " SIZE_FORMAT "%s humongous, ",
1447                byte_size_in_proper_unit(total_free),    proper_unit_for_byte_size(total_free),
1448                byte_size_in_proper_unit(max),           proper_unit_for_byte_size(max),
1449                byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous)
1450       );
1451 
1452       ls.print("Frag: ");
1453       size_t frag_ext;
1454       if (total_free_ext > 0) {
1455         frag_ext = 100 - (100 * max_humongous / total_free_ext);
1456       } else {
1457         frag_ext = 0;
1458       }
1459       ls.print(SIZE_FORMAT "%% external, ", frag_ext);
1460 
1461       size_t frag_int;
1462       if (_free_sets.count(Mutator) > 0) {
1463         frag_int = (100 * (total_used / _free_sets.count(Mutator)) / ShenandoahHeapRegion::region_size_bytes());

1464       } else {
1465         frag_int = 0;
1466       }
1467       ls.print(SIZE_FORMAT "%% internal; ", frag_int);
1468       ls.print("Used: " SIZE_FORMAT "%s, Mutator Free: " SIZE_FORMAT,
1469                byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used), _free_sets.count(Mutator));

1470     }
1471 
1472     {
1473       size_t max = 0;
1474       size_t total_free = 0;
1475       size_t total_used = 0;
1476 
1477       for (size_t idx = _free_sets.leftmost(Collector); idx <= _free_sets.rightmost(Collector); idx++) {
1478         if (_free_sets.in_free_set(idx, Collector)) {

1479           ShenandoahHeapRegion *r = _heap->get_region(idx);
1480           size_t free = alloc_capacity(r);
1481           max = MAX2(max, free);
1482           total_free += free;
1483           total_used += r->used();
1484         }
1485       }
1486       ls.print(" Collector Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s; Used: " SIZE_FORMAT "%s",
1487                byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
1488                byte_size_in_proper_unit(max),        proper_unit_for_byte_size(max),
1489                byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used));
1490     }
1491 
1492     if (_heap->mode()->is_generational()) {
1493       size_t max = 0;
1494       size_t total_free = 0;
1495       size_t total_used = 0;
1496 
1497       for (size_t idx = _free_sets.leftmost(OldCollector); idx <= _free_sets.rightmost(OldCollector); idx++) {
1498         if (_free_sets.in_free_set(idx, OldCollector)) {
1499           ShenandoahHeapRegion *r = _heap->get_region(idx);
1500           size_t free = alloc_capacity(r);
1501           max = MAX2(max, free);
1502           total_free += free;
1503           total_used += r->used();
1504         }
1505       }
1506       ls.print_cr(" Old Collector Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s; Used: " SIZE_FORMAT "%s",
1507                   byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
1508                   byte_size_in_proper_unit(max),        proper_unit_for_byte_size(max),
1509                   byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used));
1510     }
1511   }
1512 }
1513 
1514 HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
1515   shenandoah_assert_heaplocked();
1516 
1517   // Allocation request is known to satisfy all memory budgeting constraints.
1518   if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
1519     switch (req.type()) {
1520       case ShenandoahAllocRequest::_alloc_shared:
1521       case ShenandoahAllocRequest::_alloc_shared_gc:
1522         in_new_region = true;
1523         return allocate_contiguous(req);
1524       case ShenandoahAllocRequest::_alloc_plab:
1525       case ShenandoahAllocRequest::_alloc_gclab:
1526       case ShenandoahAllocRequest::_alloc_tlab:
1527         in_new_region = false;
1528         assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
1529                req.size(), ShenandoahHeapRegion::humongous_threshold_words());
1530         return nullptr;
1531       default:
1532         ShouldNotReachHere();
1533         return nullptr;
1534     }
1535   } else {
1536     return allocate_single(req, in_new_region);
1537   }
1538 }
1539 
1540 size_t ShenandoahFreeSet::unsafe_peek_free() const {
1541   // Deliberately not locked; this method is unsafe while the free set is being modified.
1542 
1543   for (size_t index = _free_sets.leftmost(Mutator); index <= _free_sets.rightmost(Mutator); index++) {
1544     if (index < _free_sets.max() && _free_sets.in_free_set(index, Mutator)) {
1545       ShenandoahHeapRegion* r = _heap->get_region(index);
1546       if (r->free() >= MinTLABSize) {
1547         return r->free();
1548       }
1549     }
1550   }
1551 
1552   // It appears that no regions are left
1553   return 0;
1554 }
1555 
1556 void ShenandoahFreeSet::print_on(outputStream* out) const {
1557   out->print_cr("Mutator Free Set: " SIZE_FORMAT "", _free_sets.count(Mutator));
1558   for (size_t index = _free_sets.leftmost(Mutator); index <= _free_sets.rightmost(Mutator); index++) {
1559     if (_free_sets.in_free_set(index, Mutator)) {
1560       _heap->get_region(index)->print_on(out);
1561     }
1562   }
1563   out->print_cr("Collector Free Set: " SIZE_FORMAT "", _free_sets.count(Collector));
1564   for (size_t index = _free_sets.leftmost(Collector); index <= _free_sets.rightmost(Collector); index++) {
1565     if (_free_sets.in_free_set(index, Collector)) {
1566       _heap->get_region(index)->print_on(out);
1567     }
1568   }
1569   if (_heap->mode()->is_generational()) {
1570     out->print_cr("Old Collector Free Set: " SIZE_FORMAT "", _free_sets.count(OldCollector));
1571     for (size_t index = _free_sets.leftmost(OldCollector); index <= _free_sets.rightmost(OldCollector); index++) {
1572       if (_free_sets.in_free_set(index, OldCollector)) {
1573         _heap->get_region(index)->print_on(out);
1574       }
1575     }
1576   }
1577 }
1578 
1579 /*
1580  * Internal fragmentation metric: describes how fragmented the heap regions are.
1581  *
1582  * It is derived as:
1583  *
1584  *               sum(used[i]^2, i=0..k)
1585  *   IF = 1 - ------------------------------
1586  *              C * sum(used[i], i=0..k)
1587  *
1588  * ...where k is the number of regions in computation, C is the region capacity, and
1589  * used[i] is the used space in the region.
1590  *
1591  * The non-linearity causes IF to be lower for the cases where the same total heap
1592  * used is densely packed. For example:
1593  *   a) Heap is completely full  => IF = 0
1594  *   b) Heap is half full, first 50% regions are completely full => IF = 0
1595  *   c) Heap is half full, each region is 50% full => IF = 1/2
1596  *   d) Heap is quarter full, first 50% regions are completely full => IF = 0
1597  *   e) Heap is quarter full, each region is 25% full => IF = 3/4
1598  *   f) Heap has one small object per each region => IF =~ 1
1599  */
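     // Worked example (illustrative): two regions of capacity C, one fully used and one half used:
     //   IF = 1 - (C^2 + (C/2)^2) / (C * (C + C/2)) = 1 - 1.25*C^2 / (1.5*C^2) = 1/6.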
1600 double ShenandoahFreeSet::internal_fragmentation() {
1601   double squared = 0;
1602   double linear = 0;
1603   int count = 0;
1604 
1605   for (size_t index = _free_sets.leftmost(Mutator); index <= _free_sets.rightmost(Mutator); index++) {
1606     if (_free_sets.in_free_set(index, Mutator)) {
1607       ShenandoahHeapRegion* r = _heap->get_region(index);
1608       size_t used = r->used();
1609       squared += used * used;
1610       linear += used;
1611       count++;
1612     }


1613   }
1614 
1615   if (count > 0) {
1616     double s = squared / (ShenandoahHeapRegion::region_size_bytes() * linear);
1617     return 1 - s;
1618   } else {
1619     return 0;
1620   }
1621 }
1622 
1623 /*
1624  * External fragmentation metric: describes how fragmented the heap is.
1625  *
1626  * It is derived as:
1627  *
1628  *   EF = 1 - largest_contiguous_free / total_free
1629  *
1630  * For example:
1631  *   a) Heap is completely empty => EF = 0
1632  *   b) Heap is completely full => EF = 0
1633  *   c) Heap is first-half full => EF = 1/2
1634  *   d) Heap is half full, full and empty regions interleave => EF =~ 1
1635  */
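     // Worked example (illustrative): 10 empty mutator regions of which the longest contiguous run is 4:
     //   EF = 1 - 4/10 = 0.6.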
1636 double ShenandoahFreeSet::external_fragmentation() {
1637   size_t last_idx = 0;
1638   size_t max_contig = 0;
1639   size_t empty_contig = 0;
1640 
1641   size_t free = 0;
1642 
1643   for (size_t index = _free_sets.leftmost(Mutator); index <= _free_sets.rightmost(Mutator); index++) {
1644     if (_free_sets.in_free_set(index, Mutator)) {
1645       ShenandoahHeapRegion* r = _heap->get_region(index);
1646       if (r->is_empty()) {
1647         free += ShenandoahHeapRegion::region_size_bytes();
1648         if (last_idx + 1 == index) {
1649           empty_contig++;
1650         } else {
1651           empty_contig = 1;
1652         }
1653       } else {
1654         empty_contig = 0;
1655       }
1656 
1657       max_contig = MAX2(max_contig, empty_contig);
1658       last_idx = index;
1659     }



1660   }
1661 
1662   if (free > 0) {
1663     return 1 - (1.0 * max_contig * ShenandoahHeapRegion::region_size_bytes() / free);
1664   } else {
1665     return 0;
1666   }
1667 }
1668 