8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "gc/shared/tlab_globals.hpp"
28 #include "gc/shenandoah/shenandoahFreeSet.hpp"
29 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
30 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
31 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
32 #include "gc/shenandoah/shenandoahSimpleBitMap.hpp"
33 #include "gc/shenandoah/shenandoahSimpleBitMap.inline.hpp"
34 #include "logging/logStream.hpp"
35 #include "memory/resourceArea.hpp"
36 #include "runtime/orderAccess.hpp"
37
38 static const char* partition_name(ShenandoahFreeSetPartitionId t) {
39 switch (t) {
40 case ShenandoahFreeSetPartitionId::NotFree: return "NotFree";
41 case ShenandoahFreeSetPartitionId::Mutator: return "Mutator";
42 case ShenandoahFreeSetPartitionId::Collector: return "Collector";
43 default:
44 ShouldNotReachHere();
45 return "Unrecognized";
46 }
47 }
48
49 #ifndef PRODUCT
50 void ShenandoahRegionPartitions::dump_bitmap() const {
51 log_debug(gc)("Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "], Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
52 _leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)],
53 _rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)],
54 _leftmosts[int(ShenandoahFreeSetPartitionId::Collector)],
55 _rightmosts[int(ShenandoahFreeSetPartitionId::Collector)]);
56 log_debug(gc)("Empty Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT
57 "], Empty Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
58 _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
59 _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
60 _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
61 _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)]);
62
63 log_debug(gc)("%6s: %18s %18s %18s", "index", "Mutator Bits", "Collector Bits", "NotFree Bits");
64 dump_bitmap_range(0, _max-1);
65 }
66
67 void ShenandoahRegionPartitions::dump_bitmap_range(idx_t start_region_idx, idx_t end_region_idx) const {
68 assert((start_region_idx >= 0) && (start_region_idx < (idx_t) _max), "precondition");
69 assert((end_region_idx >= 0) && (end_region_idx < (idx_t) _max), "precondition");
70 idx_t aligned_start = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(start_region_idx);
71 idx_t aligned_end = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(end_region_idx);
72 idx_t alignment = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].alignment();
73 while (aligned_start <= aligned_end) {
74 dump_bitmap_row(aligned_start);
75 aligned_start += alignment;
76 }
77 }
78
79 void ShenandoahRegionPartitions::dump_bitmap_row(idx_t region_idx) const {
80 assert((region_idx >= 0) && (region_idx < (idx_t) _max), "precondition");
81 idx_t aligned_idx = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(region_idx);
82 uintx mutator_bits = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].bits_at(aligned_idx);
83 uintx collector_bits = _membership[int(ShenandoahFreeSetPartitionId::Collector)].bits_at(aligned_idx);
84 uintx free_bits = mutator_bits | collector_bits;
85 uintx notfree_bits = ~free_bits;
86 log_debug(gc)(SSIZE_FORMAT_W(6) ": " SIZE_FORMAT_X_0 " 0x" SIZE_FORMAT_X_0 " 0x" SIZE_FORMAT_X_0,
87 aligned_idx, mutator_bits, collector_bits, notfree_bits);
88 }
89 #endif
90
91 ShenandoahRegionPartitions::ShenandoahRegionPartitions(size_t max_regions, ShenandoahFreeSet* free_set) :
92 _max(max_regions),
93 _region_size_bytes(ShenandoahHeapRegion::region_size_bytes()),
94 _free_set(free_set),
95 _membership{ ShenandoahSimpleBitMap(max_regions), ShenandoahSimpleBitMap(max_regions) }
96 {
97 make_all_regions_unavailable();
98 }
99
100 inline bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) const {
101 return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
102 }
103
104 inline bool ShenandoahFreeSet::can_allocate_from(size_t idx) const {
105 ShenandoahHeapRegion* r = _heap->get_region(idx);
106 return can_allocate_from(r);
107 }
108
109 inline size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) const {
110 if (r->is_trash()) {
111 // This would be recycled on allocation path
112 return ShenandoahHeapRegion::region_size_bytes();
113 } else {
114 return r->free();
115 }
145 // which_partition is shrinking after the region that used to be leftmost is retired.
146 return idx;
147 }
148
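// Reset every partition to its initial state: no member regions, inverted (empty) intervals
// [_max, -1], and zero capacity and used bytes. This is invoked when the free set is cleared
// or rebuilt (see clear_internal() and find_regions_with_alloc_capacity()).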
149 void ShenandoahRegionPartitions::make_all_regions_unavailable() {
150 for (size_t partition_id = 0; partition_id < IntNumPartitions; partition_id++) {
151 _membership[partition_id].clear_all();
152 _leftmosts[partition_id] = _max;
153 _rightmosts[partition_id] = -1;
154 _leftmosts_empty[partition_id] = _max;
155 _rightmosts_empty[partition_id] = -1;
156 _capacity[partition_id] = 0;
157 _used[partition_id] = 0;
158 }
159 _region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = _region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
160 }
161
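// Install freshly computed bounds and accounting for the Mutator partition (typically at rebuild,
// after find_regions_with_alloc_capacity() has scanned the heap), and reset the Collector
// partition to empty.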
162 void ShenandoahRegionPartitions::establish_mutator_intervals(idx_t mutator_leftmost, idx_t mutator_rightmost,
163 idx_t mutator_leftmost_empty, idx_t mutator_rightmost_empty,
164 size_t mutator_region_count, size_t mutator_used) {
166 _leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_leftmost;
167 _rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_rightmost;
168 _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_leftmost_empty;
169 _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_rightmost_empty;
170
171 _region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_region_count;
172 _used[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_used;
173 _capacity[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_region_count * _region_size_bytes;
174
175 _leftmosts[int(ShenandoahFreeSetPartitionId::Collector)] = _max;
176 _rightmosts[int(ShenandoahFreeSetPartitionId::Collector)] = -1;
177 _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)] = _max;
178 _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)] = -1;
179
180 _region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
181 _used[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
182 _capacity[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
183 }
184
185 void ShenandoahRegionPartitions::increase_used(ShenandoahFreeSetPartitionId which_partition, size_t bytes) {
186 assert (which_partition < NumPartitions, "Partition must be valid");
187 _used[int(which_partition)] += bytes;
188 assert (_used[int(which_partition)] <= _capacity[int(which_partition)],
189 "Must not use (" SIZE_FORMAT ") more than capacity (" SIZE_FORMAT ") after increase by " SIZE_FORMAT,
190 _used[int(which_partition)], _capacity[int(which_partition)], bytes);
191 }
192
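// After regions in [low_idx, high_idx] have been removed from the given partition, tighten the
// cached leftmost/rightmost (and leftmost_empty/rightmost_empty) bounds. If the partition has
// become empty, its interval collapses to the canonical empty form [_max, -1].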
193 inline void ShenandoahRegionPartitions::shrink_interval_if_range_modifies_either_boundary(
194 ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx) {
195 assert((low_idx <= high_idx) && (low_idx >= 0) && (high_idx < _max), "Range must span legal index values");
196 if (low_idx == leftmost(partition)) {
197 assert (!_membership[int(partition)].is_set(low_idx), "Do not shrink interval if region not removed");
198 if (high_idx + 1 == _max) {
199 _leftmosts[int(partition)] = _max;
200 } else {
201 _leftmosts[int(partition)] = find_index_of_next_available_region(partition, high_idx + 1);
202 }
203 if (_leftmosts_empty[int(partition)] < _leftmosts[int(partition)]) {
204 // This gets us closer to where we need to be; we'll scan further when leftmosts_empty is requested.
205 _leftmosts_empty[int(partition)] = leftmost(partition);
206 }
207 }
208 if (high_idx == _rightmosts[int(partition)]) {
209 assert (!_membership[int(partition)].is_set(high_idx), "Do not shrink interval if region not removed");
210 if (low_idx == 0) {
211 _rightmosts[int(partition)] = -1;
212 } else {
213 _rightmosts[int(partition)] = find_index_of_previous_available_region(partition, low_idx - 1);
214 }
215 if (_rightmosts_empty[int(partition)] > _rightmosts[int(partition)]) {
216 // This gets us closer to where we need to be; we'll scan further when rightmosts_empty is requested.
217 _rightmosts_empty[int(partition)] = _rightmosts[int(partition)];
218 }
219 }
220 if (_leftmosts[int(partition)] > _rightmosts[int(partition)]) {
221 _leftmosts[int(partition)] = _max;
222 _rightmosts[int(partition)] = -1;
223 _leftmosts_empty[int(partition)] = _max;
224 _rightmosts_empty[int(partition)] = -1;
225 }
272
273 if (used_bytes < _region_size_bytes) {
274 // Count the alignment pad remnant of memory as used when we retire this region
275 increase_used(partition, _region_size_bytes - used_bytes);
276 }
277 _membership[int(partition)].clear_bit(idx);
278 shrink_interval_if_boundary_modified(partition, idx);
279 _region_counts[int(partition)]--;
280 }
281
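// Register region idx as a free member of which_partition. The region contributes a full region
// of capacity; any portion that is not available is counted as used within the partition.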
282 void ShenandoahRegionPartitions::make_free(idx_t idx, ShenandoahFreeSetPartitionId which_partition, size_t available) {
283 assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
284 assert (membership(idx) == ShenandoahFreeSetPartitionId::NotFree, "Cannot make free if already free");
285 assert (which_partition < NumPartitions, "selected free partition must be valid");
286 assert (available <= _region_size_bytes, "Available cannot exceed region size");
287
288 _membership[int(which_partition)].set_bit(idx);
289 _capacity[int(which_partition)] += _region_size_bytes;
290 _used[int(which_partition)] += _region_size_bytes - available;
291 expand_interval_if_boundary_modified(which_partition, idx, available);
292
293 _region_counts[int(which_partition)]++;
294 }
295
296 void ShenandoahRegionPartitions::move_from_partition_to_partition(idx_t idx, ShenandoahFreeSetPartitionId orig_partition,
297 ShenandoahFreeSetPartitionId new_partition, size_t available) {
298 assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
299 assert (orig_partition < NumPartitions, "Original partition must be valid");
300 assert (new_partition < NumPartitions, "New partition must be valid");
301 assert (available <= _region_size_bytes, "Available cannot exceed region size");
302
303 // Expected transitions:
304 // During rebuild: Mutator => Collector
305 // During flip_to_gc: Mutator empty => Collector
306 // At start of update refs: Collector => Mutator
307 assert (((available <= _region_size_bytes) &&
308 (((orig_partition == ShenandoahFreeSetPartitionId::Mutator)
309 && (new_partition == ShenandoahFreeSetPartitionId::Collector)) ||
310 ((orig_partition == ShenandoahFreeSetPartitionId::Collector)
311 && (new_partition == ShenandoahFreeSetPartitionId::Mutator)))) ||
312 ((available == _region_size_bytes) &&
313 ((orig_partition == ShenandoahFreeSetPartitionId::Mutator)
314 && (new_partition == ShenandoahFreeSetPartitionId::Collector))), "Unexpected movement between partitions");
315
316 size_t used = _region_size_bytes - available;
317
318 _membership[int(orig_partition)].clear_bit(idx);
319 _membership[int(new_partition)].set_bit(idx);
320
321 _capacity[int(orig_partition)] -= _region_size_bytes;
322 _used[int(orig_partition)] -= used;
323 shrink_interval_if_boundary_modified(orig_partition, idx);
324
325 _capacity[int(new_partition)] += _region_size_bytes;
326 _used[int(new_partition)] += used;
327 expand_interval_if_boundary_modified(new_partition, idx, available);
328
329 _region_counts[int(orig_partition)]--;
330 _region_counts[int(new_partition)]++;
331 }
332
333 const char* ShenandoahRegionPartitions::partition_membership_name(idx_t idx) const {
334 return partition_name(membership(idx));
335 }
336
465 idx_t leftmosts[UIntNumPartitions];
466 idx_t rightmosts[UIntNumPartitions];
467 idx_t empty_leftmosts[UIntNumPartitions];
468 idx_t empty_rightmosts[UIntNumPartitions];
469
470 for (uint i = 0; i < UIntNumPartitions; i++) {
471 leftmosts[i] = _max;
472 empty_leftmosts[i] = _max;
473 rightmosts[i] = -1;
474 empty_rightmosts[i] = -1;
475 }
476
477 for (idx_t i = 0; i < _max; i++) {
478 ShenandoahFreeSetPartitionId partition = membership(i);
479 switch (partition) {
480 case ShenandoahFreeSetPartitionId::NotFree:
481 break;
482
483 case ShenandoahFreeSetPartitionId::Mutator:
484 case ShenandoahFreeSetPartitionId::Collector:
485 {
486 size_t capacity = _free_set->alloc_capacity(i);
487 bool is_empty = (capacity == _region_size_bytes);
488 assert(capacity > 0, "free regions must have allocation capacity");
489 if (i < leftmosts[int(partition)]) {
490 leftmosts[int(partition)] = i;
491 }
492 if (is_empty && (i < empty_leftmosts[int(partition)])) {
493 empty_leftmosts[int(partition)] = i;
494 }
495 if (i > rightmosts[int(partition)]) {
496 rightmosts[int(partition)] = i;
497 }
498 if (is_empty && (i > empty_rightmosts[int(partition)])) {
499 empty_rightmosts[int(partition)] = i;
500 }
501 break;
502 }
503
504 default:
554
555 // If the Collector partition is empty, both leftmosts will equal _max and both rightmosts will equal -1.
556 // Likewise for empty region partitions.
557 beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
558 end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
559 assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::Collector),
560 "free regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
561 beg_off, leftmost(ShenandoahFreeSetPartitionId::Collector));
562 assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::Collector),
563 "free regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
564 end_off, rightmost(ShenandoahFreeSetPartitionId::Collector));
565
566 beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
567 end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
568 assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
569 "free empty regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
570 beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Collector));
571 assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
572 "free empty regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
573 end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Collector));
574 }
575 #endif
576
577 ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
578 _heap(heap),
579 _partitions(max_regions, this),
580 _trash_regions(NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, max_regions, mtGC)),
581 _right_to_left_bias(false),
582 _alloc_bias_weight(0)
583 {
584 clear_internal();
585 }
586
587 HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
588 shenandoah_assert_heaplocked();
589
590 // Scan the bitmap looking for a first fit.
591 //
592 // Leftmost and rightmost bounds provide enough caching to quickly find a region from which to allocate.
593 //
594 // Allocations are biased: GC allocations are taken from the high end of the heap. Regular (and TLAB)
595 // mutator allocations are taken from the middle of heap, below the memory reserved for Collector.
596 // Humongous mutator allocations are taken from the bottom of the heap.
597 //
598 // Free set maintains mutator and collector partitions. Mutator can only allocate from the
599 // Mutator partition. Collector prefers to allocate from the Collector partition, but may steal
600 // regions from the Mutator partition if the Collector partition has been depleted.
601
602 switch (req.type()) {
603 case ShenandoahAllocRequest::_alloc_tlab:
604 case ShenandoahAllocRequest::_alloc_shared: {
605 // Try to allocate in the mutator view
606 if (_alloc_bias_weight-- <= 0) {
607 // We have observed that regions not collected in previous GC cycle tend to congregate at one end or the other
608 // of the heap. Typically, these are the more recently engaged regions and the objects in these regions have not
609 // yet had a chance to die (and/or are treated as floating garbage). If we use the same allocation bias on each
610 // GC pass, these "most recently" engaged regions for GC pass N will also be the "most recently" engaged regions
611 // for GC pass N+1, and the relatively large amount of live data and/or floating garbage introduced
612 // during the most recent GC pass may once again prevent the region from being collected. We have found that
613 // alternating the allocation behavior between GC passes improves evacuation performance by 3-7% on certain
614 // benchmarks. In the best case, this has the effect of consuming these partially consumed regions before
615 // the start of the next mark cycle so all of their garbage can be efficiently reclaimed.
616 //
617 // First, finish consuming regions that are already partially consumed so as to more tightly limit ranges of
618 // available regions. Other potential benefits:
619 // 1. Eventual collection set has fewer regions because we have packed newly allocated objects into fewer regions
620 // 2. We preserve the "empty" regions longer into the GC cycle, reducing likelihood of allocation failures
621 // late in the GC cycle.
622 idx_t non_empty_on_left = (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator)
623 - _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator));
624 idx_t non_empty_on_right = (_partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator)
625 - _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator));
626 _right_to_left_bias = (non_empty_on_right > non_empty_on_left);
627 _alloc_bias_weight = _InitialAllocBiasWeight;
628 }
629 if (_right_to_left_bias) {
630 // Allocate within mutator free from high memory to low so as to preserve low memory for humongous allocations
631 if (!_partitions.is_empty(ShenandoahFreeSetPartitionId::Mutator)) {
632 // Use signed idx. Otherwise, loop will never terminate.
633 idx_t leftmost = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator);
634 for (idx_t idx = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator); idx >= leftmost; ) {
635 assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx),
636 "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, idx);
637 ShenandoahHeapRegion* r = _heap->get_region(idx);
638 // try_allocate_in() increases used if the allocation is successful.
639 HeapWord* result;
640 size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab)? req.min_size(): req.size();
641 if ((alloc_capacity(r) >= min_size) && ((result = try_allocate_in(r, req, in_new_region)) != nullptr)) {
642 return result;
643 }
644 idx = _partitions.find_index_of_previous_available_region(ShenandoahFreeSetPartitionId::Mutator, idx - 1);
645 }
646 }
647 } else {
648 // Allocate from low to high memory. This keeps the range of fully empty regions more tightly packed.
649 // Note that the most recently allocated regions tend not to be evacuated in a given GC cycle. So this
654 for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); idx <= rightmost; ) {
655 assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx),
656 "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, idx);
657 ShenandoahHeapRegion* r = _heap->get_region(idx);
658 // try_allocate_in() increases used if the allocation is successful.
659 HeapWord* result;
660 size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab)? req.min_size(): req.size();
661 if ((alloc_capacity(r) >= min_size) && ((result = try_allocate_in(r, req, in_new_region)) != nullptr)) {
662 return result;
663 }
664 idx = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, idx + 1);
665 }
666 }
667 }
668 // There is no recovery. Mutator does not touch collector view at all.
669 break;
670 }
671 case ShenandoahAllocRequest::_alloc_gclab:
672 // GCLABs are for evacuation so we must be in evacuation phase.
673
674 case ShenandoahAllocRequest::_alloc_shared_gc: {
675 // Fast-path: try to allocate in the collector view first
676 idx_t leftmost_collector = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector);
677 for (idx_t idx = _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector); idx >= leftmost_collector; ) {
678 assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, idx),
679 "Boundaries or find_prev_last_bit failed: " SSIZE_FORMAT, idx);
680 HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
681 if (result != nullptr) {
682 return result;
683 }
684 idx = _partitions.find_index_of_previous_available_region(ShenandoahFreeSetPartitionId::Collector, idx - 1);
685 }
686
687 // No dice. Can we borrow space from mutator view?
688 if (!ShenandoahEvacReserveOverflow) {
689 return nullptr;
690 }
691
692 // Try to steal an empty region from the mutator view.
693 idx_t leftmost_mutator_empty = _partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator);
694 for (idx_t idx = _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator); idx >= leftmost_mutator_empty; ) {
695 assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx),
696 "Boundaries or find_prev_last_bit failed: " SSIZE_FORMAT, idx);
697 ShenandoahHeapRegion* r = _heap->get_region(idx);
698 if (can_allocate_from(r)) {
699 flip_to_gc(r);
700 HeapWord *result = try_allocate_in(r, req, in_new_region);
701 if (result != nullptr) {
702 log_debug(gc)("Flipped region " SIZE_FORMAT " to gc for request: " PTR_FORMAT, idx, p2i(&req));
703 return result;
704 }
705 }
706 idx = _partitions.find_index_of_previous_available_region(ShenandoahFreeSetPartitionId::Mutator, idx - 1);
707 }
708
709 // No dice. Do not try to mix mutator and GC allocations, because adjusting the region's update watermark (UWM)
710 // due to GC allocations would expose unparsable mutator allocations.
711 break;
712 }
713 default:
714 ShouldNotReachHere();
715 }
716 return nullptr;
717 }
718
719 HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
720 assert (has_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());
721 if (_heap->is_concurrent_weak_root_in_progress() && r->is_trash()) {
722 return nullptr;
723 }
724
725 HeapWord* result = nullptr;
726 try_recycle_trashed(r);
727 in_new_region = r->is_empty();
728
729 if (in_new_region) {
730 log_debug(gc)("Using new region (" SIZE_FORMAT ") for %s (" PTR_FORMAT ").",
731 r->index(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(&req));
732 }
733
734 // req.size() is in words, r->free() is in bytes.
735 if (req.is_lab_alloc()) {
736 // This is a GCLAB or a TLAB allocation
737 size_t adjusted_size = req.size();
738 size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
739 if (adjusted_size > free) {
740 adjusted_size = free;
741 }
742 if (adjusted_size >= req.min_size()) {
743 result = r->allocate(adjusted_size, req.type());
744 log_debug(gc)("Allocated " SIZE_FORMAT " words (adjusted from " SIZE_FORMAT ") for %s @" PTR_FORMAT
745 " from %s region " SIZE_FORMAT ", free bytes remaining: " SIZE_FORMAT,
746 adjusted_size, req.size(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(result),
747 _partitions.partition_membership_name(r->index()), r->index(), r->free());
748 assert (result != nullptr, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, adjusted_size);
749 req.set_actual_size(adjusted_size);
750 } else {
751 log_trace(gc, free)("Failed to shrink TLAB or GCLAB request (" SIZE_FORMAT ") in region " SIZE_FORMAT " to " SIZE_FORMAT
752 " because min_size() is " SIZE_FORMAT, req.size(), r->index(), adjusted_size, req.min_size());
753 }
754 } else {
755 size_t size = req.size();
756 result = r->allocate(size, req.type());
757 if (result != nullptr) {
758 // Record actual allocation size
759 log_debug(gc)("Allocated " SIZE_FORMAT " words for %s @" PTR_FORMAT
760 " from %s region " SIZE_FORMAT ", free bytes remaining: " SIZE_FORMAT,
761 size, ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(result),
762 _partitions.partition_membership_name(r->index()), r->index(), r->free());
763 req.set_actual_size(size);
764 }
765 }
766
767 if (result != nullptr) {
768 // Allocation successful, bump stats:
769 if (req.is_mutator_alloc()) {
770 _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, req.actual_size() * HeapWordSize);
771 } else {
772 assert(req.is_gc_alloc(), "Should be gc_alloc since req wasn't mutator alloc");
773
774 // For GC allocations, we advance update_watermark because the objects relocated into this memory during
775 // evacuation are not updated during evacuation.
776 r->set_update_watermark(r->top());
777 }
778 }
779
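// Retirement threshold: a region whose remaining free space falls below this is unlikely to satisfy
// further evacuation requests. For example, assuming ShenandoahEvacWaste at its documented default
// of 1.2 (see shenandoah_globals.hpp), min_capacity works out to roughly
// region_size_bytes * (1 - 1/1.2), i.e. about 17% of a region.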
780 static const size_t min_capacity = (size_t) (ShenandoahHeapRegion::region_size_bytes() * (1.0 - 1.0 / ShenandoahEvacWaste));
781 size_t ac = alloc_capacity(r);
782
783 if (((result == nullptr) && (ac < min_capacity)) || (ac < PLAB::min_size() * HeapWordSize)) {
784 // Regardless of whether this allocation succeeded, if the remaining memory is less than PLAB::min_size(), retire this region.
785 // Note that retire_from_partition() increases used to account for waste.
786
787 // Also, if this allocation request failed and the consumed within this region * ShenandoahEvacWaste > region size,
788 // then retire the region so that subsequent searches can find available memory more quickly.
789
790 size_t idx = r->index();
791 _partitions.retire_from_partition(req.is_mutator_alloc()?
792 ShenandoahFreeSetPartitionId::Mutator: ShenandoahFreeSetPartitionId::Collector,
793 idx, r->used());
794 _partitions.assert_bounds();
795 }
796 return result;
797 }
798
799 HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
800 assert(req.is_mutator_alloc(), "All humongous allocations are performed by mutator");
801 shenandoah_assert_heaplocked();
802
803 size_t words_size = req.size();
804 idx_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
805
806 // Check if there are enough regions left to satisfy allocation.
807 if (num > (idx_t) _partitions.count(ShenandoahFreeSetPartitionId::Mutator)) {
808 return nullptr;
809 }
810
811 idx_t start_range = _partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator);
812 idx_t end_range = _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator) + 1;
813 idx_t last_possible_start = end_range - num;
814
815 // Find the continuous interval of $num regions, starting from $beg and ending in $end,
816 // inclusive. Contiguous allocations are biased to the beginning.
817 idx_t beg = _partitions.find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId::Mutator,
818 start_range, num);
819 if (beg > last_possible_start) {
820 // Hit the end, goodbye
821 return nullptr;
822 }
823 idx_t end = beg;
824
825 while (true) {
826 // We've confirmed num contiguous regions belonging to Mutator partition, so no need to confirm membership.
827 // If region is not completely free, the current [beg; end] is useless, and we may fast-forward. If we can extend
828 // the existing range, we can exploit that certain regions are already known to be in the Mutator free set.
829 while (!can_allocate_from(_heap->get_region(end))) {
830 // region[end] is not empty, so we restart our search after region[end]
831 idx_t slide_delta = end + 1 - beg;
832 if (beg + slide_delta > last_possible_start) {
833 // no room to slide
834 return nullptr;
835 }
836 for (idx_t span_end = beg + num; slide_delta > 0; slide_delta--) {
837 if (!_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, span_end)) {
838 beg = _partitions.find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId::Mutator,
865 ShenandoahHeapRegion* r = _heap->get_region(i);
866 try_recycle_trashed(r);
867
868 assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous");
869 assert(r->is_empty(), "Should be empty");
870
871 if (i == beg) {
872 r->make_humongous_start();
873 } else {
874 r->make_humongous_cont();
875 }
876
877 // Trailing region may be non-full, record the remainder there
878 size_t used_words;
879 if ((i == end) && (remainder != 0)) {
880 used_words = remainder;
881 } else {
882 used_words = ShenandoahHeapRegion::region_size_words();
883 }
884
885 r->set_top(r->bottom() + used_words);
886 }
887
888 if (remainder != 0) {
889 // Record this remainder as allocation waste
890 _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);
891 }
892
893 // retire_range_from_partition() will adjust bounds on Mutator free set if appropriate
894 _partitions.retire_range_from_partition(ShenandoahFreeSetPartitionId::Mutator, beg, end);
895
896 size_t total_humongous_size = ShenandoahHeapRegion::region_size_bytes() * num;
897 _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, total_humongous_size);
898 _partitions.assert_bounds();
899 req.set_actual_size(words_size);
900 return _heap->get_region(beg)->bottom();
901 }
902
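// A trash region is a former collection-set region whose live objects have already been evacuated.
// Recycling it removes its stale usage from the global heap accounting and makes the entire region
// available for allocation again.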
903 void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion* r) {
904 if (r->is_trash()) {
905 _heap->decrease_used(r->used());
906 r->recycle();
907 }
908 }
909
910 void ShenandoahFreeSet::recycle_trash() {
911 // The lock is not reentrant; check that we do not already hold it.
912 shenandoah_assert_not_heaplocked();
913
914 size_t count = 0;
915 for (size_t i = 0; i < _heap->num_regions(); i++) {
916 ShenandoahHeapRegion* r = _heap->get_region(i);
917 if (r->is_trash()) {
918 _trash_regions[count++] = r;
919 }
920 }
921
922 // Relinquish the lock after this much time passed.
923 static constexpr jlong deadline_ns = 30000; // 30 us
924 size_t idx = 0;
925 while (idx < count) {
926 os::naked_yield(); // Yield to allow allocators to take the lock
927 ShenandoahHeapLocker locker(_heap->lock());
928 const jlong deadline = os::javaTimeNanos() + deadline_ns;
929 while (idx < count && os::javaTimeNanos() < deadline) {
930 try_recycle_trashed(_trash_regions[idx++]);
931 }
932 }
933 }
934
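// Steal a region from the Mutator partition for GC use. Called from allocate_single() when the
// Collector reserve cannot satisfy an evacuation request and an empty (or trash) mutator region
// is available.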
935 void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
936 size_t idx = r->index();
937
938 assert(_partitions.partition_id_matches(idx, ShenandoahFreeSetPartitionId::Mutator), "Should be in mutator view");
939 assert(can_allocate_from(r), "Should not be allocated");
940
941 size_t ac = alloc_capacity(r);
942 _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
943 ShenandoahFreeSetPartitionId::Collector, ac);
944 _partitions.assert_bounds();
945
946 // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
947 // to recycle trash before attempting to allocate anything in the region.
948 }
949
950 void ShenandoahFreeSet::clear() {
951 shenandoah_assert_heaplocked();
952 clear_internal();
953 }
954
955 void ShenandoahFreeSet::clear_internal() {
956 _partitions.make_all_regions_unavailable();
957 }
958
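// Scan every heap region and rebuild the Mutator partition from scratch: count not-yet-recycled
// collection-set (trash) regions in cset_regions, add each region with meaningful allocation
// capacity to the Mutator partition, and track the interval bounds that are then installed via
// establish_mutator_intervals().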
959 void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &cset_regions) {
960
961 cset_regions = 0;
962 clear_internal();
963 size_t region_size_bytes = _partitions.region_size_bytes();
964 size_t max_regions = _partitions.max_regions();
965
966 size_t mutator_leftmost = max_regions;
967 size_t mutator_rightmost = 0;
968 size_t mutator_leftmost_empty = max_regions;
969 size_t mutator_rightmost_empty = 0;
970
971 size_t mutator_regions = 0;
972 size_t mutator_used = 0;
973
974 for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
975 ShenandoahHeapRegion* region = _heap->get_region(idx);
976 if (region->is_trash()) {
977 // Trashed regions represent regions that had been in the collection partition but have not yet been "cleaned up".
978 // The cset regions are not "trashed" until we have finished update refs.
979 cset_regions++;
980 }
981 if (region->is_alloc_allowed() || region->is_trash()) {
982
983 // Do not add regions that would almost surely fail allocation
984 size_t ac = alloc_capacity(region);
985 if (ac > PLAB::min_size() * HeapWordSize) {
986 _partitions.raw_assign_membership(idx, ShenandoahFreeSetPartitionId::Mutator);
987
988 if (idx < mutator_leftmost) {
989 mutator_leftmost = idx;
990 }
991 if (idx > mutator_rightmost) {
992 mutator_rightmost = idx;
993 }
994 if (ac == region_size_bytes) {
995 if (idx < mutator_leftmost_empty) {
996 mutator_leftmost_empty = idx;
997 }
998 if (idx > mutator_rightmost_empty) {
999 mutator_rightmost_empty = idx;
1000 }
1001 }
1002 mutator_regions++;
1003 mutator_used += (region_size_bytes - ac);
1004
1005 log_debug(gc)(
1006 " Adding Region " SIZE_FORMAT " (Free: " SIZE_FORMAT "%s, Used: " SIZE_FORMAT "%s) to mutator partition",
1007 idx, byte_size_in_proper_unit(region->free()), proper_unit_for_byte_size(region->free()),
1008 byte_size_in_proper_unit(region->used()), proper_unit_for_byte_size(region->used()));
1009 }
1010 }
1011 }
1012 idx_t rightmost_idx = (mutator_leftmost == max_regions)? -1: (idx_t) mutator_rightmost;
1013 idx_t rightmost_empty_idx = (mutator_leftmost_empty == max_regions)? -1: (idx_t) mutator_rightmost_empty;
1014 _partitions.establish_mutator_intervals(mutator_leftmost, rightmost_idx, mutator_leftmost_empty, rightmost_empty_idx,
1015 mutator_regions, mutator_used);
1016 }
1017
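// At the start of update refs, the Collector reserve is no longer needed at its prior size. Return
// up to max_xfer_regions regions from the Collector partition to the Mutator partition, preferring
// fully empty regions and then partially used ones.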
1018 void ShenandoahFreeSet::move_regions_from_collector_to_mutator(size_t max_xfer_regions) {
1019 size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1020 size_t collector_empty_xfer = 0;
1021 size_t collector_not_empty_xfer = 0;
1022
1023 // Process empty regions within the Collector free partition
1024 if ((max_xfer_regions > 0) &&
1025 (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Collector)
1026 <= _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Collector))) {
1027 ShenandoahHeapLocker locker(_heap->lock());
1028 idx_t rightmost = _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Collector);
1029 for (idx_t idx = _partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Collector);
1030 (max_xfer_regions > 0) && (idx <= rightmost); ) {
1031 assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, idx),
1032 "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, idx);
1033 // Note: can_allocate_from() denotes that region is entirely empty
1034 if (can_allocate_from(idx)) {
1035 _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Collector,
1036 ShenandoahFreeSetPartitionId::Mutator, region_size_bytes);
1037 max_xfer_regions--;
1038 collector_empty_xfer += region_size_bytes;
1039 }
1040 idx = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Collector, idx + 1);
1041 }
1042 }
1043
1044 // If there are any non-empty regions within Collector partition, we can also move them to the Mutator free partition
1045 if ((max_xfer_regions > 0) && (_partitions.leftmost(ShenandoahFreeSetPartitionId::Collector)
1046 <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector))) {
1047 ShenandoahHeapLocker locker(_heap->lock());
1048 idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector);
1049 for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector);
1050 (max_xfer_regions > 0) && (idx <= rightmost); ) {
1051 assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, idx),
1052 "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, idx);
1053 size_t ac = alloc_capacity(idx);
1054 if (ac > 0) {
1055 _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Collector,
1056 ShenandoahFreeSetPartitionId::Mutator, ac);
1057 max_xfer_regions--;
1058 collector_not_empty_xfer += ac;
1059 }
1060 idx = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Collector, idx + 1);
1061 }
1062 }
1063
1064 size_t collector_xfer = collector_empty_xfer + collector_not_empty_xfer;
1065 log_info(gc, ergo)("At start of update refs, moving " SIZE_FORMAT "%s to Mutator free partition from Collector Reserve",
1066 byte_size_in_proper_unit(collector_xfer), proper_unit_for_byte_size(collector_xfer));
1067 }
1068
1069 void ShenandoahFreeSet::prepare_to_rebuild(size_t &cset_regions) {
1070 shenandoah_assert_heaplocked();
1071
1072 log_debug(gc)("Rebuilding FreeSet");
1073
1074 // This places regions that have alloc_capacity into the mutator partition.
1075 find_regions_with_alloc_capacity(cset_regions);
1076 }
1077
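// Second half of rebuild: size the Collector reserve as ShenandoahEvacReserve percent of maximum
// heap capacity, move regions from the Mutator partition into that reserve via reserve_regions(),
// then validate bounds and log the resulting free-set status.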
1078 void ShenandoahFreeSet::finish_rebuild(size_t cset_regions) {
1079 shenandoah_assert_heaplocked();
1080
1081 // Our desire is to reserve this much memory for future evacuation. We may end up reserving less, if
1082 // memory is in short supply.
1083
1084 size_t reserve = _heap->max_capacity() * ShenandoahEvacReserve / 100;
1085 size_t available_in_collector_partition = (_partitions.capacity_of(ShenandoahFreeSetPartitionId::Collector)
1086 - _partitions.used_by(ShenandoahFreeSetPartitionId::Collector));
1087 size_t additional_reserve;
1088 if (available_in_collector_partition < reserve) {
1089 additional_reserve = reserve - available_in_collector_partition;
1090 } else {
1091 additional_reserve = 0;
1092 }
1093
1094 reserve_regions(reserve);
1095 _partitions.assert_bounds();
1096 log_status();
1097 }
1098
1099 void ShenandoahFreeSet::rebuild() {
1100 size_t cset_regions;
1101 prepare_to_rebuild(cset_regions);
1102 finish_rebuild(cset_regions);
1103 }
1104
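// Walk regions from the highest index downward, moving members of the Mutator partition into the
// Collector partition until the Collector reserve holds at least to_reserve bytes. Working from
// high addresses keeps low memory available for humongous (contiguous) mutator allocations.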
1105 void ShenandoahFreeSet::reserve_regions(size_t to_reserve) {
1106 for (size_t i = _heap->num_regions(); i > 0; i--) {
1107 size_t idx = i - 1;
1108 ShenandoahHeapRegion* r = _heap->get_region(idx);
1109
1110 if (!_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx)) {
1111 continue;
1112 }
1113
1114 size_t ac = alloc_capacity(r);
1115 assert (ac > 0, "Membership in free partition implies has capacity");
1116
1117 bool move_to_collector = _partitions.available_in(ShenandoahFreeSetPartitionId::Collector) < to_reserve;
1118 if (!move_to_collector) {
1119 // We've satisfied to_reserve
1120 break;
1121 }
1122
1123 if (move_to_collector) {
1124 // Note: In a previous implementation, regions were only placed into the survivor space (collector_is_free) if
1125 // they were entirely empty. This has the effect of causing new Mutator allocation to reside next to objects
1126 // that have already survived at least one GC, mixing ephemeral with longer-lived objects in the same region.
1127 // Any objects that have survived a GC are less likely to immediately become garbage, so a region that contains
1128 // survivor objects is less likely to be selected for the collection set. This alternative implementation allows
1129 // survivor regions to continue accumulating other survivor objects, and makes it more likely that ephemeral objects
1130 // occupy regions comprised entirely of ephemeral objects. These regions are highly likely to be included in the next
1131 // collection set, and they are easily evacuated because they have low density of live objects.
1132 _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
1133 ShenandoahFreeSetPartitionId::Collector, ac);
1134 log_debug(gc)(" Shifting region " SIZE_FORMAT " from mutator_free to collector_free", idx);
1135 }
1136 }
1137
1138 if (LogTarget(Info, gc, free)::is_enabled()) {
1139 size_t reserve = _partitions.available_in(ShenandoahFreeSetPartitionId::Collector);
1140 if (reserve < to_reserve) {
1141 log_debug(gc)("Wanted " PROPERFMT " for young reserve, but only reserved: " PROPERFMT,
1142 PROPERFMTARGS(to_reserve), PROPERFMTARGS(reserve));
1143 }
1144 }
1145 }
1146
1147 void ShenandoahFreeSet::log_status_under_lock() {
1148 // Must not be heap locked, it acquires heap lock only when log is enabled
1149 shenandoah_assert_not_heaplocked();
1150 if (LogTarget(Info, gc, free)::is_enabled()
1151 DEBUG_ONLY(|| LogTarget(Debug, gc, free)::is_enabled())) {
1152 ShenandoahHeapLocker locker(_heap->lock());
1153 log_status();
1154 }
1155 }
1156
1157 void ShenandoahFreeSet::log_status() {
1158 shenandoah_assert_heaplocked();
1159
1160 #ifdef ASSERT
1161 // Dump of the FreeSet details is only enabled if assertions are enabled
1162 if (LogTarget(Debug, gc, free)::is_enabled()) {
1163 #define BUFFER_SIZE 80
1164 size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1165 size_t consumed_collector = 0;
1166 size_t available_collector = 0;
1167 size_t consumed_mutator = 0;
1168 size_t available_mutator = 0;
1169
1170 char buffer[BUFFER_SIZE];
1171 for (uint i = 0; i < BUFFER_SIZE; i++) {
1172 buffer[i] = '\0';
1173 }
1174 log_debug(gc)("FreeSet map legend: M:mutator_free C:collector_free H:humongous _:retired");
1175 log_debug(gc)(" mutator free range [" SIZE_FORMAT ".." SIZE_FORMAT "],"
1176 " collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "]",
1177 _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
1178 _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
1179 _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector),
1180 _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector));
1181
1182 for (uint i = 0; i < _heap->num_regions(); i++) {
1183 ShenandoahHeapRegion *r = _heap->get_region(i);
1184 uint idx = i % 64;
1185 if ((i != 0) && (idx == 0)) {
1186 log_debug(gc)(" %6u: %s", i-64, buffer);
1187 }
1188 if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, i)) {
1189 size_t capacity = alloc_capacity(r);
1190 available_mutator += capacity;
1191 consumed_mutator += region_size_bytes - capacity;
1192 buffer[idx] = (capacity == region_size_bytes)? 'M': 'm';
1193 } else if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, i)) {
1194 size_t capacity = alloc_capacity(r);
1195 available_collector += capacity;
1196 consumed_collector += region_size_bytes - capacity;
1197 buffer[idx] = (capacity == region_size_bytes)? 'C': 'c';
1198 } else if (r->is_humongous()) {
1199 buffer[idx] = 'h';
1200 } else {
1201 buffer[idx] = '_';
1202 }
1203 }
1204 uint remnant = _heap->num_regions() % 64;
1205 if (remnant > 0) {
1206 buffer[remnant] = '\0';
1207 } else {
1208 remnant = 64;
1209 }
1210 log_debug(gc)(" %6u: %s", (uint) (_heap->num_regions() - remnant), buffer);
1211 }
1212 #endif
1213
1214 LogTarget(Info, gc, free) lt;
1215 if (lt.is_enabled()) {
1216 ResourceMark rm;
1217 LogStream ls(lt);
1218
1219 {
1220 idx_t last_idx = 0;
1221 size_t max = 0;
1239 } else {
1240 empty_contig = 1;
1241 }
1242 } else {
1243 empty_contig = 0;
1244 }
1245 total_used += r->used();
1246 total_free += free;
1247 max_contig = MAX2(max_contig, empty_contig);
1248 last_idx = idx;
1249 }
1250 }
1251
1252 size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
1253 size_t free = capacity() - used();
1254
1255 // Since certain regions that belonged to the Mutator free partition at the time of most recent rebuild may have been
1256 // retired, the sum of used and capacities within regions that are still in the Mutator free partition may not match
1257 // my internally tracked values of used() and free().
1258 assert(free == total_free, "Free memory should match");
1259
1260 ls.print("Free: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s regular, " SIZE_FORMAT "%s humongous, ",
1261 byte_size_in_proper_unit(free), proper_unit_for_byte_size(free),
1262 byte_size_in_proper_unit(max), proper_unit_for_byte_size(max),
1263 byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous)
1264 );
1265
1266 ls.print("Frag: ");
1267 size_t frag_ext;
1268 if (total_free_ext > 0) {
1269 frag_ext = 100 - (100 * max_humongous / total_free_ext);
1270 } else {
1271 frag_ext = 0;
1272 }
1273 ls.print(SIZE_FORMAT "%% external, ", frag_ext);
1274
1275 size_t frag_int;
1276 if (_partitions.count(ShenandoahFreeSetPartitionId::Mutator) > 0) {
1277 frag_int = (100 * (total_used / _partitions.count(ShenandoahFreeSetPartitionId::Mutator))
1278 / ShenandoahHeapRegion::region_size_bytes());
1279 } else {
1280 frag_int = 0;
1281 }
1288 {
1289 size_t max = 0;
1290 size_t total_free = 0;
1291 size_t total_used = 0;
1292
1293 for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector);
1294 idx <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector); idx++) {
1295 if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, idx)) {
1296 ShenandoahHeapRegion *r = _heap->get_region(idx);
1297 size_t free = alloc_capacity(r);
1298 max = MAX2(max, free);
1299 total_free += free;
1300 total_used += r->used();
1301 }
1302 }
1303 ls.print(" Collector Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s; Used: " SIZE_FORMAT "%s",
1304 byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
1305 byte_size_in_proper_unit(max), proper_unit_for_byte_size(max),
1306 byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used));
1307 }
1308 }
1309 }
1310
1311 HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
1312 shenandoah_assert_heaplocked();
1313 if (ShenandoahHeapRegion::requires_humongous(req.size())) {
1314 switch (req.type()) {
1315 case ShenandoahAllocRequest::_alloc_shared:
1316 case ShenandoahAllocRequest::_alloc_shared_gc:
1317 in_new_region = true;
1318 return allocate_contiguous(req);
1319 case ShenandoahAllocRequest::_alloc_gclab:
1320 case ShenandoahAllocRequest::_alloc_tlab:
1321 in_new_region = false;
1322 assert(false, "Trying to allocate TLAB in humongous region: " SIZE_FORMAT, req.size());
1323 return nullptr;
1324 default:
1325 ShouldNotReachHere();
1326 return nullptr;
1327 }
1328 } else {
1329 return allocate_single(req, in_new_region);
1330 }
1331 }
1332
1333 void ShenandoahFreeSet::print_on(outputStream* out) const {
1334 out->print_cr("Mutator Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::Mutator));
1335 idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
1336 for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); index <= rightmost; ) {
1337 assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, index),
1338 "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
1339 _heap->get_region(index)->print_on(out);
1340 index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, index + 1);
1341 }
1342 out->print_cr("Collector Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::Collector));
1343 rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector);
1344 for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector); index <= rightmost; ) {
1345 assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, index),
1346 "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
1347 _heap->get_region(index)->print_on(out);
1348 index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Collector, index + 1);
1349 }
1350 }
1351
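// Internal fragmentation of the Mutator partition, computed as
//   1 - sum(used_i^2) / (region_size_bytes * sum(used_i))
// over the regions still in the partition. A value near 0 means the used space is concentrated in
// nearly full regions; a value near 1 means it is spread thinly across many lightly used regions.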
1352 double ShenandoahFreeSet::internal_fragmentation() {
1353 double squared = 0;
1354 double linear = 0;
1355
1356 idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
1357 for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); index <= rightmost; ) {
1358 assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, index),
1359 "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
1360 ShenandoahHeapRegion* r = _heap->get_region(index);
1361 size_t used = r->used();
1362 squared += used * used;
1363 linear += used;
1364 index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, index + 1);
1365 }
1366
1367 if (linear > 0) {
1368 double s = squared / (ShenandoahHeapRegion::region_size_bytes() * linear);
1369 return 1 - s;
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "gc/shared/tlab_globals.hpp"
28 #include "gc/shenandoah/shenandoahAffiliation.hpp"
29 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
30 #include "gc/shenandoah/shenandoahFreeSet.hpp"
31 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
32 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
33 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
34 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
35 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
36 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
37 #include "gc/shenandoah/shenandoahSimpleBitMap.hpp"
38 #include "gc/shenandoah/shenandoahSimpleBitMap.inline.hpp"
39 #include "logging/logStream.hpp"
40 #include "memory/resourceArea.hpp"
41 #include "runtime/orderAccess.hpp"
42
43 static const char* partition_name(ShenandoahFreeSetPartitionId t) {
44 switch (t) {
45 case ShenandoahFreeSetPartitionId::NotFree: return "NotFree";
46 case ShenandoahFreeSetPartitionId::Mutator: return "Mutator";
47 case ShenandoahFreeSetPartitionId::Collector: return "Collector";
48 case ShenandoahFreeSetPartitionId::OldCollector: return "OldCollector";
49 default:
50 ShouldNotReachHere();
51 return "Unrecognized";
52 }
53 }
54
55 #ifndef PRODUCT
56 void ShenandoahRegionPartitions::dump_bitmap() const {
57 log_debug(gc)("Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "], Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT
58 "], Old Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
59 _leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)],
60 _rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)],
61 _leftmosts[int(ShenandoahFreeSetPartitionId::Collector)],
62 _rightmosts[int(ShenandoahFreeSetPartitionId::Collector)],
63 _leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)],
64 _rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)]);
65 log_debug(gc)("Empty Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT
66 "], Empty Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT
67 "], Empty Old Collecto range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
68 _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
69 _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
70 _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
71 _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
72 _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
73 _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)]);
74
75 log_debug(gc)("%6s: %18s %18s %18s %18s", "index", "Mutator Bits", "Collector Bits", "Old Collector Bits", "NotFree Bits");
76 dump_bitmap_range(0, _max-1);
77 }
78
79 void ShenandoahRegionPartitions::dump_bitmap_range(idx_t start_region_idx, idx_t end_region_idx) const {
80 assert((start_region_idx >= 0) && (start_region_idx < (idx_t) _max), "precondition");
81 assert((end_region_idx >= 0) && (end_region_idx < (idx_t) _max), "precondition");
82 idx_t aligned_start = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(start_region_idx);
83 idx_t aligned_end = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(end_region_idx);
84 idx_t alignment = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].alignment();
85 while (aligned_start <= aligned_end) {
86 dump_bitmap_row(aligned_start);
87 aligned_start += alignment;
88 }
89 }
90
91 void ShenandoahRegionPartitions::dump_bitmap_row(idx_t region_idx) const {
92 assert((region_idx >= 0) && (region_idx < (idx_t) _max), "precondition");
93 idx_t aligned_idx = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(region_idx);
94 uintx mutator_bits = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].bits_at(aligned_idx);
95 uintx collector_bits = _membership[int(ShenandoahFreeSetPartitionId::Collector)].bits_at(aligned_idx);
96 uintx old_collector_bits = _membership[int(ShenandoahFreeSetPartitionId::OldCollector)].bits_at(aligned_idx);
97 uintx free_bits = mutator_bits | collector_bits | old_collector_bits;
98 uintx notfree_bits = ~free_bits;
99 log_debug(gc)(SSIZE_FORMAT_W(6) ": " SIZE_FORMAT_X_0 " 0x" SIZE_FORMAT_X_0 " 0x" SIZE_FORMAT_X_0 " 0x" SIZE_FORMAT_X_0,
100 aligned_idx, mutator_bits, collector_bits, old_collector_bits, notfree_bits);
101 }
102 #endif
103
104 ShenandoahRegionPartitions::ShenandoahRegionPartitions(size_t max_regions, ShenandoahFreeSet* free_set) :
105 _max(max_regions),
106 _region_size_bytes(ShenandoahHeapRegion::region_size_bytes()),
107 _free_set(free_set),
108 _membership{ ShenandoahSimpleBitMap(max_regions), ShenandoahSimpleBitMap(max_regions), ShenandoahSimpleBitMap(max_regions) }
109 {
110 make_all_regions_unavailable();
111 }
112
113 inline bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) const {
114 return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
115 }
116
117 inline bool ShenandoahFreeSet::can_allocate_from(size_t idx) const {
118 ShenandoahHeapRegion* r = _heap->get_region(idx);
119 return can_allocate_from(r);
120 }
121
122 inline size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) const {
123 if (r->is_trash()) {
124 // This would be recycled on allocation path
125 return ShenandoahHeapRegion::region_size_bytes();
126 } else {
127 return r->free();
128 }
158 // which_partition is shrinking after the region that used to be leftmost is retired.
159 return idx;
160 }
161
162 void ShenandoahRegionPartitions::make_all_regions_unavailable() {
163 for (size_t partition_id = 0; partition_id < IntNumPartitions; partition_id++) {
164 _membership[partition_id].clear_all();
165 _leftmosts[partition_id] = _max;
166 _rightmosts[partition_id] = -1;
167 _leftmosts_empty[partition_id] = _max;
168 _rightmosts_empty[partition_id] = -1;
169 _capacity[partition_id] = 0;
170 _used[partition_id] = 0;
171 }
172 _region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = _region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = _region_counts[int(ShenandoahFreeSetPartitionId::OldCollector)] = 0;
173 }
174
175 void ShenandoahRegionPartitions::establish_mutator_intervals(idx_t mutator_leftmost, idx_t mutator_rightmost,
176 idx_t mutator_leftmost_empty, idx_t mutator_rightmost_empty,
177 size_t mutator_region_count, size_t mutator_used) {
178 _leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_leftmost;
179 _rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_rightmost;
180 _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_leftmost_empty;
181 _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_rightmost_empty;
182
183 _region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_region_count;
184 _used[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_used;
185 _capacity[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_region_count * _region_size_bytes;
186
187 _leftmosts[int(ShenandoahFreeSetPartitionId::Collector)] = _max;
188 _rightmosts[int(ShenandoahFreeSetPartitionId::Collector)] = -1;
189 _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)] = _max;
190 _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)] = -1;
191
192 _region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
193 _used[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
194 _capacity[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
195 }
196
197 void ShenandoahRegionPartitions::establish_old_collector_intervals(idx_t old_collector_leftmost, idx_t old_collector_rightmost,
198 idx_t old_collector_leftmost_empty,
199 idx_t old_collector_rightmost_empty,
200 size_t old_collector_region_count, size_t old_collector_used) {
201 _leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_leftmost;
202 _rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_rightmost;
203 _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_leftmost_empty;
204 _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_rightmost_empty;
205
206 _region_counts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_region_count;
207 _used[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_used;
208 _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_region_count * _region_size_bytes;
209 }
210
211 void ShenandoahRegionPartitions::increase_used(ShenandoahFreeSetPartitionId which_partition, size_t bytes) {
212 assert (which_partition < NumPartitions, "Partition must be valid");
213 _used[int(which_partition)] += bytes;
214 assert (_used[int(which_partition)] <= _capacity[int(which_partition)],
215 "Must not use (" SIZE_FORMAT ") more than capacity (" SIZE_FORMAT ") after increase by " SIZE_FORMAT,
216 _used[int(which_partition)], _capacity[int(which_partition)], bytes);
217 }
218
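// After the regions in [low_idx, high_idx] have been removed from the given partition, tighten the partition's
// cached interval: if the removed range touched the leftmost (or rightmost) bound, advance that bound to the
// next (or previous) region still in the partition, collapsing to the empty sentinels if nothing remains.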
219 inline void ShenandoahRegionPartitions::shrink_interval_if_range_modifies_either_boundary(
220 ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx) {
221 assert((low_idx <= high_idx) && (low_idx >= 0) && (high_idx < _max), "Range must span legal index values");
222 if (low_idx == leftmost(partition)) {
223 assert (!_membership[int(partition)].is_set(low_idx), "Do not shrink interval if region not removed");
224 if (high_idx + 1 == _max) {
225 _leftmosts[int(partition)] = _max;
226 } else {
227 _leftmosts[int(partition)] = find_index_of_next_available_region(partition, high_idx + 1);
228 }
229 if (_leftmosts_empty[int(partition)] < _leftmosts[int(partition)]) {
230 // This gets us closer to where we need to be; we'll scan further when leftmosts_empty is requested.
231 _leftmosts_empty[int(partition)] = _leftmosts[int(partition)];
232 }
233 }
234 if (high_idx == _rightmosts[int(partition)]) {
235 assert (!_membership[int(partition)].is_set(high_idx), "Do not shrink interval if region not removed");
236 if (low_idx == 0) {
237 _rightmosts[int(partition)] = -1;
238 } else {
239 _rightmosts[int(partition)] = find_index_of_previous_available_region(partition, low_idx - 1);
240 }
241 if (_rightmosts_empty[int(partition)] > _rightmosts[int(partition)]) {
242 // This gets us closer to where we need to be; we'll scan further when rightmosts_empty is requested.
243 _rightmosts_empty[int(partition)] = _rightmosts[int(partition)];
244 }
245 }
246 if (_leftmosts[int(partition)] > _rightmosts[int(partition)]) {
247 _leftmosts[int(partition)] = _max;
248 _rightmosts[int(partition)] = -1;
249 _leftmosts_empty[int(partition)] = _max;
250 _rightmosts_empty[int(partition)] = -1;
251 }
298
299 if (used_bytes < _region_size_bytes) {
300 // Count the alignment pad remnant of memory as used when we retire this region
301 increase_used(partition, _region_size_bytes - used_bytes);
302 }
303 _membership[int(partition)].clear_bit(idx);
304 shrink_interval_if_boundary_modified(partition, idx);
305 _region_counts[int(partition)]--;
306 }
307
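// Add region idx, which has the given number of available bytes, to which_partition. The region must not
// currently belong to any free-set partition. Capacity, used, interval bounds, and the region count are updated.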
308 void ShenandoahRegionPartitions::make_free(idx_t idx, ShenandoahFreeSetPartitionId which_partition, size_t available) {
309 assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
310 assert (membership(idx) == ShenandoahFreeSetPartitionId::NotFree, "Cannot make free if already free");
311 assert (which_partition < NumPartitions, "selected free partition must be valid");
312 assert (available <= _region_size_bytes, "Available cannot exceed region size");
313
314 _membership[int(which_partition)].set_bit(idx);
315 _capacity[int(which_partition)] += _region_size_bytes;
316 _used[int(which_partition)] += _region_size_bytes - available;
317 expand_interval_if_boundary_modified(which_partition, idx, available);
318 _region_counts[int(which_partition)]++;
319 }
320
321 bool ShenandoahRegionPartitions::is_mutator_partition(ShenandoahFreeSetPartitionId p) {
322 return (p == ShenandoahFreeSetPartitionId::Mutator);
323 }
324
325 bool ShenandoahRegionPartitions::is_young_collector_partition(ShenandoahFreeSetPartitionId p) {
326 return (p == ShenandoahFreeSetPartitionId::Collector);
327 }
328
329 bool ShenandoahRegionPartitions::is_old_collector_partition(ShenandoahFreeSetPartitionId p) {
330 return (p == ShenandoahFreeSetPartitionId::OldCollector);
331 }
332
333 bool ShenandoahRegionPartitions::available_implies_empty(size_t available_in_region) {
334 return (available_in_region == _region_size_bytes);
335 }
336
337
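// Transfer region idx from orig_partition to new_partition, moving its capacity and used bytes along with it and
// adjusting both partitions' cached intervals and region counts. Only the transitions enumerated below are expected.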
338 void ShenandoahRegionPartitions::move_from_partition_to_partition(idx_t idx, ShenandoahFreeSetPartitionId orig_partition,
339 ShenandoahFreeSetPartitionId new_partition, size_t available) {
340 ShenandoahHeapRegion* r = ShenandoahHeap::heap()->get_region(idx);
341 assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
342 assert (orig_partition < NumPartitions, "Original partition must be valid");
343 assert (new_partition < NumPartitions, "New partition must be valid");
344 assert (available <= _region_size_bytes, "Available cannot exceed region size");
345 assert (_membership[int(orig_partition)].is_set(idx), "Cannot move from partition unless in partition");
346 assert ((r != nullptr) && ((r->is_trash() && (available == _region_size_bytes)) ||
347 (r->used() + available == _region_size_bytes)),
348 "Used: " SIZE_FORMAT " + available: " SIZE_FORMAT " should equal region size: " SIZE_FORMAT,
349 ShenandoahHeap::heap()->get_region(idx)->used(), available, _region_size_bytes);
350
351 // Expected transitions:
352 // During rebuild: Mutator => Collector
353 // Mutator empty => Collector
354 // Mutator empty => OldCollector
355 // During flip_to_gc: Mutator empty => Collector
356 // Mutator empty => OldCollector
357 // At start of update refs: Collector => Mutator
358 // OldCollector Empty => Mutator
359 assert ((is_mutator_partition(orig_partition) && is_young_collector_partition(new_partition)) ||
360 (is_mutator_partition(orig_partition) &&
361 available_implies_empty(available) && is_old_collector_partition(new_partition)) ||
362 (is_young_collector_partition(orig_partition) && is_mutator_partition(new_partition)) ||
363 (is_old_collector_partition(orig_partition)
364 && available_implies_empty(available) && is_mutator_partition(new_partition)),
365 "Unexpected movement between partitions, available: " SIZE_FORMAT ", _region_size_bytes: " SIZE_FORMAT
366 ", orig_partition: %s, new_partition: %s",
367 available, _region_size_bytes, partition_name(orig_partition), partition_name(new_partition));
368
369 size_t used = _region_size_bytes - available;
370 assert (_used[int(orig_partition)] >= used,
371           "Orig partition used: " SIZE_FORMAT " must be no less than moved used: " SIZE_FORMAT " within region " SSIZE_FORMAT,
372 _used[int(orig_partition)], used, idx);
373
374 _membership[int(orig_partition)].clear_bit(idx);
375 _membership[int(new_partition)].set_bit(idx);
376
377 _capacity[int(orig_partition)] -= _region_size_bytes;
378 _used[int(orig_partition)] -= used;
379 shrink_interval_if_boundary_modified(orig_partition, idx);
380
381   _capacity[int(new_partition)] += _region_size_bytes;
382 _used[int(new_partition)] += used;
383 expand_interval_if_boundary_modified(new_partition, idx, available);
384
385 _region_counts[int(orig_partition)]--;
386 _region_counts[int(new_partition)]++;
387 }
388
389 const char* ShenandoahRegionPartitions::partition_membership_name(idx_t idx) const {
390 return partition_name(membership(idx));
391 }
392
521 idx_t leftmosts[UIntNumPartitions];
522 idx_t rightmosts[UIntNumPartitions];
523 idx_t empty_leftmosts[UIntNumPartitions];
524 idx_t empty_rightmosts[UIntNumPartitions];
525
526 for (uint i = 0; i < UIntNumPartitions; i++) {
527 leftmosts[i] = _max;
528 empty_leftmosts[i] = _max;
529 rightmosts[i] = -1;
530 empty_rightmosts[i] = -1;
531 }
532
533 for (idx_t i = 0; i < _max; i++) {
534 ShenandoahFreeSetPartitionId partition = membership(i);
535 switch (partition) {
536 case ShenandoahFreeSetPartitionId::NotFree:
537 break;
538
539 case ShenandoahFreeSetPartitionId::Mutator:
540 case ShenandoahFreeSetPartitionId::Collector:
541 case ShenandoahFreeSetPartitionId::OldCollector:
542 {
543 size_t capacity = _free_set->alloc_capacity(i);
544 bool is_empty = (capacity == _region_size_bytes);
545 assert(capacity > 0, "free regions must have allocation capacity");
546 if (i < leftmosts[int(partition)]) {
547 leftmosts[int(partition)] = i;
548 }
549 if (is_empty && (i < empty_leftmosts[int(partition)])) {
550 empty_leftmosts[int(partition)] = i;
551 }
552 if (i > rightmosts[int(partition)]) {
553 rightmosts[int(partition)] = i;
554 }
555 if (is_empty && (i > empty_rightmosts[int(partition)])) {
556 empty_rightmosts[int(partition)] = i;
557 }
558 break;
559 }
560
561 default:
611
612   // If the Collector partition is empty, leftmosts will both equal _max and rightmosts will both equal -1.
613   // Likewise for the empty-region intervals.
614 beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
615 end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
616 assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::Collector),
617 "free regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
618 beg_off, leftmost(ShenandoahFreeSetPartitionId::Collector));
619 assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::Collector),
620 "free regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
621 end_off, rightmost(ShenandoahFreeSetPartitionId::Collector));
622
623 beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
624 end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
625 assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
626 "free empty regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
627 beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Collector));
628 assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
629 "free empty regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
630 end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Collector));
631
632 // Performance invariants. Failing these would not break the free partition, but performance would suffer.
633 assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) <= _max, "leftmost in bounds: " SSIZE_FORMAT " < " SSIZE_FORMAT,
634 leftmost(ShenandoahFreeSetPartitionId::OldCollector), _max);
635 assert (rightmost(ShenandoahFreeSetPartitionId::OldCollector) < _max, "rightmost in bounds: " SSIZE_FORMAT " < " SSIZE_FORMAT,
636 rightmost(ShenandoahFreeSetPartitionId::OldCollector), _max);
637
638 assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) == _max
639 || partition_id_matches(leftmost(ShenandoahFreeSetPartitionId::OldCollector),
640 ShenandoahFreeSetPartitionId::OldCollector),
641 "leftmost region should be free: " SSIZE_FORMAT, leftmost(ShenandoahFreeSetPartitionId::OldCollector));
642 assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) == _max
643 || partition_id_matches(rightmost(ShenandoahFreeSetPartitionId::OldCollector),
644 ShenandoahFreeSetPartitionId::OldCollector),
645 "rightmost region should be free: " SSIZE_FORMAT, rightmost(ShenandoahFreeSetPartitionId::OldCollector));
646
647   // If the OldCollector partition is empty, leftmosts will both equal _max and rightmosts will both equal -1.
648   // Likewise for the empty-region intervals.
649 beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
650 end_off = rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
651 assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::OldCollector),
652 "free regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
653 beg_off, leftmost(ShenandoahFreeSetPartitionId::OldCollector));
654 assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::OldCollector),
655 "free regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
656 end_off, rightmost(ShenandoahFreeSetPartitionId::OldCollector));
657
658 beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
659 end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
660 assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
661 "free empty regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
662 beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::OldCollector));
663 assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
664 "free empty regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
665 end_off, rightmost_empty(ShenandoahFreeSetPartitionId::OldCollector));
666 }
667 #endif
668
669 ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
670 _heap(heap),
671 _partitions(max_regions, this),
672 _trash_regions(NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, max_regions, mtGC)),
673 _alloc_bias_weight(0)
674 {
675 clear_internal();
676 }
677
678 void ShenandoahFreeSet::add_promoted_in_place_region_to_old_collector(ShenandoahHeapRegion* region) {
679 shenandoah_assert_heaplocked();
680 size_t plab_min_size_in_bytes = ShenandoahGenerationalHeap::heap()->plab_min_size() * HeapWordSize;
681 size_t idx = region->index();
682 size_t capacity = alloc_capacity(region);
683 assert(_partitions.membership(idx) == ShenandoahFreeSetPartitionId::NotFree,
684 "Regions promoted in place should have been excluded from Mutator partition");
685 if (capacity >= plab_min_size_in_bytes) {
686 _partitions.make_free(idx, ShenandoahFreeSetPartitionId::OldCollector, capacity);
687 _heap->old_generation()->augment_promoted_reserve(capacity);
688 }
689 }
690
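// Search which_partition, in its preferred allocation direction, for a region whose affiliation matches the request
// and try to allocate there. When the requested affiliation is FREE, the search is confined to the partition's
// empty-region interval. Returns nullptr if no region with matching affiliation can satisfy the request.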
691 HeapWord* ShenandoahFreeSet::allocate_from_partition_with_affiliation(ShenandoahFreeSetPartitionId which_partition,
692 ShenandoahAffiliation affiliation,
693 ShenandoahAllocRequest& req, bool& in_new_region) {
694 shenandoah_assert_heaplocked();
695 idx_t rightmost_collector = ((affiliation == ShenandoahAffiliation::FREE)?
696 _partitions.rightmost_empty(which_partition): _partitions.rightmost(which_partition));
697 idx_t leftmost_collector = ((affiliation == ShenandoahAffiliation::FREE)?
698 _partitions.leftmost_empty(which_partition): _partitions.leftmost(which_partition));
699 if (_partitions.alloc_from_left_bias(which_partition)) {
700 for (idx_t idx = leftmost_collector; idx <= rightmost_collector; ) {
701 assert(_partitions.in_free_set(which_partition, idx), "Boundaries or find_prev_last_bit failed: " SSIZE_FORMAT, idx);
702 ShenandoahHeapRegion* r = _heap->get_region(idx);
703 if (r->affiliation() == affiliation) {
704 HeapWord* result = try_allocate_in(r, req, in_new_region);
705 if (result != nullptr) {
706 return result;
707 }
708 }
709 idx = _partitions.find_index_of_next_available_region(which_partition, idx + 1);
710 }
711 } else {
712 for (idx_t idx = rightmost_collector; idx >= leftmost_collector; ) {
713 assert(_partitions.in_free_set(which_partition, idx),
714 "Boundaries or find_prev_last_bit failed: " SSIZE_FORMAT, idx);
715 ShenandoahHeapRegion* r = _heap->get_region(idx);
716 if (r->affiliation() == affiliation) {
717 HeapWord* result = try_allocate_in(r, req, in_new_region);
718 if (result != nullptr) {
719 return result;
720 }
721 }
722 idx = _partitions.find_index_of_previous_available_region(which_partition, idx - 1);
723 }
724 }
725 log_debug(gc, free)("Could not allocate collector region with affiliation: %s for request " PTR_FORMAT,
726 shenandoah_affiliation_name(affiliation), p2i(&req));
727 return nullptr;
728 }
729
730 HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
731 shenandoah_assert_heaplocked();
732
733 // Scan the bitmap looking for a first fit.
734 //
735 // Leftmost and rightmost bounds provide enough caching to walk bitmap efficiently. Normally,
736 // we would find the region to allocate at right away.
737 //
738 // Allocations are biased: GC allocations are taken from the high end of the heap. Regular (and TLAB)
739 // mutator allocations are taken from the middle of heap, below the memory reserved for Collector.
740 // Humongous mutator allocations are taken from the bottom of the heap.
741 //
742 // Free set maintains mutator and collector partitions. Normally, each allocates only from its partition,
743 // except in special cases when the collector steals regions from the mutator partition.
744
745 // Overwrite with non-zero (non-NULL) values only if necessary for allocation bookkeeping.
746 bool allow_new_region = true;
747 if (_heap->mode()->is_generational()) {
748 switch (req.affiliation()) {
749 case ShenandoahAffiliation::OLD_GENERATION:
750 // Note: unsigned result from free_unaffiliated_regions() will never be less than zero, but it may equal zero.
751 if (_heap->old_generation()->free_unaffiliated_regions() <= 0) {
752 allow_new_region = false;
753 }
754 break;
755
756 case ShenandoahAffiliation::YOUNG_GENERATION:
757 // Note: unsigned result from free_unaffiliated_regions() will never be less than zero, but it may equal zero.
758 if (_heap->young_generation()->free_unaffiliated_regions() <= 0) {
759 allow_new_region = false;
760 }
761 break;
762
763 case ShenandoahAffiliation::FREE:
764 fatal("Should request affiliation");
765
766 default:
767 ShouldNotReachHere();
768 break;
769 }
770 }
771 switch (req.type()) {
772 case ShenandoahAllocRequest::_alloc_tlab:
773 case ShenandoahAllocRequest::_alloc_shared: {
774 // Try to allocate in the mutator view
775 if (_alloc_bias_weight-- <= 0) {
776 // We have observed that regions not collected in previous GC cycle tend to congregate at one end or the other
777 // of the heap. Typically, these are the more recently engaged regions and the objects in these regions have not
778 // yet had a chance to die (and/or are treated as floating garbage). If we use the same allocation bias on each
779 // GC pass, these "most recently" engaged regions for GC pass N will also be the "most recently" engaged regions
780 // for GC pass N+1, and the relatively large amount of live data and/or floating garbage introduced
781 // during the most recent GC pass may once again prevent the region from being collected. We have found that
782 // alternating the allocation behavior between GC passes improves evacuation performance by 3-7% on certain
783 // benchmarks. In the best case, this has the effect of consuming these partially consumed regions before
784 // the start of the next mark cycle so all of their garbage can be efficiently reclaimed.
785 //
786 // First, finish consuming regions that are already partially consumed so as to more tightly limit ranges of
787 // available regions. Other potential benefits:
788 // 1. Eventual collection set has fewer regions because we have packed newly allocated objects into fewer regions
789 // 2. We preserve the "empty" regions longer into the GC cycle, reducing likelihood of allocation failures
790 // late in the GC cycle.
791 idx_t non_empty_on_left = (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator)
792 - _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator));
793 idx_t non_empty_on_right = (_partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator)
794 - _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator));
795 _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::Mutator, (non_empty_on_right < non_empty_on_left));
796 _alloc_bias_weight = _InitialAllocBiasWeight;
797 }
798 if (!_partitions.alloc_from_left_bias(ShenandoahFreeSetPartitionId::Mutator)) {
799 // Allocate within mutator free from high memory to low so as to preserve low memory for humongous allocations
800 if (!_partitions.is_empty(ShenandoahFreeSetPartitionId::Mutator)) {
801 // Use signed idx. Otherwise, loop will never terminate.
802 idx_t leftmost = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator);
803 for (idx_t idx = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator); idx >= leftmost; ) {
804 assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx),
805 "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, idx);
806 ShenandoahHeapRegion* r = _heap->get_region(idx);
807 // try_allocate_in() increases used if the allocation is successful.
808 HeapWord* result;
809 size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab)? req.min_size(): req.size();
810 if ((alloc_capacity(r) >= min_size) && ((result = try_allocate_in(r, req, in_new_region)) != nullptr)) {
811 return result;
812 }
813 idx = _partitions.find_index_of_previous_available_region(ShenandoahFreeSetPartitionId::Mutator, idx - 1);
814 }
815 }
816 } else {
817 // Allocate from low to high memory. This keeps the range of fully empty regions more tightly packed.
818 // Note that the most recently allocated regions tend not to be evacuated in a given GC cycle. So this
823 for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); idx <= rightmost; ) {
824 assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx),
825 "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, idx);
826 ShenandoahHeapRegion* r = _heap->get_region(idx);
827 // try_allocate_in() increases used if the allocation is successful.
828 HeapWord* result;
829 size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab)? req.min_size(): req.size();
830 if ((alloc_capacity(r) >= min_size) && ((result = try_allocate_in(r, req, in_new_region)) != nullptr)) {
831 return result;
832 }
833 idx = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, idx + 1);
834 }
835 }
836 }
837 // There is no recovery. Mutator does not touch collector view at all.
838 break;
839 }
840 case ShenandoahAllocRequest::_alloc_gclab:
841 // GCLABs are for evacuation so we must be in evacuation phase.
842
843 case ShenandoahAllocRequest::_alloc_plab: {
844 // PLABs always reside in old-gen and are only allocated during
845 // evacuation phase.
846
847 case ShenandoahAllocRequest::_alloc_shared_gc: {
848 // Fast-path: try to allocate in the collector view first
849 HeapWord* result;
850 result = allocate_from_partition_with_affiliation(req.is_old()? ShenandoahFreeSetPartitionId::OldCollector:
851 ShenandoahFreeSetPartitionId::Collector,
852 req.affiliation(), req, in_new_region);
853 if (result != nullptr) {
854 return result;
855 } else if (allow_new_region) {
856 // Try a free region that is dedicated to GC allocations.
857 result = allocate_from_partition_with_affiliation(req.is_old()? ShenandoahFreeSetPartitionId::OldCollector:
858 ShenandoahFreeSetPartitionId::Collector,
859 ShenandoahAffiliation::FREE, req, in_new_region);
860 if (result != nullptr) {
861 return result;
862 }
863 }
864
865 // No dice. Can we borrow space from mutator view?
866 if (!ShenandoahEvacReserveOverflow) {
867 return nullptr;
868 }
869 if (!allow_new_region && req.is_old() && (_heap->young_generation()->free_unaffiliated_regions() > 0)) {
870 // This allows us to flip a mutator region to old_collector
871 allow_new_region = true;
872 }
873
874 // We should expand old-gen if this can prevent an old-gen evacuation failure. We don't care so much about
875 // promotion failures since they can be mitigated in a subsequent GC pass. Would be nice to know if this
876 // allocation request is for evacuation or promotion. Individual threads limit their use of PLAB memory for
877 // promotions, so we already have an assurance that any additional memory set aside for old-gen will be used
878 // only for old-gen evacuations.
879 if (allow_new_region) {
880 // Try to steal an empty region from the mutator view.
881 idx_t rightmost_mutator = _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator);
882 idx_t leftmost_mutator = _partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator);
883 for (idx_t idx = rightmost_mutator; idx >= leftmost_mutator; ) {
884 assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx),
885 "Boundaries or find_prev_last_bit failed: " SSIZE_FORMAT, idx);
886 ShenandoahHeapRegion* r = _heap->get_region(idx);
887 if (can_allocate_from(r)) {
888 if (req.is_old()) {
889 flip_to_old_gc(r);
890 } else {
891 flip_to_gc(r);
892 }
893           // Region r is entirely empty. If try_allocate_in() fails on region r, something else is really wrong.
894 // Don't bother to retry with other regions.
895 log_debug(gc, free)("Flipped region " SIZE_FORMAT " to gc for request: " PTR_FORMAT, idx, p2i(&req));
896 return try_allocate_in(r, req, in_new_region);
897 }
898 idx = _partitions.find_index_of_previous_available_region(ShenandoahFreeSetPartitionId::Mutator, idx - 1);
899 }
900 }
901       // No dice. Do not try to mix mutator and GC allocations, because adjusting the region's update watermark (UWM)
902 // due to GC allocations would expose unparsable mutator allocations.
903 break;
904 }
905 }
906 default:
907 ShouldNotReachHere();
908 }
909 return nullptr;
910 }
911
912 // This work method takes an argument corresponding to the number of bytes
913 // free in a region, and returns the largest amount in heapwords that can be allocated
914 // such that both of the following conditions are satisfied:
915 //
916 // 1. it is a multiple of card size
917 // 2. any remaining shard may be filled with a filler object
918 //
919 // The idea is that the allocation starts and ends at card boundaries. Because
920 // a region ('s end) is card-aligned, the remainder shard that must be filled is
921 // at the start of the free space.
922 //
923 // This is merely a helper method to use for the purpose of such a calculation.
924 size_t ShenandoahFreeSet::get_usable_free_words(size_t free_bytes) const {
925 // e.g. card_size is 512, card_shift is 9, min_fill_size() is 8
926 // free is 514
927 // usable_free is 512, which is decreased to 0
928 size_t usable_free = (free_bytes / CardTable::card_size()) << CardTable::card_shift();
929 assert(usable_free <= free_bytes, "Sanity check");
930 if ((free_bytes != usable_free) && (free_bytes - usable_free < ShenandoahHeap::min_fill_size() * HeapWordSize)) {
931 // After aligning to card multiples, the remainder would be smaller than
932 // the minimum filler object, so we'll need to take away another card's
933 // worth to construct a filler object.
934 if (usable_free >= CardTable::card_size()) {
935 usable_free -= CardTable::card_size();
936 } else {
937 assert(usable_free == 0, "usable_free is a multiple of card_size and card_size > min_fill_size");
938 }
939 }
940
941 return usable_free / HeapWordSize;
942 }
943
944 // Given a size argument, which is a multiple of card size, a request struct
945 // for a PLAB, and an old region, return a pointer to the allocated space for
946 // a PLAB which is card-aligned and where any remaining shard in the region
947 // has been suitably filled by a filler object.
948 // It is assumed (and assertion-checked) that such an allocation is always possible.
949 HeapWord* ShenandoahFreeSet::allocate_aligned_plab(size_t size, ShenandoahAllocRequest& req, ShenandoahHeapRegion* r) {
950 assert(_heap->mode()->is_generational(), "PLABs are only for generational mode");
951 assert(r->is_old(), "All PLABs reside in old-gen");
952 assert(!req.is_mutator_alloc(), "PLABs should not be allocated by mutators.");
953 assert(is_aligned(size, CardTable::card_size_in_words()), "Align by design");
954
955 HeapWord* result = r->allocate_aligned(size, req, CardTable::card_size());
956 assert(result != nullptr, "Allocation cannot fail");
957 assert(r->top() <= r->end(), "Allocation cannot span end of region");
958 assert(is_aligned(result, CardTable::card_size_in_words()), "Align by design");
959 return result;
960 }
961
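// Attempt to satisfy req within region r. Trash regions are recycled first, new regions are affiliated to match the
// request, LAB requests may be shrunk toward their minimum size, and PLAB requests are card-aligned. On success, the
// owning partition's used accounting is updated. The region is retired from its partition when its remaining capacity
// drops below PLAB::min_size(), or when the allocation fails and the region is already mostly consumed.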
962 HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
963 assert (has_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());
964 if (_heap->is_concurrent_weak_root_in_progress() && r->is_trash()) {
965 return nullptr;
966 }
967 HeapWord* result = nullptr;
968 try_recycle_trashed(r);
969 in_new_region = r->is_empty();
970
971 if (in_new_region) {
972 log_debug(gc)("Using new region (" SIZE_FORMAT ") for %s (" PTR_FORMAT ").",
973 r->index(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(&req));
974 assert(!r->is_affiliated(), "New region " SIZE_FORMAT " should be unaffiliated", r->index());
975 r->set_affiliation(req.affiliation());
976 if (r->is_old()) {
977 // Any OLD region allocated during concurrent coalesce-and-fill does not need to be coalesced and filled because
978 // all objects allocated within this region are above TAMS (and thus are implicitly marked). In case this is an
979 // OLD region and concurrent preparation for mixed evacuations visits this region before the start of the next
980 // old-gen concurrent mark (i.e. this region is allocated following the start of old-gen concurrent mark but before
981 // concurrent preparations for mixed evacuations are completed), we mark this region as not requiring any
982 // coalesce-and-fill processing.
983 r->end_preemptible_coalesce_and_fill();
984 _heap->old_generation()->clear_cards_for(r);
985 }
986 _heap->generation_for(r->affiliation())->increment_affiliated_region_count();
987
988 #ifdef ASSERT
989 ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
990 assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom");
991 assert(ctx->is_bitmap_clear_range(ctx->top_bitmap(r), r->end()), "Bitmap above top_bitmap() must be clear");
992 #endif
995 } else {
996 assert(r->is_affiliated(), "Region " SIZE_FORMAT " that is not new should be affiliated", r->index());
997 if (r->affiliation() != req.affiliation()) {
998 assert(_heap->mode()->is_generational(), "Request for %s from %s region should only happen in generational mode.",
999 req.affiliation_name(), r->affiliation_name());
1000 return nullptr;
1001 }
1002 }
1003
1004 // req.size() is in words, r->free() is in bytes.
1005 if (req.is_lab_alloc()) {
1006 size_t adjusted_size = req.size();
1007 size_t free = r->free(); // free represents bytes available within region r
1008 if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
1009 // This is a PLAB allocation
1010 assert(_heap->mode()->is_generational(), "PLABs are only for generational mode");
1011 assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, r->index()),
1012 "PLABS must be allocated in old_collector_free regions");
1013
1014 // Need to assure that plabs are aligned on multiple of card region
1015 // Convert free from unaligned bytes to aligned number of words
1016 size_t usable_free = get_usable_free_words(free);
1017 if (adjusted_size > usable_free) {
1018 adjusted_size = usable_free;
1019 }
1020 adjusted_size = align_down(adjusted_size, CardTable::card_size_in_words());
1021 if (adjusted_size >= req.min_size()) {
1022 result = allocate_aligned_plab(adjusted_size, req, r);
1023 assert(result != nullptr, "allocate must succeed");
1024 req.set_actual_size(adjusted_size);
1025 } else {
1026 // Otherwise, leave result == nullptr because the adjusted size is smaller than min size.
1027 log_trace(gc, free)("Failed to shrink PLAB request (" SIZE_FORMAT ") in region " SIZE_FORMAT " to " SIZE_FORMAT
1028 " because min_size() is " SIZE_FORMAT, req.size(), r->index(), adjusted_size, req.min_size());
1029 }
1030 } else {
1031 // This is a GCLAB or a TLAB allocation
1032 // Convert free from unaligned bytes to aligned number of words
1033 free = align_down(free >> LogHeapWordSize, MinObjAlignment);
1034 if (adjusted_size > free) {
1035 adjusted_size = free;
1036 }
1037 if (adjusted_size >= req.min_size()) {
1038 result = r->allocate(adjusted_size, req);
1039 assert (result != nullptr, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, adjusted_size);
1040 req.set_actual_size(adjusted_size);
1041 } else {
1042 log_trace(gc, free)("Failed to shrink TLAB or GCLAB request (" SIZE_FORMAT ") in region " SIZE_FORMAT " to " SIZE_FORMAT
1043 " because min_size() is " SIZE_FORMAT, req.size(), r->index(), adjusted_size, req.min_size());
1044 }
1045 }
1046 } else {
1047 size_t size = req.size();
1048 result = r->allocate(size, req);
1049 if (result != nullptr) {
1050 // Record actual allocation size
1051 req.set_actual_size(size);
1052 }
1053 }
1054
1055 if (result != nullptr) {
1056 // Allocation successful, bump stats:
1057 if (req.is_mutator_alloc()) {
1058 assert(req.is_young(), "Mutator allocations always come from young generation.");
1059 _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, req.actual_size() * HeapWordSize);
1060 } else {
1061 assert(req.is_gc_alloc(), "Should be gc_alloc since req wasn't mutator alloc");
1062
1063 // For GC allocations, we advance update_watermark because the objects relocated into this memory during
1064 // evacuation are not updated during evacuation. For both young and old regions r, it is essential that all
1065 // PLABs be made parsable at the end of evacuation. This is enabled by retiring all plabs at end of evacuation.
1066 r->set_update_watermark(r->top());
1067 if (r->is_old()) {
1068 _partitions.increase_used(ShenandoahFreeSetPartitionId::OldCollector, req.actual_size() * HeapWordSize);
1069 assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "old-gen allocations use PLAB or shared allocation");
1070 // for plabs, we'll sort the difference between evac and promotion usage when we retire the plab
1071 } else {
1072 _partitions.increase_used(ShenandoahFreeSetPartitionId::Collector, req.actual_size() * HeapWordSize);
1073 }
1074 }
1075 }
1076
1077 static const size_t min_capacity = (size_t) (ShenandoahHeapRegion::region_size_bytes() * (1.0 - 1.0 / ShenandoahEvacWaste));
1078 size_t ac = alloc_capacity(r);
1079
1080   if (((result == nullptr) && (ac < min_capacity)) || (ac < PLAB::min_size() * HeapWordSize)) {
1081     // Regardless of whether this allocation succeeded, if the remaining memory is less than PLAB::min_size(), retire this region.
1082 // Note that retire_from_partition() increases used to account for waste.
1083
1084 // Also, if this allocation request failed and the consumed within this region * ShenandoahEvacWaste > region size,
1085 // then retire the region so that subsequent searches can find available memory more quickly.
1086
1087 size_t idx = r->index();
1088 ShenandoahFreeSetPartitionId orig_partition;
1089 if (req.is_mutator_alloc()) {
1090 orig_partition = ShenandoahFreeSetPartitionId::Mutator;
1091 } else if (req.type() == ShenandoahAllocRequest::_alloc_gclab) {
1092 orig_partition = ShenandoahFreeSetPartitionId::Collector;
1093 } else if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
1094 orig_partition = ShenandoahFreeSetPartitionId::OldCollector;
1095 } else {
1096 assert(req.type() == ShenandoahAllocRequest::_alloc_shared_gc, "Unexpected allocation type");
1097 if (req.is_old()) {
1098 orig_partition = ShenandoahFreeSetPartitionId::OldCollector;
1099 } else {
1100 orig_partition = ShenandoahFreeSetPartitionId::Collector;
1101 }
1102 }
1103 _partitions.retire_from_partition(orig_partition, idx, r->used());
1104 _partitions.assert_bounds();
1105 }
1106 return result;
1107 }
1108
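// Satisfy a humongous (multi-region) mutator allocation by finding a run of contiguous, completely empty regions
// within the Mutator partition. The selected regions are marked as humongous start/continuation, retired from the
// Mutator partition, and any unused tail of the last region is accounted as waste.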
1109 HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
1110 assert(req.is_mutator_alloc(), "All humongous allocations are performed by mutator");
1111 shenandoah_assert_heaplocked();
1112
1113 size_t words_size = req.size();
1114 idx_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
1115
1116 assert(req.is_young(), "Humongous regions always allocated in YOUNG");
1117 ShenandoahGeneration* generation = _heap->generation_for(req.affiliation());
1118
1119 // Check if there are enough regions left to satisfy allocation.
1120 if (num > (idx_t) _partitions.count(ShenandoahFreeSetPartitionId::Mutator)) {
1121 return nullptr;
1122 }
1123
1124 idx_t start_range = _partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator);
1125 idx_t end_range = _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator) + 1;
1126 idx_t last_possible_start = end_range - num;
1127
1128 // Find the continuous interval of $num regions, starting from $beg and ending in $end,
1129 // inclusive. Contiguous allocations are biased to the beginning.
1130 idx_t beg = _partitions.find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId::Mutator,
1131 start_range, num);
1132 if (beg > last_possible_start) {
1133 // Hit the end, goodbye
1134 return nullptr;
1135 }
1136 idx_t end = beg;
1137
1138 while (true) {
1139 // We've confirmed num contiguous regions belonging to Mutator partition, so no need to confirm membership.
1140 // If region is not completely free, the current [beg; end] is useless, and we may fast-forward. If we can extend
1141 // the existing range, we can exploit that certain regions are already known to be in the Mutator free set.
1142 while (!can_allocate_from(_heap->get_region(end))) {
1143 // region[end] is not empty, so we restart our search after region[end]
1144 idx_t slide_delta = end + 1 - beg;
1145 if (beg + slide_delta > last_possible_start) {
1146 // no room to slide
1147 return nullptr;
1148 }
1149 for (idx_t span_end = beg + num; slide_delta > 0; slide_delta--) {
1150 if (!_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, span_end)) {
1151 beg = _partitions.find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId::Mutator,
1178 ShenandoahHeapRegion* r = _heap->get_region(i);
1179 try_recycle_trashed(r);
1180
1181 assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous");
1182 assert(r->is_empty(), "Should be empty");
1183
1184 if (i == beg) {
1185 r->make_humongous_start();
1186 } else {
1187 r->make_humongous_cont();
1188 }
1189
1190 // Trailing region may be non-full, record the remainder there
1191 size_t used_words;
1192 if ((i == end) && (remainder != 0)) {
1193 used_words = remainder;
1194 } else {
1195 used_words = ShenandoahHeapRegion::region_size_words();
1196 }
1197
1198 r->set_affiliation(req.affiliation());
1199 r->set_update_watermark(r->bottom());
1200 r->set_top(r->bottom() + used_words);
1201 }
1202 generation->increase_affiliated_region_count(num);
1203 if (remainder != 0) {
1204 // Record this remainder as allocation waste
1205 _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);
1206 }
1207
1208 // retire_range_from_partition() will adjust bounds on Mutator free set if appropriate
1209 _partitions.retire_range_from_partition(ShenandoahFreeSetPartitionId::Mutator, beg, end);
1210
1211 size_t total_humongous_size = ShenandoahHeapRegion::region_size_bytes() * num;
1212 _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, total_humongous_size);
1213 _partitions.assert_bounds();
1214 req.set_actual_size(words_size);
1215 if (remainder != 0) {
1216 req.set_waste(ShenandoahHeapRegion::region_size_words() - remainder);
1217 }
1218 return _heap->get_region(beg)->bottom();
1219 }
1220
1221 void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion* r) {
1222 if (r->is_trash()) {
1223 r->recycle();
1224 }
1225 }
1226
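// Recycle all trash regions, processing them in small batches and periodically releasing the heap lock so that
// mutator allocations are not stalled behind a long recycling pass.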
1227 void ShenandoahFreeSet::recycle_trash() {
1228   // The lock is not reentrant; check that we do not already hold it.
1229 shenandoah_assert_not_heaplocked();
1230
1231 size_t count = 0;
1232 for (size_t i = 0; i < _heap->num_regions(); i++) {
1233 ShenandoahHeapRegion* r = _heap->get_region(i);
1234 if (r->is_trash()) {
1235 _trash_regions[count++] = r;
1236 }
1237 }
1238
1239 // Relinquish the lock after this much time passed.
1240 static constexpr jlong deadline_ns = 30000; // 30 us
1241 size_t idx = 0;
1242 while (idx < count) {
1243 os::naked_yield(); // Yield to allow allocators to take the lock
1244 ShenandoahHeapLocker locker(_heap->lock());
1245 const jlong deadline = os::javaTimeNanos() + deadline_ns;
1246 while (idx < count && os::javaTimeNanos() < deadline) {
1247 try_recycle_trashed(_trash_regions[idx++]);
1248 }
1249 }
1250 }
1251
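// Steal an empty region from the Mutator partition for old-gen GC allocations: move it into the OldCollector
// partition, grow the old evacuation reserve by its capacity, and transfer one region of capacity from young to old.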
1252 void ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) {
1253 size_t idx = r->index();
1254
1255 assert(_partitions.partition_id_matches(idx, ShenandoahFreeSetPartitionId::Mutator), "Should be in mutator view");
1256 assert(can_allocate_from(r), "Should not be allocated");
1257
1258 ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
1259 size_t region_capacity = alloc_capacity(r);
1260 _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
1261 ShenandoahFreeSetPartitionId::OldCollector, region_capacity);
1262 _partitions.assert_bounds();
1263 _heap->old_generation()->augment_evacuation_reserve(region_capacity);
1264 bool transferred = gen_heap->generation_sizer()->transfer_to_old(1);
1265 if (!transferred) {
1266     log_warning(gc, free)("Forcing transfer of region " SIZE_FORMAT " to old reserve.", idx);
1267 gen_heap->generation_sizer()->force_transfer_to_old(1);
1268 }
1269 // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
1270 // to recycle trash before attempting to allocate anything in the region.
1271 }
1272
1273 void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
1274 size_t idx = r->index();
1275
1276 assert(_partitions.partition_id_matches(idx, ShenandoahFreeSetPartitionId::Mutator), "Should be in mutator view");
1277 assert(can_allocate_from(r), "Should not be allocated");
1278
1279 size_t ac = alloc_capacity(r);
1280 _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
1281 ShenandoahFreeSetPartitionId::Collector, ac);
1282 _partitions.assert_bounds();
1283
1284 // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
1285 // to recycle trash before attempting to allocate anything in the region.
1286 }
1287
1288 void ShenandoahFreeSet::clear() {
1289 shenandoah_assert_heaplocked();
1290 clear_internal();
1291 }
1292
1293 void ShenandoahFreeSet::clear_internal() {
1294 _partitions.make_all_regions_unavailable();
1295
1296 _alloc_bias_weight = 0;
1297 _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::Mutator, true);
1298 _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::Collector, false);
1299 _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::OldCollector, false);
1300 }
1301
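// Scan all heap regions and repopulate the free set: regions with sufficient allocation capacity are placed in the
// OldCollector partition if they are old (and not trash), and in the Mutator partition otherwise. The output
// arguments report the number of trashed (collection set) regions per generation, the first and last old region
// indices, and the total old region count.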
1302 void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regions, size_t &old_cset_regions,
1303 size_t &first_old_region, size_t &last_old_region,
1304 size_t &old_region_count) {
1305 clear_internal();
1306
1307 first_old_region = _heap->num_regions();
1308 last_old_region = 0;
1309 old_region_count = 0;
1310 old_cset_regions = 0;
1311 young_cset_regions = 0;
1312
1313 size_t region_size_bytes = _partitions.region_size_bytes();
1314 size_t max_regions = _partitions.max_regions();
1315
1316 size_t mutator_leftmost = max_regions;
1317 size_t mutator_rightmost = 0;
1318 size_t mutator_leftmost_empty = max_regions;
1319 size_t mutator_rightmost_empty = 0;
1320 size_t mutator_regions = 0;
1321 size_t mutator_used = 0;
1322
1323 size_t old_collector_leftmost = max_regions;
1324 size_t old_collector_rightmost = 0;
1325 size_t old_collector_leftmost_empty = max_regions;
1326 size_t old_collector_rightmost_empty = 0;
1327 size_t old_collector_regions = 0;
1328 size_t old_collector_used = 0;
1329
1330 size_t num_regions = _heap->num_regions();
1331 for (size_t idx = 0; idx < num_regions; idx++) {
1332 ShenandoahHeapRegion* region = _heap->get_region(idx);
1333 if (region->is_trash()) {
1334 // Trashed regions represent regions that had been in the collection partition but have not yet been "cleaned up".
1335 // The cset regions are not "trashed" until we have finished update refs.
1336 if (region->is_old()) {
1337 old_cset_regions++;
1338 } else {
1339 assert(region->is_young(), "Trashed region should be old or young");
1340 young_cset_regions++;
1341 }
1342 } else if (region->is_old()) {
1343 // count both humongous and regular regions, but don't count trash (cset) regions.
1344 old_region_count++;
1345 if (first_old_region > idx) {
1346 first_old_region = idx;
1347 }
1348 last_old_region = idx;
1349 }
1350 if (region->is_alloc_allowed() || region->is_trash()) {
1351 assert(!region->is_cset(), "Shouldn't be adding cset regions to the free set");
1352
1353 // Do not add regions that would almost surely fail allocation
1354 size_t ac = alloc_capacity(region);
1355 if (ac > PLAB::min_size() * HeapWordSize) {
1356 if (region->is_trash() || !region->is_old()) {
1357 // Both young and old collected regions (trashed) are placed into the Mutator set
1358 _partitions.raw_assign_membership(idx, ShenandoahFreeSetPartitionId::Mutator);
1359 if (idx < mutator_leftmost) {
1360 mutator_leftmost = idx;
1361 }
1362 if (idx > mutator_rightmost) {
1363 mutator_rightmost = idx;
1364 }
1365 if (ac == region_size_bytes) {
1366 if (idx < mutator_leftmost_empty) {
1367 mutator_leftmost_empty = idx;
1368 }
1369 if (idx > mutator_rightmost_empty) {
1370 mutator_rightmost_empty = idx;
1371 }
1372 }
1373 mutator_regions++;
1374 mutator_used += (region_size_bytes - ac);
1375 } else {
1376 // !region->is_trash() && region is_old()
1377 _partitions.raw_assign_membership(idx, ShenandoahFreeSetPartitionId::OldCollector);
1378 if (idx < old_collector_leftmost) {
1379 old_collector_leftmost = idx;
1380 }
1381 if (idx > old_collector_rightmost) {
1382 old_collector_rightmost = idx;
1383 }
1384 if (ac == region_size_bytes) {
1385 if (idx < old_collector_leftmost_empty) {
1386 old_collector_leftmost_empty = idx;
1387 }
1388 if (idx > old_collector_rightmost_empty) {
1389 old_collector_rightmost_empty = idx;
1390 }
1391 }
1392 old_collector_regions++;
1393 old_collector_used += (region_size_bytes - ac);
1394 }
1395 }
1396 }
1397 }
1398 log_debug(gc)(" At end of prep_to_rebuild, mutator_leftmost: " SIZE_FORMAT
1399 ", mutator_rightmost: " SIZE_FORMAT
1400 ", mutator_leftmost_empty: " SIZE_FORMAT
1401 ", mutator_rightmost_empty: " SIZE_FORMAT
1402 ", mutator_regions: " SIZE_FORMAT
1403 ", mutator_used: " SIZE_FORMAT,
1404 mutator_leftmost, mutator_rightmost, mutator_leftmost_empty, mutator_rightmost_empty,
1405 mutator_regions, mutator_used);
1406
1407 log_debug(gc)(" old_collector_leftmost: " SIZE_FORMAT
1408 ", old_collector_rightmost: " SIZE_FORMAT
1409 ", old_collector_leftmost_empty: " SIZE_FORMAT
1410 ", old_collector_rightmost_empty: " SIZE_FORMAT
1411 ", old_collector_regions: " SIZE_FORMAT
1412 ", old_collector_used: " SIZE_FORMAT,
1413 old_collector_leftmost, old_collector_rightmost, old_collector_leftmost_empty, old_collector_rightmost_empty,
1414 old_collector_regions, old_collector_used);
1415
1416 idx_t rightmost_idx = (mutator_leftmost == max_regions)? -1: (idx_t) mutator_rightmost;
1417 idx_t rightmost_empty_idx = (mutator_leftmost_empty == max_regions)? -1: (idx_t) mutator_rightmost_empty;
1418 _partitions.establish_mutator_intervals(mutator_leftmost, rightmost_idx, mutator_leftmost_empty, rightmost_empty_idx,
1419 mutator_regions, mutator_used);
1420 rightmost_idx = (old_collector_leftmost == max_regions)? -1: (idx_t) old_collector_rightmost;
1421 rightmost_empty_idx = (old_collector_leftmost_empty == max_regions)? -1: (idx_t) old_collector_rightmost_empty;
1422 _partitions.establish_old_collector_intervals(old_collector_leftmost, rightmost_idx, old_collector_leftmost_empty,
1423 rightmost_empty_idx, old_collector_regions, old_collector_used);
1424 log_debug(gc)(" After find_regions_with_alloc_capacity(), Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "],"
1425 " Old Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
1426 _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
1427 _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
1428 _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector),
1429 _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector));
1430 }
1431
1432 // Returns the number of regions transferred; adds the transferred bytes to the in-out argument bytes_transferred.
1433 size_t ShenandoahFreeSet::transfer_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId which_collector,
1434 size_t max_xfer_regions,
1435 size_t& bytes_transferred) {
1436 shenandoah_assert_heaplocked();
1437 size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1438 size_t transferred_regions = 0;
1439 idx_t rightmost = _partitions.rightmost_empty(which_collector);
1440 for (idx_t idx = _partitions.leftmost_empty(which_collector); (transferred_regions < max_xfer_regions) && (idx <= rightmost); ) {
1441 assert(_partitions.in_free_set(which_collector, idx), "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, idx);
1442 // Note: can_allocate_from() denotes that region is entirely empty
1443 if (can_allocate_from(idx)) {
1444 _partitions.move_from_partition_to_partition(idx, which_collector, ShenandoahFreeSetPartitionId::Mutator, region_size_bytes);
1445 transferred_regions++;
1446 bytes_transferred += region_size_bytes;
1447 }
1448 idx = _partitions.find_index_of_next_available_region(which_collector, idx + 1);
1449 }
1450 return transferred_regions;
1451 }
1452
1453 // Returns the number of regions transferred; adds the transferred bytes to the in-out argument bytes_transferred.
1454 size_t ShenandoahFreeSet::transfer_non_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId collector_id,
1455 size_t max_xfer_regions,
1456 size_t& bytes_transferred) {
1457 shenandoah_assert_heaplocked();
1458 size_t transferred_regions = 0;
1459 idx_t rightmost = _partitions.rightmost(collector_id);
1460 for (idx_t idx = _partitions.leftmost(collector_id); (transferred_regions < max_xfer_regions) && (idx <= rightmost); ) {
1461 assert(_partitions.in_free_set(collector_id, idx), "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, idx);
1462 size_t ac = alloc_capacity(idx);
1463 if (ac > 0) {
1464 _partitions.move_from_partition_to_partition(idx, collector_id, ShenandoahFreeSetPartitionId::Mutator, ac);
1465 transferred_regions++;
1466 bytes_transferred += ac;
1467 }
1468     idx = _partitions.find_index_of_next_available_region(collector_id, idx + 1);
1469 }
1470 return transferred_regions;
1471 }
1472
1473 void ShenandoahFreeSet::move_regions_from_collector_to_mutator(size_t max_xfer_regions) {
1474 size_t collector_xfer = 0;
1475 size_t old_collector_xfer = 0;
1476
1477 // Process empty regions within the Collector free partition
1478 if ((max_xfer_regions > 0) &&
1479 (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Collector)
1480 <= _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Collector))) {
1481 ShenandoahHeapLocker locker(_heap->lock());
1482 max_xfer_regions -=
1483 transfer_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId::Collector, max_xfer_regions,
1484 collector_xfer);
1485 }
1486
1487 // Process empty regions within the OldCollector free partition
1488 if ((max_xfer_regions > 0) &&
1489 (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::OldCollector)
1490 <= _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::OldCollector))) {
1491 ShenandoahHeapLocker locker(_heap->lock());
1492 size_t old_collector_regions =
1493 transfer_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId::OldCollector, max_xfer_regions,
1494 old_collector_xfer);
1495 max_xfer_regions -= old_collector_regions;
1496 if (old_collector_regions > 0) {
1497 ShenandoahGenerationalHeap::cast(_heap)->generation_sizer()->transfer_to_young(old_collector_regions);
1498 }
1499 }
1500
1501 // If there are any non-empty regions within Collector partition, we can also move them to the Mutator free partition
1502 if ((max_xfer_regions > 0) && (_partitions.leftmost(ShenandoahFreeSetPartitionId::Collector)
1503 <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector))) {
1504 ShenandoahHeapLocker locker(_heap->lock());
1505 max_xfer_regions -=
1506 transfer_non_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId::Collector, max_xfer_regions,
1507 collector_xfer);
1508 }
1509
1510 size_t total_xfer = collector_xfer + old_collector_xfer;
1511 log_info(gc, ergo)("At start of update refs, moving " SIZE_FORMAT "%s to Mutator free set from Collector Reserve ("
1512 SIZE_FORMAT "%s) and from Old Collector Reserve (" SIZE_FORMAT "%s)",
1513 byte_size_in_proper_unit(total_xfer), proper_unit_for_byte_size(total_xfer),
1514 byte_size_in_proper_unit(collector_xfer), proper_unit_for_byte_size(collector_xfer),
1515 byte_size_in_proper_unit(old_collector_xfer), proper_unit_for_byte_size(old_collector_xfer));
1516 }
1517
1518
1519 // Overwrite arguments to represent the amount of memory in each generation that is about to be recycled
1520 void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_cset_regions, size_t &old_cset_regions,
1521 size_t &first_old_region, size_t &last_old_region, size_t &old_region_count) {
1522 shenandoah_assert_heaplocked();
1523 // This resets all state information, removing all regions from all sets.
1524 clear();
1525 log_debug(gc, free)("Rebuilding FreeSet");
1526
1527   // This places regions that have alloc_capacity into the old_collector set if they identify as is_old(), and into
1528   // the mutator set otherwise. All trashed (cset) regions are affiliated young and placed in the mutator set.
1529 find_regions_with_alloc_capacity(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
1530 }
1531
1532 void ShenandoahFreeSet::establish_generation_sizes(size_t young_region_count, size_t old_region_count) {
1533 assert(young_region_count + old_region_count == ShenandoahHeap::heap()->num_regions(), "Sanity");
1534 if (ShenandoahHeap::heap()->mode()->is_generational()) {
1535 ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
1536 ShenandoahOldGeneration* old_gen = heap->old_generation();
1537 ShenandoahYoungGeneration* young_gen = heap->young_generation();
1538 size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1539
1540 size_t original_old_capacity = old_gen->max_capacity();
1541 size_t new_old_capacity = old_region_count * region_size_bytes;
1542 size_t new_young_capacity = young_region_count * region_size_bytes;
1543 old_gen->set_capacity(new_old_capacity);
1544 young_gen->set_capacity(new_young_capacity);
1545
1546 if (new_old_capacity > original_old_capacity) {
1547 size_t region_count = (new_old_capacity - original_old_capacity) / region_size_bytes;
1548 log_info(gc)("Transfer " SIZE_FORMAT " region(s) from %s to %s, yielding increased size: " PROPERFMT,
1549 region_count, young_gen->name(), old_gen->name(), PROPERFMTARGS(new_old_capacity));
1550 } else if (new_old_capacity < original_old_capacity) {
1551 size_t region_count = (original_old_capacity - new_old_capacity) / region_size_bytes;
1552 log_info(gc)("Transfer " SIZE_FORMAT " region(s) from %s to %s, yielding increased size: " PROPERFMT,
1553 region_count, old_gen->name(), young_gen->name(), PROPERFMTARGS(new_young_capacity));
1554 }
1555 // This balances generations, so clear any pending request to balance.
1556 old_gen->set_region_balance(0);
1557 }
1558 }
1559
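// Final phase of a free-set rebuild: compute the young (and, in generational mode, old) reserves, move regions into
// the Collector and OldCollector partitions to satisfy those reserves, then re-establish generation sizes and the
// old-collector allocation bias.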
1560 void ShenandoahFreeSet::finish_rebuild(size_t young_cset_regions, size_t old_cset_regions, size_t old_region_count,
1561 bool have_evacuation_reserves) {
1562 shenandoah_assert_heaplocked();
1563 size_t young_reserve(0), old_reserve(0);
1564
1565 if (_heap->mode()->is_generational()) {
1566 compute_young_and_old_reserves(young_cset_regions, old_cset_regions, have_evacuation_reserves,
1567 young_reserve, old_reserve);
1568 } else {
1569 young_reserve = (_heap->max_capacity() / 100) * ShenandoahEvacReserve;
1570 old_reserve = 0;
1571 }
1572
1573 // Move some of the mutator regions in the Collector and OldCollector partitions in order to satisfy
1574 // young_reserve and old_reserve.
1575 reserve_regions(young_reserve, old_reserve, old_region_count);
1576 size_t young_region_count = _heap->num_regions() - old_region_count;
1577 establish_generation_sizes(young_region_count, old_region_count);
1578 establish_old_collector_alloc_bias();
1579 _partitions.assert_bounds();
1580 log_status();
1581 }
1582
1583 void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions, size_t old_cset_regions,
1584 bool have_evacuation_reserves,
1585 size_t& young_reserve_result, size_t& old_reserve_result) const {
1586 shenandoah_assert_generational();
1587 const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1588
1589 ShenandoahOldGeneration* const old_generation = _heap->old_generation();
1590 size_t old_available = old_generation->available();
1591 size_t old_unaffiliated_regions = old_generation->free_unaffiliated_regions();
1592 ShenandoahYoungGeneration* const young_generation = _heap->young_generation();
1593 size_t young_capacity = young_generation->max_capacity();
1594 size_t young_unaffiliated_regions = young_generation->free_unaffiliated_regions();
1595
1596 // Add in the regions we anticipate to be freed by evacuation of the collection set
1597 old_unaffiliated_regions += old_cset_regions;
1598 young_unaffiliated_regions += young_cset_regions;
1599
1600 // Consult old-region balance to make adjustments to current generation capacities and availability.
1601 // The generation region transfers take place after we rebuild.
1602 const ssize_t old_region_balance = old_generation->get_region_balance();
1603 if (old_region_balance != 0) {
1604 #ifdef ASSERT
1605 if (old_region_balance > 0) {
1606 assert(old_region_balance <= checked_cast<ssize_t>(old_unaffiliated_regions), "Cannot transfer regions that are affiliated");
1607 } else {
1608 assert(0 - old_region_balance <= checked_cast<ssize_t>(young_unaffiliated_regions), "Cannot transfer regions that are affiliated");
1609 }
1610 #endif
1611
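    // A positive balance transfers unaffiliated regions from the old generation to young;
    // a negative balance transfers unaffiliated regions from young to old.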
1612 ssize_t xfer_bytes = old_region_balance * checked_cast<ssize_t>(region_size_bytes);
1613 old_available -= xfer_bytes;
1614 old_unaffiliated_regions -= old_region_balance;
1615 young_capacity += xfer_bytes;
1616 young_unaffiliated_regions += old_region_balance;
1617 }
1618
1619   // All allocations taken from the old collector set are performed by GC, generally using PLABs for both
1620   // promotions and evacuations. The division between old memory reserved for evacuation and old memory
1621   // reserved for promotion is enforced using thread-local variables that prescribe intentions for
1622   // each PLAB's available memory.
1623 if (have_evacuation_reserves) {
1624 // We are rebuilding at the end of final mark, having already established evacuation budgets for this GC pass.
1625 const size_t promoted_reserve = old_generation->get_promoted_reserve();
1626 const size_t old_evac_reserve = old_generation->get_evacuation_reserve();
1627 young_reserve_result = young_generation->get_evacuation_reserve();
1628 old_reserve_result = promoted_reserve + old_evac_reserve;
1629 assert(old_reserve_result <= old_available,
1630            "Cannot reserve (" SIZE_FORMAT " + " SIZE_FORMAT ") more OLD than is available: " SIZE_FORMAT,
1631 promoted_reserve, old_evac_reserve, old_available);
1632 } else {
1633 // We are rebuilding at end of GC, so we set aside budgets specified on command line (or defaults)
1634 young_reserve_result = (young_capacity * ShenandoahEvacReserve) / 100;
1635 // The auto-sizer has already made old-gen large enough to hold all anticipated evacuations and promotions.
1636 // Affiliated old-gen regions are already in the OldCollector free set. Add in the relevant number of
1637 // unaffiliated regions.
1638 old_reserve_result = old_available;
1639 }
1640
1641 // Old available regions that have less than PLAB::min_size() of available memory are not placed into the OldCollector
1642 // free set. Because of this, old_available may not have enough memory to represent the intended reserve. Adjust
1643 // the reserve downward to account for this possibility. This loss is part of the reason why the original budget
1644 // was adjusted with ShenandoahOldEvacWaste and ShenandoahOldPromoWaste multipliers.
1645 if (old_reserve_result >
1646 _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes) {
1647 old_reserve_result =
1648 _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes;
1649 }
1650
1651 if (young_reserve_result > young_unaffiliated_regions * region_size_bytes) {
1652 young_reserve_result = young_unaffiliated_regions * region_size_bytes;
1653 }
1654 }
1655
1656 // Having placed all regions that have allocation capacity into the mutator set if they identify as is_young()
1657 // or into the old collector set if they identify as is_old(), move some of these regions from the mutator set
1658 // into the collector set or old collector set in order to ensure that the memory available for allocations within
1659 // the collector set is at least to_reserve and the memory available for allocations within the old collector set
1660 // is at least to_reserve_old.
1661 void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old, size_t &old_region_count) {
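  // Walk the heap from the highest region index downward so that regions moved into the Collector and
  // OldCollector reserves accumulate at the high end of the heap, leaving lower addresses to the Mutator.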
1662 for (size_t i = _heap->num_regions(); i > 0; i--) {
1663 size_t idx = i - 1;
1664 ShenandoahHeapRegion* r = _heap->get_region(idx);
1665 if (!_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx)) {
1666 continue;
1667 }
1668
1669 size_t ac = alloc_capacity(r);
1670 assert (ac > 0, "Membership in free set implies has capacity");
1671 assert (!r->is_old() || r->is_trash(), "Except for trash, mutator_is_free regions should not be affiliated OLD");
1672
1673 bool move_to_old_collector = _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector) < to_reserve_old;
1674 bool move_to_collector = _partitions.available_in(ShenandoahFreeSetPartitionId::Collector) < to_reserve;
1675
1676 if (!move_to_collector && !move_to_old_collector) {
1677       // We've satisfied both to_reserve and to_reserve_old
1678 break;
1679 }
1680
1681 if (move_to_old_collector) {
1682 // We give priority to OldCollector partition because we desire to pack OldCollector regions into higher
1683 // addresses than Collector regions. Presumably, OldCollector regions are more "stable" and less likely to
1684 // be collected in the near future.
1685 if (r->is_trash() || !r->is_affiliated()) {
1686 // OLD regions that have available memory are already in the old_collector free set.
1687 _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
1688 ShenandoahFreeSetPartitionId::OldCollector, ac);
1689 log_debug(gc)(" Shifting region " SIZE_FORMAT " from mutator_free to old_collector_free", idx);
1690 log_debug(gc)(" Shifted Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "],"
1691 " Old Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
1692 _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
1693 _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
1694 _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector),
1695 _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector));
1696 old_region_count++;
1697 continue;
1698 }
1699 }
1700
1701 if (move_to_collector) {
1702 // Note: In a previous implementation, regions were only placed into the survivor space (collector_is_free) if
1703 // they were entirely empty. This has the effect of causing new Mutator allocation to reside next to objects
1704 // that have already survived at least one GC, mixing ephemeral with longer-lived objects in the same region.
1705 // Any objects that have survived a GC are less likely to immediately become garbage, so a region that contains
1706 // survivor objects is less likely to be selected for the collection set. This alternative implementation allows
1707 // survivor regions to continue accumulating other survivor objects, and makes it more likely that ephemeral objects
1708 // occupy regions comprised entirely of ephemeral objects. These regions are highly likely to be included in the next
1709 // collection set, and they are easily evacuated because they have low density of live objects.
1710 _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
1711 ShenandoahFreeSetPartitionId::Collector, ac);
1712 log_debug(gc)(" Shifting region " SIZE_FORMAT " from mutator_free to collector_free", idx);
1713 log_debug(gc)(" Shifted Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "],"
1714 " Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
1715 _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
1716 _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
1717 _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector),
1718 _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector));
1719 }
1720 }
1721
1722 if (LogTarget(Info, gc, free)::is_enabled()) {
1723 size_t old_reserve = _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector);
1724 if (old_reserve < to_reserve_old) {
1725 log_info(gc, free)("Wanted " PROPERFMT " for old reserve, but only reserved: " PROPERFMT,
1726 PROPERFMTARGS(to_reserve_old), PROPERFMTARGS(old_reserve));
1727 }
1728 size_t reserve = _partitions.available_in(ShenandoahFreeSetPartitionId::Collector);
1729 if (reserve < to_reserve) {
1730       log_info(gc, free)("Wanted " PROPERFMT " for young reserve, but only reserved: " PROPERFMT,
1731 PROPERFMTARGS(to_reserve), PROPERFMTARGS(reserve));
1732 }
1733 }
1734 }
1735
1736 void ShenandoahFreeSet::establish_old_collector_alloc_bias() {
1737 ShenandoahHeap* heap = ShenandoahHeap::heap();
1738 shenandoah_assert_heaplocked();
1739
1740 idx_t left_idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector);
1741 idx_t right_idx = _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector);
1742 idx_t middle = (left_idx + right_idx) / 2;
1743 size_t available_in_first_half = 0;
1744 size_t available_in_second_half = 0;
1745
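  // Sum the memory available to the OldCollector partition in the lower-index half and in the higher-index
  // half of its range; the sparser half will be consumed first (see below).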
1746 for (idx_t index = left_idx; index < middle; index++) {
1747 if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, index)) {
1748 ShenandoahHeapRegion* r = heap->get_region((size_t) index);
1749 available_in_first_half += r->free();
1750 }
1751 }
1752 for (idx_t index = middle; index <= right_idx; index++) {
1753 if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, index)) {
1754 ShenandoahHeapRegion* r = heap->get_region(index);
1755 available_in_second_half += r->free();
1756 }
1757 }
1758
1759   // We prefer to consume the sparsely distributed regions first, so that the remaining regions end up densely packed.
1760   // Densely packed regions reduce the effort required to find a region with sufficient memory to satisfy a new allocation
1761   // request. Regions become sparsely distributed following a Full GC, which tends to slide all regions to the front of the
1762   // heap rather than allowing survivor regions to remain at the high end of the heap where we intend for them to congregate.
1763 _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::OldCollector,
1764 (available_in_second_half > available_in_first_half));
1765 }
1766
1767 void ShenandoahFreeSet::log_status_under_lock() {
1768   // Must not be heap locked; this method acquires the heap lock only when logging is enabled
1769 shenandoah_assert_not_heaplocked();
1770 if (LogTarget(Info, gc, free)::is_enabled()
1771 DEBUG_ONLY(|| LogTarget(Debug, gc, free)::is_enabled())) {
1772 ShenandoahHeapLocker locker(_heap->lock());
1773 log_status();
1774 }
1775 }
1776
1777 void ShenandoahFreeSet::log_status() {
1778 shenandoah_assert_heaplocked();
1779
1780 #ifdef ASSERT
1781 // Dump of the FreeSet details is only enabled if assertions are enabled
1782 if (LogTarget(Debug, gc, free)::is_enabled()) {
1783 #define BUFFER_SIZE 80
1784 size_t retired_old = 0;
1785 size_t retired_old_humongous = 0;
1786 size_t retired_young = 0;
1787 size_t retired_young_humongous = 0;
1788 size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1789 size_t retired_young_waste = 0;
1790 size_t retired_old_waste = 0;
1791 size_t consumed_collector = 0;
1792 size_t consumed_old_collector = 0;
1793 size_t consumed_mutator = 0;
1794 size_t available_old = 0;
1795 size_t available_young = 0;
1796 size_t available_mutator = 0;
1797 size_t available_collector = 0;
1798 size_t available_old_collector = 0;
1799
1800 char buffer[BUFFER_SIZE];
1801 for (uint i = 0; i < BUFFER_SIZE; i++) {
1802 buffer[i] = '\0';
1803 }
1804
1805 log_debug(gc)("FreeSet map legend:"
1806 " M:mutator_free C:collector_free O:old_collector_free"
1807 " H:humongous ~:retired old _:retired young");
1808     log_debug(gc)(" mutator free range [" SIZE_FORMAT ".." SIZE_FORMAT "] allocating from %s, "
1809                   "collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "], "
1810                   "old collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "] allocating from %s",
1811 _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
1812 _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
1813 _partitions.alloc_from_left_bias(ShenandoahFreeSetPartitionId::Mutator)? "left to right": "right to left",
1814 _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector),
1815 _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector),
1816 _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector),
1817 _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector),
1818 _partitions.alloc_from_left_bias(ShenandoahFreeSetPartitionId::OldCollector)? "left to right": "right to left");
1819
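    // Upper-case letters mark completely free regions; lower-case letters mark regions with only partial
    // allocation capacity remaining.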
1820 for (uint i = 0; i < _heap->num_regions(); i++) {
1821 ShenandoahHeapRegion *r = _heap->get_region(i);
1822 uint idx = i % 64;
1823 if ((i != 0) && (idx == 0)) {
1824 log_debug(gc)(" %6u: %s", i-64, buffer);
1825 }
1826 if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, i)) {
1827 size_t capacity = alloc_capacity(r);
1828 assert(!r->is_old() || r->is_trash(), "Old regions except trash regions should not be in mutator_free set");
1829 available_mutator += capacity;
1830 consumed_mutator += region_size_bytes - capacity;
1831 buffer[idx] = (capacity == region_size_bytes)? 'M': 'm';
1832 } else if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, i)) {
1833 size_t capacity = alloc_capacity(r);
1834 assert(!r->is_old() || r->is_trash(), "Old regions except trash regions should not be in collector_free set");
1835 available_collector += capacity;
1836 consumed_collector += region_size_bytes - capacity;
1837 buffer[idx] = (capacity == region_size_bytes)? 'C': 'c';
1838 } else if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, i)) {
1839 size_t capacity = alloc_capacity(r);
1840 available_old_collector += capacity;
1841 consumed_old_collector += region_size_bytes - capacity;
1842 buffer[idx] = (capacity == region_size_bytes)? 'O': 'o';
1843 } else if (r->is_humongous()) {
1844 if (r->is_old()) {
1845 buffer[idx] = 'H';
1846 retired_old_humongous += region_size_bytes;
1847 } else {
1848 buffer[idx] = 'h';
1849 retired_young_humongous += region_size_bytes;
1850 }
1851 } else {
1852 if (r->is_old()) {
1853 buffer[idx] = '~';
1854 retired_old_waste += alloc_capacity(r);
1855 retired_old += region_size_bytes;
1856 } else {
1857 buffer[idx] = '_';
1858 retired_young_waste += alloc_capacity(r);
1859 retired_young += region_size_bytes;
1860 }
1861 }
1862 }
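    // Emit the final (possibly partial) row of the region map.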
1863 uint remnant = _heap->num_regions() % 64;
1864 if (remnant > 0) {
1865 buffer[remnant] = '\0';
1866 } else {
1867 remnant = 64;
1868 }
1869 log_debug(gc)(" %6u: %s", (uint) (_heap->num_regions() - remnant), buffer);
1870 }
1871 #endif
1872
1873 LogTarget(Info, gc, free) lt;
1874 if (lt.is_enabled()) {
1875 ResourceMark rm;
1876 LogStream ls(lt);
1877
1878 {
1879 idx_t last_idx = 0;
1880       size_t max = 0;
1881       size_t max_contig = 0;
1882       size_t empty_contig = 0;
1883
1884       size_t total_used = 0;
1885       size_t total_free = 0;
1886       size_t total_free_ext = 0;
1887
1888       for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator);
1889            idx <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator); idx++) {
1890         if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx)) {
1891           ShenandoahHeapRegion *r = _heap->get_region(idx);
1892           size_t free = alloc_capacity(r);
1893           max = MAX2(max, free);
1894           if (r->is_empty()) {
1895             total_free_ext += free;
1896             if (last_idx + 1 == idx) {
1897               empty_contig++;
1898             } else {
1899 empty_contig = 1;
1900 }
1901 } else {
1902 empty_contig = 0;
1903 }
1904 total_used += r->used();
1905 total_free += free;
1906 max_contig = MAX2(max_contig, empty_contig);
1907 last_idx = idx;
1908 }
1909 }
1910
1911 size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
1912 size_t free = capacity() - used();
1913
1914       // Since certain regions that belonged to the Mutator free partition at the time of the most recent rebuild may have
1915       // been retired, the sums of used and capacity over regions that are still in the Mutator free partition may not match
1916       // the internally tracked values of used() and free().
1917 assert(free == total_free, "Free memory should match");
1918 ls.print("Free: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s regular, " SIZE_FORMAT "%s humongous, ",
1919 byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
1920 byte_size_in_proper_unit(max), proper_unit_for_byte_size(max),
1921 byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous)
1922 );
1923
1924 ls.print("Frag: ");
1925 size_t frag_ext;
1926 if (total_free_ext > 0) {
1927 frag_ext = 100 - (100 * max_humongous / total_free_ext);
1928 } else {
1929 frag_ext = 0;
1930 }
1931 ls.print(SIZE_FORMAT "%% external, ", frag_ext);
1932
1933 size_t frag_int;
1934 if (_partitions.count(ShenandoahFreeSetPartitionId::Mutator) > 0) {
1935 frag_int = (100 * (total_used / _partitions.count(ShenandoahFreeSetPartitionId::Mutator))
1936 / ShenandoahHeapRegion::region_size_bytes());
1937 } else {
1938 frag_int = 0;
1939       }
1940       ls.print(SIZE_FORMAT "%% internal; ", frag_int);
1941       ls.print("Used: " SIZE_FORMAT "%s, Mutator Free: " SIZE_FORMAT,
1942                byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used),
1943                _partitions.count(ShenandoahFreeSetPartitionId::Mutator));
1944     }
1945
1946     {
1947 size_t max = 0;
1948 size_t total_free = 0;
1949 size_t total_used = 0;
1950
1951 for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector);
1952 idx <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector); idx++) {
1953 if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, idx)) {
1954 ShenandoahHeapRegion *r = _heap->get_region(idx);
1955 size_t free = alloc_capacity(r);
1956 max = MAX2(max, free);
1957 total_free += free;
1958 total_used += r->used();
1959 }
1960 }
1961 ls.print(" Collector Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s; Used: " SIZE_FORMAT "%s",
1962 byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
1963 byte_size_in_proper_unit(max), proper_unit_for_byte_size(max),
1964 byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used));
1965 }
1966
1967 if (_heap->mode()->is_generational()) {
1968 size_t max = 0;
1969 size_t total_free = 0;
1970 size_t total_used = 0;
1971
1972 for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector);
1973 idx <= _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector); idx++) {
1974 if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, idx)) {
1975 ShenandoahHeapRegion *r = _heap->get_region(idx);
1976 size_t free = alloc_capacity(r);
1977 max = MAX2(max, free);
1978 total_free += free;
1979 total_used += r->used();
1980 }
1981 }
1982 ls.print_cr(" Old Collector Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s; Used: " SIZE_FORMAT "%s",
1983 byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
1984 byte_size_in_proper_unit(max), proper_unit_for_byte_size(max),
1985 byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used));
1986 }
1987 }
1988 }
1989
1990 HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
1991 shenandoah_assert_heaplocked();
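  // Requests larger than a regular region can hold are satisfied by a contiguous (humongous) allocation.
  // LAB requests (TLAB/GCLAB/PLAB) must never require a humongous allocation.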
1992 if (ShenandoahHeapRegion::requires_humongous(req.size())) {
1993 switch (req.type()) {
1994 case ShenandoahAllocRequest::_alloc_shared:
1995 case ShenandoahAllocRequest::_alloc_shared_gc:
1996 in_new_region = true;
1997 return allocate_contiguous(req);
1998 case ShenandoahAllocRequest::_alloc_plab:
1999 case ShenandoahAllocRequest::_alloc_gclab:
2000 case ShenandoahAllocRequest::_alloc_tlab:
2001 in_new_region = false;
2002       assert(false, "Trying to allocate a humongous-sized TLAB/GCLAB/PLAB: " SIZE_FORMAT, req.size());
2003 return nullptr;
2004 default:
2005 ShouldNotReachHere();
2006 return nullptr;
2007 }
2008 } else {
2009 return allocate_single(req, in_new_region);
2010 }
2011 }
2012
2013 void ShenandoahFreeSet::print_on(outputStream* out) const {
2014 out->print_cr("Mutator Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::Mutator));
2015 idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
2016 for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); index <= rightmost; ) {
2017 assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, index),
2018 "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
2019 _heap->get_region(index)->print_on(out);
2020 index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, index + 1);
2021 }
2022 out->print_cr("Collector Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::Collector));
2023 rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector);
2024 for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector); index <= rightmost; ) {
2025 assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, index),
2026 "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
2027 _heap->get_region(index)->print_on(out);
2028 index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Collector, index + 1);
2029 }
2030 if (_heap->mode()->is_generational()) {
2031 out->print_cr("Old Collector Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::OldCollector));
2032 for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector);
2033 index <= _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector); index++) {
2034 if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, index)) {
2035 _heap->get_region(index)->print_on(out);
2036 }
2037 }
2038 }
2039 }
2040
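// Internal fragmentation over the Mutator partition, computed as 1 - (sum of used^2) / (region_size * sum of used):
// close to 0 when the used regions are nearly full, approaching 1 as usage is spread thinly across many regions.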
2041 double ShenandoahFreeSet::internal_fragmentation() {
2042 double squared = 0;
2043 double linear = 0;
2044
2045 idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
2046 for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); index <= rightmost; ) {
2047 assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, index),
2048 "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
2049 ShenandoahHeapRegion* r = _heap->get_region(index);
2050 size_t used = r->used();
2051 squared += used * used;
2052 linear += used;
2053 index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, index + 1);
2054 }
2055
2056 if (linear > 0) {
2057 double s = squared / (ShenandoahHeapRegion::region_size_bytes() * linear);
2058 return 1 - s;