/*
 * Copyright (c) 2016, 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/orderAccess.hpp"

ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
  _heap(heap),
  _mutator_free_bitmap(max_regions, mtGC),
  _collector_free_bitmap(max_regions, mtGC),
  _max(max_regions)
{
  clear_internal();
}

void ShenandoahFreeSet::increase_used(size_t num_bytes) {
  shenandoah_assert_heaplocked();
  _used += num_bytes;

  assert(_used <= _capacity, "must not use more than we have: used: " SIZE_FORMAT
         ", capacity: " SIZE_FORMAT ", num_bytes: " SIZE_FORMAT, _used, _capacity, num_bytes);
}

bool ShenandoahFreeSet::is_mutator_free(size_t idx) const {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
          idx, _max, _mutator_leftmost, _mutator_rightmost);
  return _mutator_free_bitmap.at(idx);
}

bool ShenandoahFreeSet::is_collector_free(size_t idx) const {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
          idx, _max, _collector_leftmost, _collector_rightmost);
  return _collector_free_bitmap.at(idx);
}

HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
  // Scan the bitmap looking for a first fit.
  //
  // Leftmost and rightmost bounds provide enough caching to walk bitmap efficiently. Normally,
  // we would find the region to allocate at right away.
  //
  // Allocations are biased: new application allocs go to beginning of the heap, and GC allocs
  // go to the end. This makes application allocation faster, because we would clear lots
  // of regions from the beginning most of the time.
  //
  // Free set maintains mutator and collector views, and normally they allocate in their views only,
  // unless we special-case stealing and mixed allocations.

  switch (req.type()) {
    case ShenandoahAllocRequest::_alloc_tlab:
    case ShenandoahAllocRequest::_alloc_shared: {

      // Try to allocate in the mutator view
      for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
        if (is_mutator_free(idx)) {
          HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
          if (result != nullptr) {
            return result;
          }
        }
      }

      // There is no recovery. Mutator does not touch collector view at all.
      break;
    }
    case ShenandoahAllocRequest::_alloc_gclab:
    case ShenandoahAllocRequest::_alloc_shared_gc: {
      // size_t is unsigned, need to dodge underflow when _leftmost = 0

      // Fast-path: try to allocate in the collector view first
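      // The loop variable c runs over (leftmost, rightmost + 1], so idx = c - 1 walks
      // from rightmost down to leftmost without ever wrapping below zero.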
      for (size_t c = _collector_rightmost + 1; c > _collector_leftmost; c--) {
        size_t idx = c - 1;
        if (is_collector_free(idx)) {
          HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
          if (result != nullptr) {
            return result;
          }
        }
      }

      // No dice. Can we borrow space from mutator view?
      if (!ShenandoahEvacReserveOverflow) {
        return nullptr;
      }

      // Try to steal the empty region from the mutator view
      for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
        size_t idx = c - 1;
        if (is_mutator_free(idx)) {
          ShenandoahHeapRegion* r = _heap->get_region(idx);
          if (can_allocate_from(r)) {
            flip_to_gc(r);
            HeapWord *result = try_allocate_in(r, req, in_new_region);
            if (result != nullptr) {
              return result;
            }
          }
        }
      }

      // No dice. Do not try to mix mutator and GC allocations, because
      // URWM moves due to GC allocations would expose unparsable mutator
      // allocations.

      break;
    }
    default:
      ShouldNotReachHere();
  }

  return nullptr;
}

HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
  assert (!has_no_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());

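  // While concurrent weak root processing is in progress, trash regions cannot be
  // recycled yet, so do not allocate from them.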
  if (_heap->is_concurrent_weak_root_in_progress() &&
      r->is_trash()) {
    return nullptr;
  }

  try_recycle_trashed(r);

  in_new_region = r->is_empty();

  HeapWord* result = nullptr;
  size_t size = req.size();

  if (req.is_lab_alloc()) {
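    // r->free() is in bytes; the shift converts it to words, which are then rounded
    // down to the minimum object alignment.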
    size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
    if (size > free) {
      size = free;
    }
    if (size >= req.min_size()) {
      result = r->allocate(size, req.type());
      assert (result != nullptr, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size);
    }
  } else {
    result = r->allocate(size, req.type());
  }

  if (result != nullptr) {
    // Allocation successful, bump stats:
    if (req.is_mutator_alloc()) {
      increase_used(size * HeapWordSize);
    }

    // Record actual allocation size
    req.set_actual_size(size);

    if (req.is_gc_alloc()) {
      r->set_update_watermark(r->top());
    }
  }

  if (result == nullptr || has_no_alloc_capacity(r)) {
    // Region cannot afford this or future allocations. Retire it.
    //
    // While this seems a bit harsh, especially when this large allocation does not
    // fit but the next small one would, we risk inflating scan times when lots of
    // almost-full regions precede the fully-empty region where we want to allocate the entire TLAB.
    // TODO: Record first fully-empty region, and use that for large allocations

    // Record the remainder as allocation waste
    if (req.is_mutator_alloc()) {
      size_t waste = r->free();
      if (waste > 0) {
        increase_used(waste);
        _heap->notify_mutator_alloc_words(waste >> LogHeapWordSize, true);
      }
    }

    size_t num = r->index();
    _collector_free_bitmap.clear_bit(num);
    _mutator_free_bitmap.clear_bit(num);
    // Touched the bounds? Need to update:
    if (touches_bounds(num)) {
      adjust_bounds();
    }
    assert_bounds();
  }
  return result;
}

bool ShenandoahFreeSet::touches_bounds(size_t num) const {
  return num == _collector_leftmost || num == _collector_rightmost || num == _mutator_leftmost || num == _mutator_rightmost;
}

void ShenandoahFreeSet::recompute_bounds() {
  // Reset to the most pessimistic case:
  _mutator_rightmost = _max - 1;
  _mutator_leftmost = 0;
  _collector_rightmost = _max - 1;
  _collector_leftmost = 0;

  // ...and adjust from there
  adjust_bounds();
}

void ShenandoahFreeSet::adjust_bounds() {
  // Rewind both mutator bounds until the next bit.
  while (_mutator_leftmost < _max && !is_mutator_free(_mutator_leftmost)) {
    _mutator_leftmost++;
  }
  while (_mutator_rightmost > 0 && !is_mutator_free(_mutator_rightmost)) {
    _mutator_rightmost--;
  }
  // Rewind both collector bounds until the next bit.
  while (_collector_leftmost < _max && !is_collector_free(_collector_leftmost)) {
    _collector_leftmost++;
  }
  while (_collector_rightmost > 0 && !is_collector_free(_collector_rightmost)) {
    _collector_rightmost--;
  }
}

HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
  shenandoah_assert_heaplocked();

  size_t words_size = req.size();
  size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

  // No regions left to satisfy allocation, bye.
  if (num > mutator_count()) {
    return nullptr;
  }

  // Find the contiguous interval of $num regions, starting from $beg and ending in $end,
  // inclusive. Contiguous allocations are biased to the beginning.

  size_t beg = _mutator_leftmost;
  size_t end = beg;

  while (true) {
    if (end >= _max) {
      // Hit the end, goodbye
      return nullptr;
    }

    // If the region at 'end' is not free, or cannot be allocated from, the current
    // [beg; end] interval is useless, and we may fast-forward past it.
    if (!is_mutator_free(end) || !can_allocate_from(_heap->get_region(end))) {
      end++;
      beg = end;
      continue;
    }

    if ((end - beg + 1) == num) {
      // found the match
      break;
    }

    end++;
  }

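  // remainder is the number of words used in the trailing region; zero means the
  // allocation exactly fills every spanned region.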
  size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();

  // Initialize regions:
  for (size_t i = beg; i <= end; i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    try_recycle_trashed(r);

    assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous");
    assert(r->is_empty(), "Should be empty");

    if (i == beg) {
      r->make_humongous_start();
    } else {
      r->make_humongous_cont();
    }

    // Trailing region may be non-full, record the remainder there
    size_t used_words;
    if ((i == end) && (remainder != 0)) {
      used_words = remainder;
    } else {
      used_words = ShenandoahHeapRegion::region_size_words();
    }

    r->set_top(r->bottom() + used_words);

    _mutator_free_bitmap.clear_bit(r->index());
  }

  // While individual regions report their true use, all humongous regions are
  // marked used in the free set.
  increase_used(ShenandoahHeapRegion::region_size_bytes() * num);

  if (remainder != 0) {
    // Record this remainder as allocation waste
    _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);
  }

  // Allocated at left/rightmost? Move the bounds appropriately.
  if (beg == _mutator_leftmost || end == _mutator_rightmost) {
    adjust_bounds();
  }
  assert_bounds();

  req.set_actual_size(words_size);
  return _heap->get_region(beg)->bottom();
}

bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) {
  return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
}

size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) {
  if (r->is_trash()) {
    // This would be recycled on allocation path
    return ShenandoahHeapRegion::region_size_bytes();
  } else {
    return r->free();
  }
}

bool ShenandoahFreeSet::has_no_alloc_capacity(ShenandoahHeapRegion *r) {
  return alloc_capacity(r) == 0;
}

void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
  if (r->is_trash()) {
    _heap->decrease_used(r->used());
    r->recycle();
  }
}

void ShenandoahFreeSet::recycle_trash() {
  // The lock is not reentrant; check that we do not already hold it
  shenandoah_assert_not_heaplocked();

  for (size_t i = 0; i < _heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    if (r->is_trash()) {
      ShenandoahHeapLocker locker(_heap->lock());
      try_recycle_trashed(r);
    }
    SpinPause(); // allow allocators to take the lock
  }
}

void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
  size_t idx = r->index();

  assert(_mutator_free_bitmap.at(idx), "Should be in mutator view");
  assert(can_allocate_from(r), "Should not be allocated");

  _mutator_free_bitmap.clear_bit(idx);
  _collector_free_bitmap.set_bit(idx);
  _collector_leftmost = MIN2(idx, _collector_leftmost);
  _collector_rightmost = MAX2(idx, _collector_rightmost);

  _capacity -= alloc_capacity(r);

  if (touches_bounds(idx)) {
    adjust_bounds();
  }
  assert_bounds();
}

void ShenandoahFreeSet::clear() {
  shenandoah_assert_heaplocked();
  clear_internal();
}

void ShenandoahFreeSet::clear_internal() {
  _mutator_free_bitmap.clear();
  _collector_free_bitmap.clear();
  _mutator_leftmost = _max;
  _mutator_rightmost = 0;
  _collector_leftmost = _max;
  _collector_rightmost = 0;
  _capacity = 0;
  _used = 0;
}

void ShenandoahFreeSet::rebuild() {
  shenandoah_assert_heaplocked();
  clear();

  for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
    ShenandoahHeapRegion* region = _heap->get_region(idx);
    if (region->is_alloc_allowed() || region->is_trash()) {
      assert(!region->is_cset(), "Shouldn't be adding those to the free set");

      // Do not add regions that would surely fail allocation
      if (has_no_alloc_capacity(region)) continue;

      _capacity += alloc_capacity(region);
      assert(_used <= _capacity, "must not use more than we have");

      assert(!is_mutator_free(idx), "We are about to add it, it shouldn't be there already");
      _mutator_free_bitmap.set_bit(idx);
    }
  }

  // Evac reserve: reserve trailing space for evacuations
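  // ShenandoahEvacReserve is expressed as a percentage of the maximum heap capacity.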
  size_t to_reserve = _heap->max_capacity() / 100 * ShenandoahEvacReserve;
  size_t reserved = 0;

  for (size_t idx = _heap->num_regions() - 1; idx > 0; idx--) {
    if (reserved >= to_reserve) break;

    ShenandoahHeapRegion* region = _heap->get_region(idx);
    if (_mutator_free_bitmap.at(idx) && can_allocate_from(region)) {
      _mutator_free_bitmap.clear_bit(idx);
      _collector_free_bitmap.set_bit(idx);
      size_t ac = alloc_capacity(region);
      _capacity -= ac;
      reserved += ac;
    }
  }

  recompute_bounds();
  assert_bounds();
}

void ShenandoahFreeSet::log_status() {
  shenandoah_assert_heaplocked();

  LogTarget(Info, gc, ergo) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);

    {
      size_t last_idx = 0;
      size_t max = 0;
      size_t max_contig = 0;
      size_t empty_contig = 0;

      size_t total_used = 0;
      size_t total_free = 0;
      size_t total_free_ext = 0;

      for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
        if (is_mutator_free(idx)) {
          ShenandoahHeapRegion *r = _heap->get_region(idx);
          size_t free = alloc_capacity(r);

          max = MAX2(max, free);

          if (r->is_empty()) {
            total_free_ext += free;
            if (last_idx + 1 == idx) {
              empty_contig++;
            } else {
              empty_contig = 1;
            }
          } else {
            empty_contig = 0;
          }

          total_used += r->used();
          total_free += free;

          max_contig = MAX2(max_contig, empty_contig);
          last_idx = idx;
        }
      }

      size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
      size_t free = capacity() - used();

      ls.print("Free: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s regular, " SIZE_FORMAT "%s humongous, ",
               byte_size_in_proper_unit(total_free),    proper_unit_for_byte_size(total_free),
               byte_size_in_proper_unit(max),           proper_unit_for_byte_size(max),
               byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous)
      );

      ls.print("Frag: ");
      size_t frag_ext;
      if (total_free_ext > 0) {
        frag_ext = 100 - (100 * max_humongous / total_free_ext);
      } else {
        frag_ext = 0;
      }
      ls.print(SIZE_FORMAT "%% external, ", frag_ext);

      size_t frag_int;
      if (mutator_count() > 0) {
        frag_int = (100 * (total_used / mutator_count()) / ShenandoahHeapRegion::region_size_bytes());
      } else {
        frag_int = 0;
      }
      ls.print(SIZE_FORMAT "%% internal; ", frag_int);
    }

    {
      size_t max = 0;
      size_t total_free = 0;

      for (size_t idx = _collector_leftmost; idx <= _collector_rightmost; idx++) {
        if (is_collector_free(idx)) {
          ShenandoahHeapRegion *r = _heap->get_region(idx);
          size_t free = alloc_capacity(r);
          max = MAX2(max, free);
          total_free += free;
        }
      }

      ls.print_cr("Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s",
                  byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
                  byte_size_in_proper_unit(max),        proper_unit_for_byte_size(max));
    }
  }
}

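// Typical use (a sketch; the exact caller lives in ShenandoahHeap and the names
// below are illustrative): the caller takes the heap lock, builds a
// ShenandoahAllocRequest, and invokes allocate(). A nullptr result means the
// free set could not satisfy the request.
//
//   ShenandoahHeapLocker locker(heap->lock());
//   bool in_new_region = false;
//   HeapWord* mem = free_set->allocate(req, in_new_region);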
HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
  shenandoah_assert_heaplocked();
  assert_bounds();

  if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
    switch (req.type()) {
      case ShenandoahAllocRequest::_alloc_shared:
      case ShenandoahAllocRequest::_alloc_shared_gc:
        in_new_region = true;
        return allocate_contiguous(req);
      case ShenandoahAllocRequest::_alloc_gclab:
      case ShenandoahAllocRequest::_alloc_tlab:
        in_new_region = false;
        assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
               req.size(), ShenandoahHeapRegion::humongous_threshold_words());
        return nullptr;
      default:
        ShouldNotReachHere();
        return nullptr;
    }
  } else {
    return allocate_single(req, in_new_region);
  }
}

size_t ShenandoahFreeSet::unsafe_peek_free() const {
  // Deliberately not locked, this method is unsafe when free set is modified.

  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (index < _max && is_mutator_free(index)) {
      ShenandoahHeapRegion* r = _heap->get_region(index);
      if (r->free() >= MinTLABSize) {
        return r->free();
      }
    }
  }

  // It appears that no regions are left
  return 0;
}

void ShenandoahFreeSet::print_on(outputStream* out) const {
  out->print_cr("Mutator Free Set: " SIZE_FORMAT "", mutator_count());
  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (is_mutator_free(index)) {
      _heap->get_region(index)->print_on(out);
    }
  }
  out->print_cr("Collector Free Set: " SIZE_FORMAT "", collector_count());
  for (size_t index = _collector_leftmost; index <= _collector_rightmost; index++) {
    if (is_collector_free(index)) {
      _heap->get_region(index)->print_on(out);
    }
  }
}

/*
 * Internal fragmentation metric: describes how fragmented the heap regions are.
 *
 * It is derived as:
 *
 *               sum(used[i]^2, i=0..k)
 *   IF = 1 - ------------------------------
 *              C * sum(used[i], i=0..k)
 *
 * ...where k is the number of regions in computation, C is the region capacity, and
 * used[i] is the used space in the region.
 *
 * The non-linearity causes IF to be lower for the cases where the same total heap
 * usage is densely packed. For example:
 *   a) Heap is completely full => IF = 0
 *   b) Heap is half full, first 50% regions are completely full => IF = 0
 *   c) Heap is half full, each region is 50% full => IF = 1/2
 *   d) Heap is quarter full, first 50% regions are completely full => IF = 0
 *   e) Heap is quarter full, each region is 25% full => IF = 3/4
 *   f) Heap has one small object per each region => IF =~ 1
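 *
 * A worked instance of case (c): with k regions of capacity C, each 50% full,
 * sum(used[i]^2) = k * (C/2)^2 = k * C^2 / 4, while C * sum(used[i]) = k * C^2 / 2,
 * so IF = 1 - 1/2 = 1/2.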
 */
double ShenandoahFreeSet::internal_fragmentation() {
  double squared = 0;
  double linear = 0;
  int count = 0;

  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (is_mutator_free(index)) {
      ShenandoahHeapRegion* r = _heap->get_region(index);
      size_t used = r->used();
      squared += used * used;
      linear += used;
      count++;
    }
  }

  if (count > 0) {
    double s = squared / (ShenandoahHeapRegion::region_size_bytes() * linear);
    return 1 - s;
  } else {
    return 0;
  }
}

/*
 * External fragmentation metric: describes how fragmented the heap is.
 *
 * It is derived as:
 *
 *   EF = 1 - largest_contiguous_free / total_free
 *
 * For example:
 *   a) Heap is completely empty => EF = 0
 *   b) Heap is completely full => EF = 0
 *   c) Heap is first-half full => EF = 1/2
 *   d) Heap is half full, full and empty regions interleave => EF =~ 1
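 *
 * A worked instance of case (d): with N regions, half of them free but never two
 * in a row, largest_contiguous_free is a single region while total_free is N/2
 * regions, so EF = 1 - 2/N, approaching 1 as N grows.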
 */
double ShenandoahFreeSet::external_fragmentation() {
  size_t last_idx = 0;
  size_t max_contig = 0;
  size_t empty_contig = 0;

  size_t free = 0;

  for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
    if (is_mutator_free(index)) {
      ShenandoahHeapRegion* r = _heap->get_region(index);
      if (r->is_empty()) {
        free += ShenandoahHeapRegion::region_size_bytes();
        if (last_idx + 1 == index) {
          empty_contig++;
        } else {
          empty_contig = 1;
        }
      } else {
        empty_contig = 0;
      }

      max_contig = MAX2(max_contig, empty_contig);
      last_idx = index;
    }
  }

  if (free > 0) {
    return 1 - (1.0 * max_contig * ShenandoahHeapRegion::region_size_bytes() / free);
  } else {
    return 0;
  }
}

#ifdef ASSERT
void ShenandoahFreeSet::assert_bounds() const {
  // Performance invariants. Failing these would not break the free set, but performance
  // would suffer.
  assert (_mutator_leftmost <= _max, "leftmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _mutator_leftmost, _max);
  assert (_mutator_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _mutator_rightmost, _max);

  assert (_mutator_leftmost == _max || is_mutator_free(_mutator_leftmost), "leftmost region should be free: " SIZE_FORMAT, _mutator_leftmost);
  assert (_mutator_rightmost == 0 || is_mutator_free(_mutator_rightmost), "rightmost region should be free: " SIZE_FORMAT, _mutator_rightmost);

  size_t beg_off = _mutator_free_bitmap.find_first_set_bit(0);
  size_t end_off = _mutator_free_bitmap.find_first_set_bit(_mutator_rightmost + 1);
  assert (beg_off >= _mutator_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _mutator_leftmost);
  assert (end_off == _max, "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, _mutator_rightmost);

  assert (_collector_leftmost <= _max, "leftmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _collector_leftmost, _max);
  assert (_collector_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _collector_rightmost, _max);

  assert (_collector_leftmost == _max || is_collector_free(_collector_leftmost), "leftmost region should be free: " SIZE_FORMAT, _collector_leftmost);
  assert (_collector_rightmost == 0 || is_collector_free(_collector_rightmost), "rightmost region should be free: " SIZE_FORMAT, _collector_rightmost);

  beg_off = _collector_free_bitmap.find_first_set_bit(0);
  end_off = _collector_free_bitmap.find_first_set_bit(_collector_rightmost + 1);
  assert (beg_off >= _collector_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _collector_leftmost);
  assert (end_off == _max, "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, _collector_rightmost);
}
#endif
/*
 * Copyright (c) 2016, 2021, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/shenandoahAffiliation.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/orderAccess.hpp"

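// ShenandoahSetsOfFree tracks, for every heap region, which free set it belongs
// to (Mutator, Collector, OldCollector, or NotFree), along with per-set
// leftmost/rightmost bounds, bounds over fully empty regions, capacities,
// used bytes, and region counts.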
ShenandoahSetsOfFree::ShenandoahSetsOfFree(size_t max_regions, ShenandoahFreeSet* free_set) :
  _max(max_regions),
  _free_set(free_set),
  _region_size_bytes(ShenandoahHeapRegion::region_size_bytes())
{
  _membership = NEW_C_HEAP_ARRAY(ShenandoahFreeMemoryType, max_regions, mtGC);
  clear_internal();
}

ShenandoahSetsOfFree::~ShenandoahSetsOfFree() {
  FREE_C_HEAP_ARRAY(ShenandoahFreeMemoryType, _membership);
}


void ShenandoahSetsOfFree::clear_internal() {
  for (size_t idx = 0; idx < _max; idx++) {
    _membership[idx] = NotFree;
  }

  for (size_t idx = 0; idx < NumFreeSets; idx++) {
    _leftmosts[idx] = _max;
    _rightmosts[idx] = 0;
    _leftmosts_empty[idx] = _max;
    _rightmosts_empty[idx] = 0;
    _capacity_of[idx] = 0;
    _used_by[idx] = 0;
  }

  _left_to_right_bias[Mutator] = true;
  _left_to_right_bias[Collector] = false;
  _left_to_right_bias[OldCollector] = false;

  _region_counts[Mutator] = 0;
  _region_counts[Collector] = 0;
  _region_counts[OldCollector] = 0;
  _region_counts[NotFree] = _max;
}

void ShenandoahSetsOfFree::clear_all() {
  clear_internal();
}

void ShenandoahSetsOfFree::increase_used(ShenandoahFreeMemoryType which_set, size_t bytes) {
  assert (which_set > NotFree && which_set < NumFreeSets, "Set must correspond to a valid freeset");
  _used_by[which_set] += bytes;
  assert (_used_by[which_set] <= _capacity_of[which_set],
          "Must not use (" SIZE_FORMAT ") more than capacity (" SIZE_FORMAT ") after increase by " SIZE_FORMAT,
          _used_by[which_set], _capacity_of[which_set], bytes);
}

inline void ShenandoahSetsOfFree::shrink_bounds_if_touched(ShenandoahFreeMemoryType set, size_t idx) {
  if (idx == _leftmosts[set]) {
    while ((_leftmosts[set] < _max) && !in_free_set(_leftmosts[set], set)) {
      _leftmosts[set]++;
    }
    if (_leftmosts_empty[set] < _leftmosts[set]) {
      // This gets us closer to where we need to be; we'll scan further when leftmosts_empty is requested.
      _leftmosts_empty[set] = _leftmosts[set];
    }
  }
  if (idx == _rightmosts[set]) {
    while (_rightmosts[set] > 0 && !in_free_set(_rightmosts[set], set)) {
      _rightmosts[set]--;
    }
    if (_rightmosts_empty[set] > _rightmosts[set]) {
      // This gets us closer to where we need to be; we'll scan further when rightmosts_empty is requested.
      _rightmosts_empty[set] = _rightmosts[set];
    }
  }
}

inline void ShenandoahSetsOfFree::expand_bounds_maybe(ShenandoahFreeMemoryType set, size_t idx, size_t region_capacity) {
  if (region_capacity == _region_size_bytes) {
    if (_leftmosts_empty[set] > idx) {
      _leftmosts_empty[set] = idx;
    }
    if (_rightmosts_empty[set] < idx) {
      _rightmosts_empty[set] = idx;
    }
  }
  if (_leftmosts[set] > idx) {
    _leftmosts[set] = idx;
  }
  if (_rightmosts[set] < idx) {
    _rightmosts[set] = idx;
  }
}

void ShenandoahSetsOfFree::remove_from_free_sets(size_t idx) {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
  ShenandoahFreeMemoryType orig_set = membership(idx);
  assert (orig_set > NotFree && orig_set < NumFreeSets, "Cannot remove from free sets if not already free");
  _membership[idx] = NotFree;
  shrink_bounds_if_touched(orig_set, idx);

  _region_counts[orig_set]--;
  _region_counts[NotFree]++;
}


void ShenandoahSetsOfFree::make_free(size_t idx, ShenandoahFreeMemoryType which_set, size_t region_capacity) {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
  assert (_membership[idx] == NotFree, "Cannot make free if already free");
  assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
  _membership[idx] = which_set;
  _capacity_of[which_set] += region_capacity;
  expand_bounds_maybe(which_set, idx, region_capacity);

  _region_counts[NotFree]--;
  _region_counts[which_set]++;
}

void ShenandoahSetsOfFree::move_to_set(size_t idx, ShenandoahFreeMemoryType new_set, size_t region_capacity) {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
  assert ((new_set > NotFree) && (new_set < NumFreeSets), "New set must be valid");
  ShenandoahFreeMemoryType orig_set = _membership[idx];
  assert ((orig_set > NotFree) && (orig_set < NumFreeSets), "Cannot move free unless already free");
  // Expected transitions:
  //   During rebuild:          Mutator => Collector
  //                            Mutator empty => Collector
  //   During flip_to_gc:       Mutator empty => Collector
  //                            Mutator empty => Old Collector
  //   At start of update refs: Collector => Mutator
  //                            OldCollector Empty => Mutator
  assert((region_capacity <= _region_size_bytes && ((orig_set == Mutator && new_set == Collector) || (orig_set == Collector && new_set == Mutator)))
         || (region_capacity == _region_size_bytes && ((orig_set == Mutator && new_set == Collector) || (orig_set == OldCollector && new_set == Mutator) || new_set == OldCollector)),
         "Unexpected movement between sets");

  _membership[idx] = new_set;
  _capacity_of[orig_set] -= region_capacity;
  shrink_bounds_if_touched(orig_set, idx);

  _capacity_of[new_set] += region_capacity;
  expand_bounds_maybe(new_set, idx, region_capacity);

  _region_counts[orig_set]--;
  _region_counts[new_set]++;
}

inline ShenandoahFreeMemoryType ShenandoahSetsOfFree::membership(size_t idx) const {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
  return _membership[idx];
}

// Returns true iff region idx is in the test_set free_set. Before returning true, asserts that the free
// set is not empty. Requires that test_set is neither NotFree nor NumFreeSets.
inline bool ShenandoahSetsOfFree::in_free_set(size_t idx, ShenandoahFreeMemoryType test_set) const {
  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
  if (_membership[idx] == test_set) {
    assert (test_set == NotFree || _free_set->alloc_capacity(idx) > 0, "Free regions must have alloc capacity");
    return true;
  } else {
    return false;
  }
}

inline size_t ShenandoahSetsOfFree::leftmost(ShenandoahFreeMemoryType which_set) const {
  assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
  size_t idx = _leftmosts[which_set];
  if (idx >= _max) {
    return _max;
  } else {
    assert (in_free_set(idx, which_set), "left-most region must be free");
    return idx;
  }
}

inline size_t ShenandoahSetsOfFree::rightmost(ShenandoahFreeMemoryType which_set) const {
  assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
  size_t idx = _rightmosts[which_set];
  assert ((_leftmosts[which_set] == _max) || in_free_set(idx, which_set), "right-most region must be free");
  return idx;
}

inline bool ShenandoahSetsOfFree::is_empty(ShenandoahFreeMemoryType which_set) const {
  assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
  return (leftmost(which_set) > rightmost(which_set));
}

size_t ShenandoahSetsOfFree::leftmost_empty(ShenandoahFreeMemoryType which_set) {
  assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
  for (size_t idx = _leftmosts_empty[which_set]; idx < _max; idx++) {
    if ((membership(idx) == which_set) && (_free_set->alloc_capacity(idx) == _region_size_bytes)) {
      _leftmosts_empty[which_set] = idx;
      return idx;
    }
  }
  _leftmosts_empty[which_set] = _max;
  _rightmosts_empty[which_set] = 0;
  return _max;
}

inline size_t ShenandoahSetsOfFree::rightmost_empty(ShenandoahFreeMemoryType which_set) {
  assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
  for (intptr_t idx = _rightmosts_empty[which_set]; idx >= 0; idx--) {
    if ((membership(idx) == which_set) && (_free_set->alloc_capacity(idx) == _region_size_bytes)) {
      _rightmosts_empty[which_set] = idx;
      return idx;
    }
  }
  _leftmosts_empty[which_set] = _max;
  _rightmosts_empty[which_set] = 0;
  return 0;
}

inline bool ShenandoahSetsOfFree::alloc_from_left_bias(ShenandoahFreeMemoryType which_set) {
  assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
  return _left_to_right_bias[which_set];
}

void ShenandoahSetsOfFree::establish_alloc_bias(ShenandoahFreeMemoryType which_set) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  shenandoah_assert_heaplocked();
  assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");

  size_t middle = (_leftmosts[which_set] + _rightmosts[which_set]) / 2;
  size_t available_in_first_half = 0;
  size_t available_in_second_half = 0;

  for (size_t index = _leftmosts[which_set]; index < middle; index++) {
    if (in_free_set(index, which_set)) {
      ShenandoahHeapRegion* r = heap->get_region(index);
      available_in_first_half += r->free();
    }
  }
  for (size_t index = middle; index <= _rightmosts[which_set]; index++) {
    if (in_free_set(index, which_set)) {
      ShenandoahHeapRegion* r = heap->get_region(index);
      available_in_second_half += r->free();
    }
  }

  // We desire to first consume the sparsely distributed regions in order that the remaining regions are densely packed.
  // Densely packing regions reduces the effort to search for a region that has sufficient memory to satisfy a new allocation
  // request. Regions become sparsely distributed following a Full GC, which tends to slide all regions to the front of the
  // heap rather than allowing survivor regions to remain at the high end of the heap where we intend for them to congregate.

  // TODO: In the future, we may modify Full GC so that it slides old objects to the end of the heap and young objects to the
  // front of the heap. If this is done, we can always search survivor Collector and OldCollector regions right to left.
  _left_to_right_bias[which_set] = (available_in_second_half > available_in_first_half);
}

#ifdef ASSERT
void ShenandoahSetsOfFree::assert_bounds() {

  size_t leftmosts[NumFreeSets];
  size_t rightmosts[NumFreeSets];
  size_t empty_leftmosts[NumFreeSets];
  size_t empty_rightmosts[NumFreeSets];

  for (int i = 0; i < NumFreeSets; i++) {
    leftmosts[i] = _max;
    empty_leftmosts[i] = _max;
    rightmosts[i] = 0;
    empty_rightmosts[i] = 0;
  }

  for (size_t i = 0; i < _max; i++) {
    ShenandoahFreeMemoryType set = membership(i);
    switch (set) {
      case NotFree:
        break;

      case Mutator:
      case Collector:
      case OldCollector:
      {
        size_t capacity = _free_set->alloc_capacity(i);
        bool is_empty = (capacity == _region_size_bytes);
        assert(capacity > 0, "free regions must have allocation capacity");
        if (i < leftmosts[set]) {
          leftmosts[set] = i;
        }
        if (is_empty && (i < empty_leftmosts[set])) {
          empty_leftmosts[set] = i;
        }
        if (i > rightmosts[set]) {
          rightmosts[set] = i;
        }
        if (is_empty && (i > empty_rightmosts[set])) {
          empty_rightmosts[set] = i;
        }
        break;
      }

      case NumFreeSets:
      default:
        ShouldNotReachHere();
    }
  }

  // Performance invariants. Failing these would not break the free set, but performance would suffer.
  assert (leftmost(Mutator) <= _max, "leftmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, leftmost(Mutator), _max);
  assert (rightmost(Mutator) < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, rightmost(Mutator), _max);

  assert (leftmost(Mutator) == _max || in_free_set(leftmost(Mutator), Mutator),
          "leftmost region should be free: " SIZE_FORMAT, leftmost(Mutator));
  assert (leftmost(Mutator) == _max || in_free_set(rightmost(Mutator), Mutator),
          "rightmost region should be free: " SIZE_FORMAT, rightmost(Mutator));

  // If Mutator set is empty, leftmosts will both equal max, rightmosts will both equal zero. Likewise for empty region sets.
  size_t beg_off = leftmosts[Mutator];
  size_t end_off = rightmosts[Mutator];
  assert (beg_off >= leftmost(Mutator),
          "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost(Mutator));
  assert (end_off <= rightmost(Mutator),
          "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, rightmost(Mutator));

  beg_off = empty_leftmosts[Mutator];
  end_off = empty_rightmosts[Mutator];
  assert (beg_off >= leftmost_empty(Mutator),
          "free empty regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost_empty(Mutator));
  assert (end_off <= rightmost_empty(Mutator),
          "free empty regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, rightmost_empty(Mutator));

  // Performance invariants. Failing these would not break the free set, but performance would suffer.
  assert (leftmost(Collector) <= _max, "leftmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, leftmost(Collector), _max);
  assert (rightmost(Collector) < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, rightmost(Collector), _max);

  assert (leftmost(Collector) == _max || in_free_set(leftmost(Collector), Collector),
          "leftmost region should be free: " SIZE_FORMAT, leftmost(Collector));
  assert (leftmost(Collector) == _max || in_free_set(rightmost(Collector), Collector),
          "rightmost region should be free: " SIZE_FORMAT, rightmost(Collector));

  // If Collector set is empty, leftmosts will both equal max, rightmosts will both equal zero. Likewise for empty region sets.
  beg_off = leftmosts[Collector];
  end_off = rightmosts[Collector];
  assert (beg_off >= leftmost(Collector),
          "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost(Collector));
  assert (end_off <= rightmost(Collector),
          "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, rightmost(Collector));

  beg_off = empty_leftmosts[Collector];
  end_off = empty_rightmosts[Collector];
  assert (beg_off >= leftmost_empty(Collector),
          "free empty regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost_empty(Collector));
  assert (end_off <= rightmost_empty(Collector),
          "free empty regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, rightmost_empty(Collector));

  // Performance invariants. Failing these would not break the free set, but performance would suffer.
  assert (leftmost(OldCollector) <= _max, "leftmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, leftmost(OldCollector), _max);
  assert (rightmost(OldCollector) < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, rightmost(OldCollector), _max);

  assert (leftmost(OldCollector) == _max || in_free_set(leftmost(OldCollector), OldCollector),
          "leftmost region should be free: " SIZE_FORMAT, leftmost(OldCollector));
  assert (leftmost(OldCollector) == _max || in_free_set(rightmost(OldCollector), OldCollector),
          "rightmost region should be free: " SIZE_FORMAT, rightmost(OldCollector));

  // If OldCollector set is empty, leftmosts will both equal max, rightmosts will both equal zero. Likewise for empty region sets.
  beg_off = leftmosts[OldCollector];
  end_off = rightmosts[OldCollector];
  assert (beg_off >= leftmost(OldCollector),
          "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost(OldCollector));
  assert (end_off <= rightmost(OldCollector),
          "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, rightmost(OldCollector));

  beg_off = empty_leftmosts[OldCollector];
  end_off = empty_rightmosts[OldCollector];
  assert (beg_off >= leftmost_empty(OldCollector),
          "free empty regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost_empty(OldCollector));
  assert (end_off <= rightmost_empty(OldCollector),
          "free empty regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, rightmost_empty(OldCollector));
}
#endif

ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
  _heap(heap),
  _free_sets(max_regions, this)
{
  clear_internal();
}

// This allocates from a region within the old_collector_set. If affiliation equals OLD, the allocation must be taken
// from a region that is_old(). Otherwise, affiliation should be FREE, in which case this will put a previously unaffiliated
// region into service.
HeapWord* ShenandoahFreeSet::allocate_old_with_affiliation(ShenandoahAffiliation affiliation,
                                                           ShenandoahAllocRequest& req, bool& in_new_region) {
  shenandoah_assert_heaplocked();

  size_t rightmost =
    (affiliation == ShenandoahAffiliation::FREE)? _free_sets.rightmost_empty(OldCollector): _free_sets.rightmost(OldCollector);
  size_t leftmost =
    (affiliation == ShenandoahAffiliation::FREE)? _free_sets.leftmost_empty(OldCollector): _free_sets.leftmost(OldCollector);
  if (_free_sets.alloc_from_left_bias(OldCollector)) {
    // This mode picks up stragglers left by a full GC
    for (size_t idx = leftmost; idx <= rightmost; idx++) {
      if (_free_sets.in_free_set(idx, OldCollector)) {
        ShenandoahHeapRegion* r = _heap->get_region(idx);
        assert(r->is_trash() || !r->is_affiliated() || r->is_old(), "old_collector_set region has bad affiliation");
        if (r->affiliation() == affiliation) {
          HeapWord* result = try_allocate_in(r, req, in_new_region);
          if (result != nullptr) {
            return result;
          }
        }
      }
    }
  } else {
    // This mode picks up stragglers left by a previous concurrent GC
    for (size_t count = rightmost + 1; count > leftmost; count--) {
      // size_t is unsigned, need to dodge underflow when _leftmost = 0
      size_t idx = count - 1;
      if (_free_sets.in_free_set(idx, OldCollector)) {
        ShenandoahHeapRegion* r = _heap->get_region(idx);
        assert(r->is_trash() || !r->is_affiliated() || r->is_old(), "old_collector_set region has bad affiliation");
        if (r->affiliation() == affiliation) {
          HeapWord* result = try_allocate_in(r, req, in_new_region);
          if (result != nullptr) {
            return result;
          }
        }
      }
    }
  }
  return nullptr;
}

void ShenandoahFreeSet::add_old_collector_free_region(ShenandoahHeapRegion* region) {
  shenandoah_assert_heaplocked();
  size_t plab_min_size_in_bytes = ShenandoahGenerationalHeap::heap()->plab_min_size() * HeapWordSize;
  size_t idx = region->index();
  size_t capacity = alloc_capacity(region);
  assert(_free_sets.membership(idx) == NotFree, "Regions promoted in place should not be in any free set");
  if (capacity >= plab_min_size_in_bytes) {
    _free_sets.make_free(idx, OldCollector, capacity);
    _heap->old_generation()->augment_promoted_reserve(capacity);
  }
}

HeapWord* ShenandoahFreeSet::allocate_with_affiliation(ShenandoahAffiliation affiliation,
                                                       ShenandoahAllocRequest& req, bool& in_new_region) {
  shenandoah_assert_heaplocked();
  size_t rightmost =
    (affiliation == ShenandoahAffiliation::FREE)? _free_sets.rightmost_empty(Collector): _free_sets.rightmost(Collector);
  size_t leftmost =
    (affiliation == ShenandoahAffiliation::FREE)? _free_sets.leftmost_empty(Collector): _free_sets.leftmost(Collector);
  for (size_t c = rightmost + 1; c > leftmost; c--) {
    // size_t is unsigned, need to dodge underflow when _leftmost = 0
    size_t idx = c - 1;
    if (_free_sets.in_free_set(idx, Collector)) {
      ShenandoahHeapRegion* r = _heap->get_region(idx);
      if (r->affiliation() == affiliation) {
        HeapWord* result = try_allocate_in(r, req, in_new_region);
        if (result != nullptr) {
          return result;
        }
      }
    }
  }
  log_debug(gc, free)("Could not allocate collector region with affiliation: %s for request " PTR_FORMAT,
                      shenandoah_affiliation_name(affiliation), p2i(&req));
  return nullptr;
}

HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
  shenandoah_assert_heaplocked();

  // Scan the bitmap looking for a first fit.
  //
  // Leftmost and rightmost bounds provide enough caching to walk bitmap efficiently. Normally,
  // we would find the region to allocate at right away.
  //
  // Allocations are biased: new application allocs go to beginning of the heap, and GC allocs
  // go to the end. This makes application allocation faster, because we would clear lots
  // of regions from the beginning most of the time.
  //
  // Free set maintains mutator and collector views, and normally they allocate in their views only,
  // unless we special-case stealing and mixed allocations.

  // Overwrite with non-zero (non-NULL) values only if necessary for allocation bookkeeping.

  bool allow_new_region = true;
  if (_heap->mode()->is_generational()) {
    switch (req.affiliation()) {
      case ShenandoahAffiliation::OLD_GENERATION:
        // Note: unsigned result from free_unaffiliated_regions() will never be less than zero, but it may equal zero.
        if (_heap->old_generation()->free_unaffiliated_regions() <= 0) {
          allow_new_region = false;
        }
        break;

      case ShenandoahAffiliation::YOUNG_GENERATION:
        // Note: unsigned result from free_unaffiliated_regions() will never be less than zero, but it may equal zero.
        if (_heap->young_generation()->free_unaffiliated_regions() <= 0) {
          allow_new_region = false;
        }
        break;

      case ShenandoahAffiliation::FREE:
        fatal("Should request affiliation");

      default:
        ShouldNotReachHere();
        break;
    }
  }
  switch (req.type()) {
    case ShenandoahAllocRequest::_alloc_tlab:
    case ShenandoahAllocRequest::_alloc_shared: {
      // Try to allocate in the mutator view
      // Allocate within mutator free from high memory to low so as to preserve low memory for humongous allocations
      if (!_free_sets.is_empty(Mutator)) {
        // Use signed idx. Otherwise, loop will never terminate.
        int leftmost = (int) _free_sets.leftmost(Mutator);
        for (int idx = (int) _free_sets.rightmost(Mutator); idx >= leftmost; idx--) {
          ShenandoahHeapRegion* r = _heap->get_region(idx);
          if (_free_sets.in_free_set(idx, Mutator) && (allow_new_region || r->is_affiliated())) {
            // try_allocate_in() increases used if the allocation is successful.
            HeapWord* result;
            size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab)? req.min_size(): req.size();
            if ((alloc_capacity(r) >= min_size) && ((result = try_allocate_in(r, req, in_new_region)) != nullptr)) {
              return result;
            }
          }
        }
      }
      // There is no recovery. Mutator does not touch collector view at all.
      break;
    }
    case ShenandoahAllocRequest::_alloc_gclab:
      // GCLABs are for evacuation so we must be in evacuation phase. If this allocation is successful, increment
      // the relevant evac_expended rather than used value.

    case ShenandoahAllocRequest::_alloc_plab:
      // PLABs always reside in old-gen and are only allocated during evacuation phase.

    case ShenandoahAllocRequest::_alloc_shared_gc: {
      if (!_heap->mode()->is_generational()) {
        // size_t is unsigned, need to dodge underflow when _leftmost = 0
        // Fast-path: try to allocate in the collector view first
        for (size_t c = _free_sets.rightmost(Collector) + 1; c > _free_sets.leftmost(Collector); c--) {
          size_t idx = c - 1;
          if (_free_sets.in_free_set(idx, Collector)) {
            HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
            if (result != nullptr) {
              return result;
            }
          }
        }
      } else {
        // First try to fit into a region that is already in use in the same generation.
        HeapWord* result;
        if (req.is_old()) {
          result = allocate_old_with_affiliation(req.affiliation(), req, in_new_region);
        } else {
          result = allocate_with_affiliation(req.affiliation(), req, in_new_region);
        }
        if (result != nullptr) {
          return result;
        }
        if (allow_new_region) {
          // Then try a free region that is dedicated to GC allocations.
          if (req.is_old()) {
            result = allocate_old_with_affiliation(FREE, req, in_new_region);
          } else {
            result = allocate_with_affiliation(FREE, req, in_new_region);
          }
          if (result != nullptr) {
            return result;
          }
        }
      }
      // No dice. Can we borrow space from mutator view?
      if (!ShenandoahEvacReserveOverflow) {
        return nullptr;
      }

      if (!allow_new_region && req.is_old() && (_heap->young_generation()->free_unaffiliated_regions() > 0)) {
        // This allows us to flip a mutator region to old_collector
        allow_new_region = true;
      }

      // We should expand old-gen if this can prevent an old-gen evacuation failure. We don't care so much about
      // promotion failures since they can be mitigated in a subsequent GC pass. Would be nice to know if this
      // allocation request is for evacuation or promotion. Individual threads limit their use of PLAB memory for
      // promotions, so we already have an assurance that any additional memory set aside for old-gen will be used
      // only for old-gen evacuations.

      // Also TODO:
      // If GC is idle (out of cycle) and mutator allocation fails and there is memory reserved in the Collector
      // or OldCollector sets, transfer a region of memory so that we can satisfy the allocation request, and
      // immediately trigger the start of GC. It is better to satisfy the allocation than to trigger an out-of-cycle
      // allocation failure (even if this means we have a little less memory to handle evacuations during the
      // subsequent GC pass).

      if (allow_new_region) {
        // Try to steal an empty region from the mutator view.
        for (size_t c = _free_sets.rightmost_empty(Mutator) + 1; c > _free_sets.leftmost_empty(Mutator); c--) {
          size_t idx = c - 1;
          if (_free_sets.in_free_set(idx, Mutator)) {
            ShenandoahHeapRegion* r = _heap->get_region(idx);
            if (can_allocate_from(r)) {
              if (req.is_old()) {
                flip_to_old_gc(r);
              } else {
                flip_to_gc(r);
              }
              HeapWord *result = try_allocate_in(r, req, in_new_region);
              if (result != nullptr) {
                log_debug(gc, free)("Flipped region " SIZE_FORMAT " to gc for request: " PTR_FORMAT, idx, p2i(&req));
                return result;
              }
            }
          }
        }
      }

      // No dice. Do not try to mix mutator and GC allocations, because
      // URWM moves due to GC allocations would expose unparsable mutator
      // allocations.
      break;
    }
    default:
      ShouldNotReachHere();
  }
  return nullptr;
}

// This work method takes an argument corresponding to the number of bytes
// free in a region, and returns the largest amount in heapwords that can be allocated
// such that both of the following conditions are satisfied:
//
// 1. it is a multiple of card size
// 2. any remaining shard may be filled with a filler object
//
// The idea is that the allocation starts and ends at card boundaries. Because
// a region's end is card-aligned, the remainder shard that must be filled is
// at the start of the free space.
//
// This is merely a helper method to use for the purpose of such a calculation.
size_t get_usable_free_words(size_t free_bytes) {
  // e.g. card_size is 512, card_shift is 9, min_fill_size() is 8
  //      free is 514
  //      usable_free is 512, which is decreased to 0
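  // A second illustrative case, under the same assumptions: free is 1100;
  // usable_free is 1024, and the 76-byte remainder is at least
  // min_fill_size() * HeapWordSize (64 bytes), so usable_free stays 1024 bytes,
  // i.e. 128 words are returned.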
  size_t usable_free = (free_bytes / CardTable::card_size()) << CardTable::card_shift();
  assert(usable_free <= free_bytes, "Sanity check");
  if ((free_bytes != usable_free) && (free_bytes - usable_free < ShenandoahHeap::min_fill_size() * HeapWordSize)) {
    // After aligning to card multiples, the remainder would be smaller than
    // the minimum filler object, so we'll need to take away another card's
    // worth to construct a filler object.
    if (usable_free >= CardTable::card_size()) {
      usable_free -= CardTable::card_size();
    } else {
      assert(usable_free == 0, "usable_free is a multiple of card_size and card_size > min_fill_size");
    }
  }

  return usable_free / HeapWordSize;
}

// Given a size argument, which is a multiple of card size, a request struct
// for a PLAB, and an old region, return a pointer to the allocated space for
// a PLAB which is card-aligned and where any remaining shard in the region
// has been suitably filled by a filler object.
// It is assumed (and assertion-checked) that such an allocation is always possible.
HeapWord* ShenandoahFreeSet::allocate_aligned_plab(size_t size, ShenandoahAllocRequest& req, ShenandoahHeapRegion* r) {
  assert(_heap->mode()->is_generational(), "PLABs are only for generational mode");
  assert(r->is_old(), "All PLABs reside in old-gen");
  assert(!req.is_mutator_alloc(), "PLABs should not be allocated by mutators.");
  assert(is_aligned(size, CardTable::card_size_in_words()), "Align by design");

  HeapWord* result = r->allocate_aligned(size, req, CardTable::card_size());
  assert(result != nullptr, "Allocation cannot fail");
  assert(r->top() <= r->end(), "Allocation cannot span end of region");
  assert(req.actual_size() == size, "Should not have needed to adjust size for PLAB.");
  assert(is_aligned(result, CardTable::card_size_in_words()), "Align by design");

  return result;
}

HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
  assert (has_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());
  if (_heap->is_concurrent_weak_root_in_progress() && r->is_trash()) {
    return nullptr;
  }

  try_recycle_trashed(r);
  if (!r->is_affiliated()) {
    ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
    r->set_affiliation(req.affiliation());
    if (r->is_old()) {
      // Any OLD region allocated during concurrent coalesce-and-fill does not need to be coalesced and filled because
      // all objects allocated within this region are above TAMS (and thus are implicitly marked). In case this is an
      // OLD region and concurrent preparation for mixed evacuations visits this region before the start of the next
      // old-gen concurrent mark (i.e. this region is allocated following the start of old-gen concurrent mark but before
      // concurrent preparations for mixed evacuations are completed), we mark this region as not requiring any
      // coalesce-and-fill processing.
      r->end_preemptible_coalesce_and_fill();
      _heap->clear_cards_for(r);
      _heap->old_generation()->increment_affiliated_region_count();
    } else {
      _heap->young_generation()->increment_affiliated_region_count();
    }

    assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom");
    assert(ctx->is_bitmap_clear_range(ctx->top_bitmap(r), r->end()), "Bitmap above top_bitmap() must be clear");
  } else if (r->affiliation() != req.affiliation()) {
    assert(_heap->mode()->is_generational(), "Request for %s from %s region should only happen in generational mode.",
           req.affiliation_name(), r->affiliation_name());
    return nullptr;
  }

  in_new_region = r->is_empty();
  HeapWord* result = nullptr;

  if (in_new_region) {
    log_debug(gc, free)("Using new region (" SIZE_FORMAT ") for %s (" PTR_FORMAT ").",
                        r->index(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(&req));
  }

  // req.size() is in words, r->free() is in bytes.
  if (req.is_lab_alloc()) {
    if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
      assert(_heap->mode()->is_generational(), "PLABs are only for generational mode");
      assert(_free_sets.in_free_set(r->index(), OldCollector), "PLABS must be allocated in old_collector_free regions");
758 // Need to ensure that PLABs are aligned on a multiple of card size.
759 // Since we have Elastic TLABs, align sizes up. They may be decreased to fit in the usable
760 // memory remaining in the region (which will also be aligned to cards).
761 size_t adjusted_size = align_up(req.size(), CardTable::card_size_in_words());
762 size_t adjusted_min_size = align_up(req.min_size(), CardTable::card_size_in_words());
763 size_t usable_free = get_usable_free_words(r->free());
764
765 if (adjusted_size > usable_free) {
766 adjusted_size = usable_free;
767 }
768
769 if (adjusted_size >= adjusted_min_size) {
770 result = allocate_aligned_plab(adjusted_size, req, r);
771 }
772 // Otherwise, leave result == nullptr because the adjusted size is smaller than min size.
773 } else {
774 // This is a GCLAB or a TLAB allocation
775 size_t adjusted_size = req.size();
776 size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
777 if (adjusted_size > free) {
778 adjusted_size = free;
779 }
780 if (adjusted_size >= req.min_size()) {
781 result = r->allocate(adjusted_size, req);
782 assert (result != nullptr, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, adjusted_size);
783 req.set_actual_size(adjusted_size);
784 } else {
785 log_trace(gc, free)("Failed to shrink TLAB or GCLAB request (" SIZE_FORMAT ") in region " SIZE_FORMAT " to " SIZE_FORMAT
786 " because min_size() is " SIZE_FORMAT, req.size(), r->index(), adjusted_size, req.min_size());
787 }
788 }
789 } else {
790 size_t size = req.size();
791 result = r->allocate(size, req);
792 if (result != nullptr) {
793 // Record actual allocation size
794 req.set_actual_size(size);
795 }
796 }
797
799 if (result != nullptr) {
800 // Allocation successful, bump stats:
801 if (req.is_mutator_alloc()) {
802 assert(req.is_young(), "Mutator allocations always come from young generation.");
803 _free_sets.increase_used(Mutator, req.actual_size() * HeapWordSize);
804 } else {
805 assert(req.is_gc_alloc(), "Should be gc_alloc since req wasn't mutator alloc");
806
807 // For GC allocations, we advance update_watermark because the objects relocated into this memory during
808 // evacuation are not updated during evacuation. For both young and old regions r, it is essential that all
809 // PLABs be made parsable at the end of evacuation. This is ensured by retiring all PLABs at the end of evacuation.
810 // TODO: Making a PLAB parsable involves placing a filler object in its remnant memory but does not require
811 // that the PLAB be disabled for all future purposes. We may want to introduce a new service to make the
812 // PLABs parsable while still allowing the PLAB to serve future allocation requests that arise during the
813 // next evacuation pass.
814 r->set_update_watermark(r->top());
815 if (r->is_old()) {
816 assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "old-gen allocations use PLAB or shared allocation");
817 // For PLABs, we'll sort out the difference between evacuation and promotion usage when we retire the PLAB.
818 }
819 }
820 }
821
822 if (result == nullptr || alloc_capacity(r) < PLAB::min_size() * HeapWordSize) {
823 // Region cannot afford this and is likely to not afford future allocations. Retire it.
824 //
825 // While this seems a bit harsh, especially in the case when this large allocation does not
826 // fit but the next small one would, we risk inflating scan times when lots of
827 // almost-full regions precede the fully-empty region where we want to allocate the entire TLAB.
828
829 // Record the remainder as allocation waste
830 size_t idx = r->index();
831 if (req.is_mutator_alloc()) {
832 size_t waste = r->free();
833 if (waste > 0) {
834 _free_sets.increase_used(Mutator, waste);
835 // This one request could cause several regions to be "retired", so we must accumulate the waste
836 req.set_waste((waste >> LogHeapWordSize) + req.waste());
837 }
838 assert(_free_sets.membership(idx) == Mutator, "Must be mutator free: " SIZE_FORMAT, idx);
839 } else {
840 assert(_free_sets.membership(idx) == Collector || _free_sets.membership(idx) == OldCollector,
841 "Must be collector or old-collector free: " SIZE_FORMAT, idx);
842 }
843 // This region is no longer considered free (in any set)
844 _free_sets.remove_from_free_sets(idx);
845 _free_sets.assert_bounds();
846 }
847 return result;
848 }
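
// The elastic-LAB sizing above reduces to a simple clamp: shrink the request to the
// region's usable free space, then fail if the result is below the requester's minimum.
// A hedged standalone sketch (names are illustrative, not VM API):
//
//   #include <cstddef>
//
//   // Returns the granted LAB size in words, or 0 if the region cannot satisfy min_words.
//   size_t grant_lab_words(size_t req_words, size_t min_words, size_t free_words) {
//     size_t adjusted = req_words < free_words ? req_words : free_words;
//     return adjusted >= min_words ? adjusted : 0;
//   }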
849
850 HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
851 shenandoah_assert_heaplocked();
852
853 size_t words_size = req.size();
854 size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
855
856 assert(req.is_young(), "Humongous regions always allocated in YOUNG");
857 ShenandoahGeneration* generation = _heap->generation_for(req.affiliation());
858
859 // Check if there are enough regions left to satisfy allocation.
860 if (_heap->mode()->is_generational()) {
861 size_t avail_young_regions = generation->free_unaffiliated_regions();
862 if (num > _free_sets.count(Mutator) || (num > avail_young_regions)) {
863 return nullptr;
864 }
865 } else {
866 if (num > _free_sets.count(Mutator)) {
867 return nullptr;
868 }
869 }
870
871 // Find the contiguous interval of $num regions, starting from $beg and ending in $end,
872 // inclusive. Contiguous allocations are biased to the beginning.
873
874 size_t beg = _free_sets.leftmost(Mutator);
875 size_t end = beg;
876
877 while (true) {
878 if (end >= _free_sets.max()) {
879 // Hit the end, goodbye
880 return nullptr;
881 }
882
883 // If regions are not adjacent, then current [beg; end] is useless, and we may fast-forward.
884 // If region is not completely free, the current [beg; end] is useless, and we may fast-forward.
885 if (!_free_sets.in_free_set(end, Mutator) || !can_allocate_from(_heap->get_region(end))) {
886 end++;
887 beg = end;
888 continue;
889 }
890
891 if ((end - beg + 1) == num) {
892 // found the match
893 break;
894 }
895
896 end++;
897 }
898
899 size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
900 ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
901
902 // Initialize regions:
903 for (size_t i = beg; i <= end; i++) {
904 ShenandoahHeapRegion* r = _heap->get_region(i);
905 try_recycle_trashed(r);
906
907 assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous");
908 assert(r->is_empty(), "Should be empty");
909
910 if (i == beg) {
911 r->make_humongous_start();
912 } else {
913 r->make_humongous_cont();
914 }
915
916 // Trailing region may be non-full, record the remainder there
917 size_t used_words;
918 if ((i == end) && (remainder != 0)) {
919 used_words = remainder;
920 } else {
921 used_words = ShenandoahHeapRegion::region_size_words();
922 }
923
924 r->set_affiliation(req.affiliation());
925 r->set_update_watermark(r->bottom());
926 r->set_top(r->bottom() + used_words);
927
928 // While individual regions report their true use, all humongous regions are marked used in the free set.
929 _free_sets.remove_from_free_sets(r->index());
930 }
931 _heap->young_generation()->increase_affiliated_region_count(num);
932
933 size_t total_humongous_size = ShenandoahHeapRegion::region_size_bytes() * num;
934 _free_sets.increase_used(Mutator, total_humongous_size);
935 _free_sets.assert_bounds();
936 req.set_actual_size(words_size);
937 if (remainder != 0) {
938 req.set_waste(ShenandoahHeapRegion::region_size_words() - remainder);
939 }
940 return _heap->get_region(beg)->bottom();
941 }
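
// The interval search above is a first-fit scan for a run of free regions: grow the
// window by advancing 'end', and whenever a region breaks the run, restart the window
// just past it. A hedged sketch over a plain bit vector (not the free-set API):
//
//   #include <cstddef>
//   #include <vector>
//
//   // Returns the first index of 'num' consecutive free slots, or -1 if none exists.
//   long find_contiguous(const std::vector<bool>& is_free, size_t num) {
//     size_t beg = 0;
//     for (size_t end = 0; end < is_free.size(); end++) {
//       if (!is_free[end]) { beg = end + 1; continue; }  // run broken: restart past 'end'
//       if (end - beg + 1 == num) return (long) beg;     // found the match
//     }
//     return -1;
//   }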
942
943 // Returns true iff this region is entirely available, either because it is empty() or because it has been found to represent
944 // immediate trash and we'll be able to immediately recycle it. Note that we cannot recycle immediate trash if
945 // concurrent weak root processing is in progress.
946 bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) const {
947 return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
948 }
949
950 bool ShenandoahFreeSet::can_allocate_from(size_t idx) const {
951 ShenandoahHeapRegion* r = _heap->get_region(idx);
952 return can_allocate_from(r);
953 }
954
955 size_t ShenandoahFreeSet::alloc_capacity(size_t idx) const {
956 ShenandoahHeapRegion* r = _heap->get_region(idx);
957 return alloc_capacity(r);
958 }
959
960 size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) const {
961 if (r->is_trash()) {
962 // This would be recycled on allocation path
963 return ShenandoahHeapRegion::region_size_bytes();
964 } else {
965 return r->free();
966 }
967 }
968
969 bool ShenandoahFreeSet::has_alloc_capacity(ShenandoahHeapRegion *r) const {
970 return alloc_capacity(r) > 0;
971 }
972
973 void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
974 if (r->is_trash()) {
975 r->recycle();
976 }
977 }
978
979 void ShenandoahFreeSet::recycle_trash() {
980 // lock is not reentrant, check we don't have it
981 shenandoah_assert_not_heaplocked();
982
983 for (size_t i = 0; i < _heap->num_regions(); i++) {
984 ShenandoahHeapRegion* r = _heap->get_region(i);
985 if (r->is_trash()) {
986 ShenandoahHeapLocker locker(_heap->lock());
987 try_recycle_trashed(r);
988 }
989 SpinPause(); // allow allocators to take the lock
990 }
991 }
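
// Taking and releasing the heap lock once per trash region, with a SpinPause between
// iterations, bounds lock hold times so concurrent allocators are not starved. The
// shape of that pattern, sketched with standard primitives as stand-ins for the VM's
// heap lock (an assumed equivalence, for illustration only):
//
//   #include <cstddef>
//   #include <mutex>
//   #include <vector>
//
//   void drain_in_small_steps(std::vector<int>& work, std::mutex& lock) {
//     for (size_t i = 0; i < work.size(); i++) {
//       std::lock_guard<std::mutex> guard(lock);  // short critical section per item
//       work[i] = 0;
//     }                                           // lock released between iterations
//   }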
992
993 void ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) {
994 size_t idx = r->index();
995
996 assert(_free_sets.in_free_set(idx, Mutator), "Should be in mutator view");
997 // Note: can_allocate_from(r) means r is entirely empty, or is trash that can be recycled immediately
998 assert(can_allocate_from(r), "Should not be allocated");
999
1000 size_t region_capacity = alloc_capacity(r);
1001 _free_sets.move_to_set(idx, OldCollector, region_capacity);
1002 _free_sets.assert_bounds();
1003 _heap->old_generation()->augment_evacuation_reserve(region_capacity);
1004 bool transferred = _heap->generation_sizer()->transfer_to_old(1);
1005 if (!transferred) {
1006 log_warning(gc, free)("Forcing transfer of region " SIZE_FORMAT " to old reserve.", idx);
1007 _heap->generation_sizer()->force_transfer_to_old(1);
1008 }
1009 // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
1010 // to recycle trash before attempting to allocate anything in the region.
1011 }
1012
1013 void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
1014 size_t idx = r->index();
1015
1016 assert(_free_sets.in_free_set(idx, Mutator), "Should be in mutator view");
1017 assert(can_allocate_from(r), "Should not be allocated");
1018
1019 size_t region_capacity = alloc_capacity(r);
1020 _free_sets.move_to_set(idx, Collector, region_capacity);
1021 _free_sets.assert_bounds();
1022
1023 // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
1024 // to recycle trash before attempting to allocate anything in the region.
1025 }
1026
1027 void ShenandoahFreeSet::clear() {
1028 shenandoah_assert_heaplocked();
1029 clear_internal();
1030 }
1031
1032 void ShenandoahFreeSet::clear_internal() {
1033 _free_sets.clear_all();
1034 }
1035
1036 // This function places all is_old() regions that have allocation capacity into the old_collector set. It places
1037 // all other regions (not is_old()) that have allocation capacity into the mutator_set. Subsequently, we will
1038 // move some of the mutator regions into the collector set or old_collector set with the intent of packing
1039 // old_collector memory into the highest (rightmost) addresses of the heap and the collector memory into the
1040 // next highest addresses of the heap, with mutator memory consuming the lowest addresses of the heap.
1041 void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regions, size_t &old_cset_regions,
1042 size_t &first_old_region, size_t &last_old_region,
1043 size_t &old_region_count) {
1044 first_old_region = _heap->num_regions();
1045 last_old_region = 0;
1046 old_region_count = 0;
1047 old_cset_regions = 0;
1048 young_cset_regions = 0;
1049 for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
1050 ShenandoahHeapRegion* region = _heap->get_region(idx);
1051 if (region->is_trash()) {
1052 // Trashed regions represent regions that had been in the collection set but have not yet been "cleaned up".
1053 if (region->is_old()) {
1054 old_cset_regions++;
1055 } else {
1056 assert(region->is_young(), "Trashed region should be old or young");
1057 young_cset_regions++;
1058 }
1059 } else if (region->is_old() && region->is_regular()) {
1060 old_region_count++;
1061 if (first_old_region > idx) {
1062 first_old_region = idx;
1063 }
1064 last_old_region = idx;
1065 }
1066 if (region->is_alloc_allowed() || region->is_trash()) {
1067 assert(!region->is_cset(), "Shouldn't be adding cset regions to the free set");
1068 assert(_free_sets.in_free_set(idx, NotFree), "We are about to make region free; it should not be free already");
1069
1070 // Do not add regions that would almost surely fail allocation. Note that PLAB::min_size() is typically less than ShenandoahGenerationalHeap::plab_min_size()
1071 if (alloc_capacity(region) < PLAB::min_size() * HeapWordSize) continue;
1072
1073 if (region->is_old()) {
1074 _free_sets.make_free(idx, OldCollector, alloc_capacity(region));
1075 log_debug(gc, free)(
1076 " Adding Region " SIZE_FORMAT " (Free: " SIZE_FORMAT "%s, Used: " SIZE_FORMAT "%s) to old collector set",
1077 idx, byte_size_in_proper_unit(region->free()), proper_unit_for_byte_size(region->free()),
1078 byte_size_in_proper_unit(region->used()), proper_unit_for_byte_size(region->used()));
1079 } else {
1080 _free_sets.make_free(idx, Mutator, alloc_capacity(region));
1081 log_debug(gc, free)(
1082 " Adding Region " SIZE_FORMAT " (Free: " SIZE_FORMAT "%s, Used: " SIZE_FORMAT "%s) to mutator set",
1083 idx, byte_size_in_proper_unit(region->free()), proper_unit_for_byte_size(region->free()),
1084 byte_size_in_proper_unit(region->used()), proper_unit_for_byte_size(region->used()));
1085 }
1086 }
1087 }
1088 }
1089
1090 // Move no more than max_xfer_regions from the existing Collector and OldCollector free sets to the Mutator free set.
1091 // This is called from outside the heap lock.
1092 void ShenandoahFreeSet::move_collector_sets_to_mutator(size_t max_xfer_regions) {
1093 size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1094 size_t collector_empty_xfer = 0;
1095 size_t collector_not_empty_xfer = 0;
1096 size_t old_collector_empty_xfer = 0;
1097
1098 // Process empty regions within the Collector free set
1099 if ((max_xfer_regions > 0) && (_free_sets.leftmost_empty(Collector) <= _free_sets.rightmost_empty(Collector))) {
1100 ShenandoahHeapLocker locker(_heap->lock());
1101 for (size_t idx = _free_sets.leftmost_empty(Collector);
1102 (max_xfer_regions > 0) && (idx <= _free_sets.rightmost_empty(Collector)); idx++) {
1103 if (_free_sets.in_free_set(idx, Collector) && can_allocate_from(idx)) {
1104 _free_sets.move_to_set(idx, Mutator, region_size_bytes);
1105 max_xfer_regions--;
1106 collector_empty_xfer += region_size_bytes;
1107 }
1108 }
1109 }
1110
1111 // Process empty regions within the OldCollector free set
1112 size_t old_collector_regions = 0;
1113 if ((max_xfer_regions > 0) && (_free_sets.leftmost_empty(OldCollector) <= _free_sets.rightmost_empty(OldCollector))) {
1114 ShenandoahHeapLocker locker(_heap->lock());
1115 for (size_t idx = _free_sets.leftmost_empty(OldCollector);
1116 (max_xfer_regions > 0) && (idx <= _free_sets.rightmost_empty(OldCollector)); idx++) {
1117 if (_free_sets.in_free_set(idx, OldCollector) && can_allocate_from(idx)) {
1118 _free_sets.move_to_set(idx, Mutator, region_size_bytes);
1119 max_xfer_regions--;
1120 old_collector_empty_xfer += region_size_bytes;
1121 old_collector_regions++;
1122 }
1123 }
1124 if (old_collector_regions > 0) {
1125 _heap->generation_sizer()->transfer_to_young(old_collector_regions);
1126 }
1127 }
1128
1129 // If there are any non-empty regions within Collector set, we can also move them to the Mutator free set
1130 if ((max_xfer_regions > 0) && (_free_sets.leftmost(Collector) <= _free_sets.rightmost(Collector))) {
1131 ShenandoahHeapLocker locker(_heap->lock());
1132 for (size_t idx = _free_sets.leftmost(Collector); (max_xfer_regions > 0) && (idx <= _free_sets.rightmost(Collector)); idx++) {
1133 size_t alloc_capacity = this->alloc_capacity(idx);
1134 if (_free_sets.in_free_set(idx, Collector) && (alloc_capacity > 0)) {
1135 _free_sets.move_to_set(idx, Mutator, alloc_capacity);
1136 max_xfer_regions--;
1137 collector_not_empty_xfer += alloc_capacity;
1138 }
1139 }
1140 }
1141
1142 size_t collector_xfer = collector_empty_xfer + collector_not_empty_xfer;
1143 size_t total_xfer = collector_xfer + old_collector_empty_xfer;
1144 log_info(gc, free)("At start of update refs, moving " SIZE_FORMAT "%s to Mutator free set from Collector Reserve ("
1145 SIZE_FORMAT "%s) and from Old Collector Reserve (" SIZE_FORMAT "%s)",
1146 byte_size_in_proper_unit(total_xfer), proper_unit_for_byte_size(total_xfer),
1147 byte_size_in_proper_unit(collector_xfer), proper_unit_for_byte_size(collector_xfer),
1148 byte_size_in_proper_unit(old_collector_empty_xfer), proper_unit_for_byte_size(old_collector_empty_xfer));
1149 }
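
// The three passes above implement a preference order for returning memory to mutators:
// (1) empty Collector regions, which are cheapest to repurpose; (2) empty OldCollector
// regions, which additionally require a generation-sizer transfer back to young; and
// (3) non-empty Collector regions, whose remaining free space is still usable for
// mutator allocations.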
1150
1152 // Overwrite arguments to represent the amount of memory in each generation that is about to be recycled
1153 void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_cset_regions, size_t &old_cset_regions,
1154 size_t &first_old_region, size_t &last_old_region, size_t &old_region_count) {
1155 shenandoah_assert_heaplocked();
1156 // This resets all state information, removing all regions from all sets.
1157 clear();
1158 log_debug(gc, free)("Rebuilding FreeSet");
1159
1160 // This places regions that have alloc_capacity into the old_collector set if they identify as is_old() or the
1161 // mutator set otherwise.
1162 find_regions_with_alloc_capacity(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
1163 }
1164
1165 void ShenandoahFreeSet::rebuild(size_t young_cset_regions, size_t old_cset_regions, bool have_evacuation_reserves) {
1166 shenandoah_assert_heaplocked();
1167 size_t young_reserve(0), old_reserve(0);
1168
1169 if (!_heap->mode()->is_generational()) {
1170 young_reserve = (_heap->max_capacity() / 100) * ShenandoahEvacReserve;
1171 old_reserve = 0;
1172 } else {
1173 compute_young_and_old_reserves(young_cset_regions, old_cset_regions, have_evacuation_reserves,
1174 young_reserve, old_reserve);
1176 }
1177
1178 reserve_regions(young_reserve, old_reserve);
1179 _free_sets.establish_alloc_bias(OldCollector);
1180 _free_sets.assert_bounds();
1181 log_status();
1182 }
1183
1184 void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions, size_t old_cset_regions, bool have_evacuation_reserves,
1185 size_t& young_reserve_result, size_t& old_reserve_result) const {
1186 const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1187
1188 ShenandoahOldGeneration* const old_generation = _heap->old_generation();
1189 size_t old_available = old_generation->available();
1190 size_t old_unaffiliated_regions = old_generation->free_unaffiliated_regions();
1191 ShenandoahYoungGeneration* const young_generation = _heap->young_generation();
1192 size_t young_capacity = young_generation->max_capacity();
1193 size_t young_unaffiliated_regions = young_generation->free_unaffiliated_regions();
1194
1195 // Add in the regions we anticipate to be freed by evacuation of the collection set
1196 old_unaffiliated_regions += old_cset_regions;
1197 old_available += old_cset_regions * region_size_bytes;
1198 young_unaffiliated_regions += young_cset_regions;
1199
1200 // Consult old-region balance to make adjustments to current generation capacities and availability.
1201 // The generation region transfers take place after we rebuild.
1202 const ssize_t old_region_balance = old_generation->get_region_balance();
1203 if (old_region_balance != 0) {
1204 if (old_region_balance > 0) {
1205 assert(old_region_balance <= checked_cast<ssize_t>(old_unaffiliated_regions), "Cannot transfer regions that are affiliated");
1206 } else {
1207 assert(0 - old_region_balance <= checked_cast<ssize_t>(young_unaffiliated_regions), "Cannot transfer regions that are affiliated");
1208 }
1209
1210 ssize_t xfer_bytes = old_region_balance * checked_cast<ssize_t>(region_size_bytes);
1211 old_available -= xfer_bytes;
1212 old_unaffiliated_regions -= old_region_balance;
1213 young_capacity += xfer_bytes;
1214 young_unaffiliated_regions += old_region_balance;
1215 }
1216
1217 // All allocations taken from the old collector set are performed by GC, generally using PLABs for both
1218 // promotions and evacuations. The partition between which old memory is reserved for evacuation and
1219 // which is reserved for promotion is enforced using thread-local variables that prescribe intentions for
1220 // each PLAB's available memory.
1221 if (have_evacuation_reserves) {
1222 // We are rebuilding at the end of final mark, having already established evacuation budgets for this GC pass.
1223 const size_t promoted_reserve = old_generation->get_promoted_reserve();
1224 const size_t old_evac_reserve = old_generation->get_evacuation_reserve();
1225 young_reserve_result = young_generation->get_evacuation_reserve();
1226 old_reserve_result = promoted_reserve + old_evac_reserve;
1227 assert(old_reserve_result <= old_available,
1228 "Cannot reserve (" SIZE_FORMAT " + " SIZE_FORMAT") more OLD than is available: " SIZE_FORMAT,
1229 promoted_reserve, old_evac_reserve, old_available);
1230 } else {
1231 // We are rebuilding at end of GC, so we set aside budgets specified on command line (or defaults)
1232 young_reserve_result = (young_capacity * ShenandoahEvacReserve) / 100;
1233 // The auto-sizer has already made old-gen large enough to hold all anticipated evacuations and promotions.
1234 // Affiliated old-gen regions are already in the OldCollector free set. Add in the relevant number of
1235 // unaffiliated regions.
1236 old_reserve_result = old_available;
1237 }
1238
1239 // Old available regions that have less than PLAB::min_size() of available memory are not placed into the OldCollector
1240 // free set. Because of this, old_available may not have enough memory to represent the intended reserve. Adjust
1241 // the reserve downward to account for this possibility. This loss is part of the reason why the original budget
1242 // was adjusted with ShenandoahOldEvacWaste and ShenandoahOldPromoWaste multipliers.
1243 if (old_reserve_result > _free_sets.capacity_of(OldCollector) + old_unaffiliated_regions * region_size_bytes) {
1244 old_reserve_result = _free_sets.capacity_of(OldCollector) + old_unaffiliated_regions * region_size_bytes;
1245 }
1246
1247 if (young_reserve_result > young_unaffiliated_regions * region_size_bytes) {
1248 young_reserve_result = young_unaffiliated_regions * region_size_bytes;
1249 }
1250 }
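
// Worked example of the end-of-GC young budget above, assuming ShenandoahEvacReserve
// is at its default of 5 and young capacity is 1024 MB (assumed numbers):
//
//   young_reserve_result = (1024 MB * 5) / 100 = 51.2 MB
//
// then capped by the byte size of the young generation's unaffiliated regions, since
// the reserve can only be drawn from regions that are entirely free.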
1251
1252 // Having placed all regions that have allocation capacity into the mutator set if they identify as is_young()
1253 // or into the old collector set if they identify as is_old(), move some of these regions from the mutator set
1254 // into the collector set or old collector set in order to assure that the memory available for allocations within
1255 // the collector set is at least to_reserve, and the memory available for allocations within the old collector set
1256 // is at least to_reserve_old.
1257 void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old) {
1258 for (size_t i = _heap->num_regions(); i > 0; i--) {
1259 size_t idx = i - 1;
1260 ShenandoahHeapRegion* r = _heap->get_region(idx);
1261 if (!_free_sets.in_free_set(idx, Mutator)) {
1262 continue;
1263 }
1264
1265 size_t ac = alloc_capacity(r);
1266 assert (ac > 0, "Membership in free set implies has capacity");
1267 assert (!r->is_old(), "mutator_is_free regions should not be affiliated OLD");
1268
1269 bool move_to_old = _free_sets.capacity_of(OldCollector) < to_reserve_old;
1270 bool move_to_young = _free_sets.capacity_of(Collector) < to_reserve;
1271
1272 if (!move_to_old && !move_to_young) {
1273 // We've satisfied both to_reserve and to_reserve_old
1274 break;
1275 }
1276
1277 if (move_to_old) {
1278 if (r->is_trash() || !r->is_affiliated()) {
1279 // OLD regions that have available memory are already in the old_collector free set
1280 _free_sets.move_to_set(idx, OldCollector, ac);
1281 log_debug(gc, free)(" Shifting region " SIZE_FORMAT " from mutator_free to old_collector_free", idx);
1282 continue;
1283 }
1284 }
1285
1286 if (move_to_young) {
1287 // Note: In a previous implementation, regions were only placed into the survivor space (collector_is_free) if
1288 // they were entirely empty. The rationale for that is unclear: such behavior would tend to mix survivor objects
1289 // with ephemeral objects, making it more difficult to reclaim the memory for the ephemeral objects. It also
1290 // delays aging of regions, causing promotion in place to be delayed.
1291 _free_sets.move_to_set(idx, Collector, ac);
1292 log_debug(gc, free)(" Shifting region " SIZE_FORMAT " from mutator_free to collector_free", idx);
1293 }
1294 }
1295
1296 if (LogTarget(Info, gc, free)::is_enabled()) {
1297 size_t old_reserve = _free_sets.capacity_of(OldCollector);
1298 if (old_reserve < to_reserve_old) {
1299 log_info(gc, free)("Wanted " PROPERFMT " for old reserve, but only reserved: " PROPERFMT,
1300 PROPERFMTARGS(to_reserve_old), PROPERFMTARGS(old_reserve));
1301 }
1302 size_t young_reserve = _free_sets.capacity_of(Collector);
1303 if (young_reserve < to_reserve) {
1304 log_info(gc, free)("Wanted " PROPERFMT " for young reserve, but only reserved: " PROPERFMT,
1305 PROPERFMTARGS(to_reserve), PROPERFMTARGS(young_reserve));
1306 }
1307 }
1308 }
1309
1310 void ShenandoahFreeSet::log_status() {
1311 shenandoah_assert_heaplocked();
1312
1313 #ifdef ASSERT
1314 // Dump of the FreeSet details is only enabled if assertions are enabled
1315 if (LogTarget(Debug, gc, free)::is_enabled()) {
1316 #define BUFFER_SIZE 80
1317 size_t retired_old = 0;
1318 size_t retired_old_humongous = 0;
1319 size_t retired_young = 0;
1320 size_t retired_young_humongous = 0;
1321 size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1322 size_t retired_young_waste = 0;
1323 size_t retired_old_waste = 0;
1324 size_t consumed_collector = 0;
1325 size_t consumed_old_collector = 0;
1326 size_t consumed_mutator = 0;
1327 size_t available_old = 0;
1328 size_t available_young = 0;
1329 size_t available_mutator = 0;
1330 size_t available_collector = 0;
1331 size_t available_old_collector = 0;
1332
1333 char buffer[BUFFER_SIZE];
1334 for (uint i = 0; i < BUFFER_SIZE; i++) {
1335 buffer[i] = '\0';
1336 }
1337 log_debug(gc, free)("FreeSet map legend:"
1338 " M:mutator_free C:collector_free O:old_collector_free"
1339 " H:humongous ~:retired old _:retired young");
1340 log_debug(gc, free)(" mutator free range [" SIZE_FORMAT ".." SIZE_FORMAT "], "
1341 " collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "], "
1342 "old collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "] allocates from %s",
1343 _free_sets.leftmost(Mutator), _free_sets.rightmost(Mutator),
1344 _free_sets.leftmost(Collector), _free_sets.rightmost(Collector),
1345 _free_sets.leftmost(OldCollector), _free_sets.rightmost(OldCollector),
1346 _free_sets.alloc_from_left_bias(OldCollector)? "left to right": "right to left");
1347
1348 for (uint i = 0; i < _heap->num_regions(); i++) {
1349 ShenandoahHeapRegion *r = _heap->get_region(i);
1350 uint idx = i % 64;
1351 if ((i != 0) && (idx == 0)) {
1352 log_debug(gc, free)(" %6u: %s", i-64, buffer);
1353 }
1354 if (_free_sets.in_free_set(i, Mutator)) {
1355 assert(!r->is_old(), "Old regions should not be in mutator_free set");
1356 size_t capacity = alloc_capacity(r);
1357 available_mutator += capacity;
1358 consumed_mutator += region_size_bytes - capacity;
1359 buffer[idx] = (capacity == region_size_bytes)? 'M': 'm';
1360 } else if (_free_sets.in_free_set(i, Collector)) {
1361 assert(!r->is_old(), "Old regions should not be in collector_free set");
1362 size_t capacity = alloc_capacity(r);
1363 available_collector += capacity;
1364 consumed_collector += region_size_bytes - capacity;
1365 buffer[idx] = (capacity == region_size_bytes)? 'C': 'c';
1366 } else if (_free_sets.in_free_set(i, OldCollector)) {
1367 size_t capacity = alloc_capacity(r);
1368 available_old_collector += capacity;
1369 consumed_old_collector += region_size_bytes - capacity;
1370 buffer[idx] = (capacity == region_size_bytes)? 'O': 'o';
1371 } else if (r->is_humongous()) {
1372 if (r->is_old()) {
1373 buffer[idx] = 'H';
1374 retired_old_humongous += region_size_bytes;
1375 } else {
1376 buffer[idx] = 'h';
1377 retired_young_humongous += region_size_bytes;
1378 }
1379 } else {
1380 if (r->is_old()) {
1381 buffer[idx] = '~';
1382 retired_old_waste += alloc_capacity(r);
1383 retired_old += region_size_bytes;
1384 } else {
1385 buffer[idx] = '_';
1386 retired_young_waste += alloc_capacity(r);
1387 retired_young += region_size_bytes;
1388 }
1389 }
1390 }
1391 uint remnant = _heap->num_regions() % 64;
1392 if (remnant > 0) {
1393 buffer[remnant] = '\0';
1394 } else {
1395 remnant = 64;
1396 }
1397 log_debug(gc, free)(" %6u: %s", (uint) (_heap->num_regions() - remnant), buffer);
1398 size_t total_young = retired_young + retired_young_humongous;
1399 size_t total_old = retired_old + retired_old_humongous;
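// Report the accumulated totals. This summary line is a hedged addition: the
// accumulators above are otherwise computed but never reported.
log_debug(gc, free)(" Retired young total: " SIZE_FORMAT "%s, retired old total: " SIZE_FORMAT "%s",
                    byte_size_in_proper_unit(total_young), proper_unit_for_byte_size(total_young),
                    byte_size_in_proper_unit(total_old), proper_unit_for_byte_size(total_old));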
1400 }
1401 #endif
1402
1403 LogTarget(Info, gc, free) lt;
1404 if (lt.is_enabled()) {
1405 ResourceMark rm;
1406 LogStream ls(lt);
1407
1408 {
1409 size_t last_idx = 0;
1410 size_t max = 0;
1411 size_t max_contig = 0;
1412 size_t empty_contig = 0;
1413
1414 size_t total_used = 0;
1415 size_t total_free = 0;
1416 size_t total_free_ext = 0;
1417
1418 for (size_t idx = _free_sets.leftmost(Mutator); idx <= _free_sets.rightmost(Mutator); idx++) {
1419 if (_free_sets.in_free_set(idx, Mutator)) {
1420 ShenandoahHeapRegion *r = _heap->get_region(idx);
1421 size_t free = alloc_capacity(r);
1422 max = MAX2(max, free);
1423 if (r->is_empty()) {
1424 total_free_ext += free;
1425 if (last_idx + 1 == idx) {
1426 empty_contig++;
1427 } else {
1428 empty_contig = 1;
1429 }
1430 } else {
1431 empty_contig = 0;
1432 }
1433 total_used += r->used();
1434 total_free += free;
1435 max_contig = MAX2(max_contig, empty_contig);
1436 last_idx = idx;
1437 }
1438 }
1439
1440 size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
1441 size_t free = capacity() - used();
1442
1443 assert(free == total_free, "Sum of free within mutator regions (" SIZE_FORMAT
1444 ") should match mutator capacity (" SIZE_FORMAT ") minus mutator used (" SIZE_FORMAT ")",
1445 total_free, capacity(), used());
1446
1447 ls.print("Free: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s regular, " SIZE_FORMAT "%s humongous, ",
1448 byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
1449 byte_size_in_proper_unit(max), proper_unit_for_byte_size(max),
1450 byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous)
1451 );
1452
1453 ls.print("Frag: ");
1454 size_t frag_ext;
1455 if (total_free_ext > 0) {
1456 frag_ext = 100 - (100 * max_humongous / total_free_ext);
1457 } else {
1458 frag_ext = 0;
1459 }
1460 ls.print(SIZE_FORMAT "%% external, ", frag_ext);
1461
1462 size_t frag_int;
1463 if (_free_sets.count(Mutator) > 0) {
1464 frag_int = (100 * (total_used / _free_sets.count(Mutator)) / ShenandoahHeapRegion::region_size_bytes());
1465 } else {
1466 frag_int = 0;
1467 }
1468 ls.print(SIZE_FORMAT "%% internal; ", frag_int);
1469 ls.print("Used: " SIZE_FORMAT "%s, Mutator Free: " SIZE_FORMAT,
1470 byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used), _free_sets.count(Mutator));
1471 }
1472
1473 {
1474 size_t max = 0;
1475 size_t total_free = 0;
1476 size_t total_used = 0;
1477
1478 for (size_t idx = _free_sets.leftmost(Collector); idx <= _free_sets.rightmost(Collector); idx++) {
1479 if (_free_sets.in_free_set(idx, Collector)) {
1480 ShenandoahHeapRegion *r = _heap->get_region(idx);
1481 size_t free = alloc_capacity(r);
1482 max = MAX2(max, free);
1483 total_free += free;
1484 total_used += r->used();
1485 }
1486 }
1487 ls.print(" Collector Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s; Used: " SIZE_FORMAT "%s",
1488 byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
1489 byte_size_in_proper_unit(max), proper_unit_for_byte_size(max),
1490 byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used));
1491 }
1492
1493 if (_heap->mode()->is_generational()) {
1494 size_t max = 0;
1495 size_t total_free = 0;
1496 size_t total_used = 0;
1497
1498 for (size_t idx = _free_sets.leftmost(OldCollector); idx <= _free_sets.rightmost(OldCollector); idx++) {
1499 if (_free_sets.in_free_set(idx, OldCollector)) {
1500 ShenandoahHeapRegion *r = _heap->get_region(idx);
1501 size_t free = alloc_capacity(r);
1502 max = MAX2(max, free);
1503 total_free += free;
1504 total_used += r->used();
1505 }
1506 }
1507 ls.print_cr(" Old Collector Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s; Used: " SIZE_FORMAT "%s",
1508 byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
1509 byte_size_in_proper_unit(max), proper_unit_for_byte_size(max),
1510 byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used));
1511 }
1512 }
1513 }
1514
1515 HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
1516 shenandoah_assert_heaplocked();
1517
1518 // Allocation request is known to satisfy all memory budgeting constraints.
1519 if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
1520 switch (req.type()) {
1521 case ShenandoahAllocRequest::_alloc_shared:
1522 case ShenandoahAllocRequest::_alloc_shared_gc:
1523 in_new_region = true;
1524 return allocate_contiguous(req);
1525 case ShenandoahAllocRequest::_alloc_plab:
1526 case ShenandoahAllocRequest::_alloc_gclab:
1527 case ShenandoahAllocRequest::_alloc_tlab:
1528 in_new_region = false;
1529 assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
1530 req.size(), ShenandoahHeapRegion::humongous_threshold_words());
1531 return nullptr;
1532 default:
1533 ShouldNotReachHere();
1534 return nullptr;
1535 }
1536 } else {
1537 return allocate_single(req, in_new_region);
1538 }
1539 }
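
// Note on the dispatch above: only shared allocations may span multiple regions. With
// the default ShenandoahHumongousThreshold of 100 percent of region size (an assumed
// default), any request larger than one region takes the allocate_contiguous() path,
// while TLAB/GCLAB/PLAB requests of that size indicate a caller bug, which the assert
// above reports.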
1540
1541 size_t ShenandoahFreeSet::unsafe_peek_free() const {
1542 // Deliberately not locked, this method is unsafe when free set is modified.
1543
1544 for (size_t index = _free_sets.leftmost(Mutator); index <= _free_sets.rightmost(Mutator); index++) {
1545 if (index < _free_sets.max() && _free_sets.in_free_set(index, Mutator)) {
1546 ShenandoahHeapRegion* r = _heap->get_region(index);
1547 if (r->free() >= MinTLABSize) {
1548 return r->free();
1549 }
1550 }
1551 }
1552
1553 // It appears that no regions are left
1554 return 0;
1555 }
1556
1557 void ShenandoahFreeSet::print_on(outputStream* out) const {
1558 out->print_cr("Mutator Free Set: " SIZE_FORMAT "", _free_sets.count(Mutator));
1559 for (size_t index = _free_sets.leftmost(Mutator); index <= _free_sets.rightmost(Mutator); index++) {
1560 if (_free_sets.in_free_set(index, Mutator)) {
1561 _heap->get_region(index)->print_on(out);
1562 }
1563 }
1564 out->print_cr("Collector Free Set: " SIZE_FORMAT "", _free_sets.count(Collector));
1565 for (size_t index = _free_sets.leftmost(Collector); index <= _free_sets.rightmost(Collector); index++) {
1566 if (_free_sets.in_free_set(index, Collector)) {
1567 _heap->get_region(index)->print_on(out);
1568 }
1569 }
1570 if (_heap->mode()->is_generational()) {
1571 out->print_cr("Old Collector Free Set: " SIZE_FORMAT "", _free_sets.count(OldCollector));
1572 for (size_t index = _free_sets.leftmost(OldCollector); index <= _free_sets.rightmost(OldCollector); index++) {
1573 if (_free_sets.in_free_set(index, OldCollector)) {
1574 _heap->get_region(index)->print_on(out);
1575 }
1576 }
1577 }
1578 }
1579
1580 /*
1581 * Internal fragmentation metric: describes how fragmented the heap regions are.
1582 *
1583 * It is derived as:
1584 *
1585 * sum(used[i]^2, i=0..k)
1586 * IF = 1 - ------------------------------
1587 * C * sum(used[i], i=0..k)
1588 *
1589 * ...where k is the number of regions in computation, C is the region capacity, and
1590 * used[i] is the used space in the region.
1591 *
1592 * The non-linearity causes IF to be lower for the cases where the same total heap
1593 * used is densely packed. For example:
1594 * a) Heap is completely full => IF = 0
1595 * b) Heap is half full, first 50% regions are completely full => IF = 0
1596 * c) Heap is half full, each region is 50% full => IF = 1/2
1597 * d) Heap is quarter full, first 50% regions are completely full => IF = 0
1598 * e) Heap is quarter full, each region is 25% full => IF = 3/4
1599 * f) Heap has one small object per region => IF =~ 1
1600 */
1601 double ShenandoahFreeSet::internal_fragmentation() {
1602 double squared = 0;
1603 double linear = 0;
1604 int count = 0;
1605
1606 for (size_t index = _free_sets.leftmost(Mutator); index <= _free_sets.rightmost(Mutator); index++) {
1607 if (_free_sets.in_free_set(index, Mutator)) {
1608 ShenandoahHeapRegion* r = _heap->get_region(index);
1609 size_t used = r->used();
1610 squared += used * used;
1611 linear += used;
1612 count++;
1613 }
1614 }
1615
1616 if (count > 0) {
1617 double s = squared / (ShenandoahHeapRegion::region_size_bytes() * linear);
1618 return 1 - s;
1619 } else {
1620 return 0;
1621 }
1622 }
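
// Worked check of case (c) from the comment above: k regions, each 50% full, with
// region capacity C:
//
//   sum(used[i]^2)   = k * (C/2)^2   = k * C^2 / 4
//   C * sum(used[i]) = C * k * (C/2) = k * C^2 / 2
//
//   IF = 1 - (k * C^2 / 4) / (k * C^2 / 2) = 1 - 1/2 = 1/2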
1623
1624 /*
1625 * External fragmentation metric: describes how fragmented the heap is.
1626 *
1627 * It is derived as:
1628 *
1629 * EF = 1 - largest_contiguous_free / total_free
1630 *
1631 * For example:
1632 * a) Heap is completely empty => EF = 0
1633 * b) Heap is completely full => EF = 0
1634 * c) Heap is first-half full => EF = 1/2
1635 * d) Heap is half full, full and empty regions interleave => EF =~ 1
1636 */
1637 double ShenandoahFreeSet::external_fragmentation() {
1638 size_t last_idx = 0;
1639 size_t max_contig = 0;
1640 size_t empty_contig = 0;
1641
1642 size_t free = 0;
1643
1644 for (size_t index = _free_sets.leftmost(Mutator); index <= _free_sets.rightmost(Mutator); index++) {
1645 if (_free_sets.in_free_set(index, Mutator)) {
1646 ShenandoahHeapRegion* r = _heap->get_region(index);
1647 if (r->is_empty()) {
1648 free += ShenandoahHeapRegion::region_size_bytes();
1649 if (last_idx + 1 == index) {
1650 empty_contig++;
1651 } else {
1652 empty_contig = 1;
1653 }
1654 } else {
1655 empty_contig = 0;
1656 }
1657
1658 max_contig = MAX2(max_contig, empty_contig);
1659 last_idx = index;
1660 }
1661 }
1662
1663 if (free > 0) {
1664 return 1 - (1.0 * max_contig * ShenandoahHeapRegion::region_size_bytes() / free);
1665 } else {
1666 return 0;
1667 }
1668 }
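
// Worked check of case (d) from the comment above: with 2k regions in which full and
// empty regions alternate, total free space is k regions' worth, but the largest
// contiguous run of empty regions is a single region:
//
//   EF = 1 - C / (k * C) = 1 - 1/k, which approaches 1 as k grows.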
1669
|