/*
 * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shared/cardTable.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/shenandoahCardTable.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/powerOfTwo.hpp"

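// Region geometry and TLAB limits. These are established once by setup_sizes()
// during initialization (see the guarantees there) and never changed afterwards.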
size_t ShenandoahHeapRegion::RegionCount = 0;
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
size_t ShenandoahHeapRegion::RegionSizeWords = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0;

ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed) :
  _index(index),
  _bottom(start),
  _end(start + RegionSizeWords),
  _new_top(nullptr),
  _empty_time(os::elapsedTime()),
  _top_before_promoted(nullptr),
  _state(committed ? _empty_committed : _empty_uncommitted),
  _top(start),
  _tlab_allocs(0),
  _gclab_allocs(0),
  _plab_allocs(0),
  _live_data(0),
  _critical_pins(0),
  _update_watermark(start),
  _age(0),
#ifdef SHENANDOAH_CENSUS_NOISE
  _youth(0),
#endif // SHENANDOAH_CENSUS_NOISE
  _needs_bitmap_reset(false)
{
  assert(Universe::on_page_boundary(_bottom) && Universe::on_page_boundary(_end),
         "invalid space boundaries");
  if (ZapUnusedHeapArea && committed) {
    SpaceMangler::mangle_region(MemRegion(_bottom, _end));
  }
  _recycling.unset();
}

void ShenandoahHeapRegion::report_illegal_transition(const char *method) {
  stringStream ss;
  ss.print("Illegal region state transition from \"%s\", at %s\n ", region_state_to_string(state()), method);
  print_on(&ss);
  fatal("%s", ss.freeze());
}

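// Illustrative use (a sketch, not code from this file): the allocation path
// typically performs this transition while holding the heap lock, e.g.
//   ShenandoahHeapLocker locker(heap->lock());
//   region->make_regular_allocation(YOUNG_GENERATION);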
void ShenandoahHeapRegion::make_regular_allocation(ShenandoahAffiliation affiliation) {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (state()) {
    case _empty_uncommitted:
      do_commit();
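      // fall through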
    case _empty_committed:
      assert(this->affiliation() == affiliation, "Region affiliation should already be established");
      set_state(_regular);
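      // fall through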
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular allocation");
  }
}

// Change affiliation to YOUNG_GENERATION if _state is not _pinned_cset, _regular, or _pinned. This implements
// behavior previously performed as a side effect of make_regular_bypass(). This is used by Full GC in non-generational
// modes to transition regions from FREE. Note that all non-free regions in single-generational modes are young.
void ShenandoahHeapRegion::make_affiliated_maybe() {
  shenandoah_assert_heaplocked();
  assert(!ShenandoahHeap::heap()->mode()->is_generational(), "Only call if non-generational");
  switch (state()) {
    case _empty_uncommitted:
    case _empty_committed:
    case _cset:
    case _humongous_start:
    case _humongous_cont:
      if (affiliation() != YOUNG_GENERATION) {
        set_affiliation(YOUNG_GENERATION);
      }
      return;
    case _pinned_cset:
    case _regular:
    case _pinned:
      return;
    default:
      assert(false, "Unexpected _state in make_affiliated_maybe");
  }
}

void ShenandoahHeapRegion::make_regular_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress() ||
          ShenandoahHeap::heap()->is_degenerated_gc_in_progress(),
          "Only for STW GC");
  reset_age();
  switch (state()) {
    case _empty_uncommitted:
      do_commit();
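      // fall through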
    case _empty_committed:
    case _cset:
    case _humongous_start:
    case _humongous_cont:
      set_state(_regular);
      return;
    case _pinned_cset:
      set_state(_pinned);
      return;
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular bypass");
  }
}

void ShenandoahHeapRegion::make_humongous_start() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (state()) {
    case _empty_uncommitted:
      do_commit();
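      // fall through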
    case _empty_committed:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start allocation");
  }
}

void ShenandoahHeapRegion::make_humongous_start_bypass(ShenandoahAffiliation affiliation) {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
  // Don't bother to account for affiliated regions during Full GC. We recompute totals at end.
  set_affiliation(affiliation);
  reset_age();
  switch (state()) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start bypass");
  }
}

void ShenandoahHeapRegion::make_humongous_cont() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (state()) {
    case _empty_uncommitted:
      do_commit();
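      // fall through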
    case _empty_committed:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation allocation");
  }
}

void ShenandoahHeapRegion::make_humongous_cont_bypass(ShenandoahAffiliation affiliation) {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
  set_affiliation(affiliation);
  // Don't bother to account for affiliated regions during Full GC. We recompute totals at end.
  reset_age();
  switch (state()) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation bypass");
  }
}

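// Pinning is layered: record_pin()/record_unpin() (further below) maintain the
// atomic pin count, typically driven by JNI critical sections, while
// make_pinned()/make_unpinned() reconcile the region state with that count at a
// safepoint.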
void ShenandoahHeapRegion::make_pinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() > 0, "Should have pins: %zu", pin_count());

  switch (state()) {
    case _regular:
      set_state(_pinned);
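      // fall through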
    case _pinned_cset:
    case _pinned:
      return;
    case _humongous_start:
      set_state(_pinned_humongous_start);
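      // fall through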
    case _pinned_humongous_start:
      return;
    case _cset:
      set_state(_pinned_cset);
      return;
    default:
      report_illegal_transition("pinning");
  }
}

void ShenandoahHeapRegion::make_unpinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() == 0, "Should not have pins: %zu", pin_count());

  switch (state()) {
    case _pinned:
      assert(is_affiliated(), "Pinned region should be affiliated");
      set_state(_regular);
      return;
    case _regular:
    case _humongous_start:
      return;
    case _pinned_cset:
      set_state(_cset);
      return;
    case _pinned_humongous_start:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("unpinning");
  }
}

void ShenandoahHeapRegion::make_cset() {
  shenandoah_assert_heaplocked();
  // Leave age untouched. We need to consult the age when we are deciding whether to promote evacuated objects.
  switch (state()) {
    case _regular:
      set_state(_cset);
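      // fall through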
    case _cset:
      return;
    default:
      report_illegal_transition("cset");
  }
}

void ShenandoahHeapRegion::make_trash() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (state()) {
    case _humongous_start:
    case _humongous_cont:
    {
      // Reclaiming a humongous region also reclaims its humongous waste. When this region is
      // eventually recycled, we reclaim its used memory; at recycle time, we no longer
      // recognize it as a humongous region.
      decrement_humongous_waste();
    }
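    // fall through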
    case _cset:
      // Reclaiming cset regions
    case _regular:
      // Immediate region reclaim
      set_state(_trash);
      return;
    default:
      report_illegal_transition("trashing");
  }
}

void ShenandoahHeapRegion::make_trash_immediate() {
  make_trash();

  // On this path, we know there are no marked objects in the region, so tell the
  // marking context about it to bypass bitmap resets.
  assert(ShenandoahHeap::heap()->gc_generation()->is_mark_complete(), "Marking should be complete here.");
  shenandoah_assert_generations_reconciled();
  ShenandoahHeap::heap()->marking_context()->reset_top_bitmap(this);
}

void ShenandoahHeapRegion::make_empty() {
  reset_age();
  CENSUS_NOISE(clear_youth();)
  switch (state()) {
    case _trash:
      set_state(_empty_committed);
      _empty_time = os::elapsedTime();
      return;
    default:
      report_illegal_transition("emptying");
  }
}

void ShenandoahHeapRegion::make_uncommitted() {
  shenandoah_assert_heaplocked();
  switch (state()) {
    case _empty_committed:
      do_uncommit();
      set_state(_empty_uncommitted);
      return;
    default:
      report_illegal_transition("uncommitting");
  }
}

void ShenandoahHeapRegion::make_committed_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");

  switch (state()) {
    case _empty_uncommitted:
      do_commit();
      set_state(_empty_committed);
      return;
    default:
      report_illegal_transition("commit bypass");
  }
}

void ShenandoahHeapRegion::reset_alloc_metadata() {
  _tlab_allocs = 0;
  _gclab_allocs = 0;
  _plab_allocs = 0;
}

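// Note: the _tlab_allocs/_gclab_allocs/_plab_allocs counters are kept in heap
// words, while used() is in bytes; shared allocations are whatever portion of
// used() was not satisfied from a TLAB, GCLAB, or PLAB.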
size_t ShenandoahHeapRegion::get_shared_allocs() const {
  return used() - (_tlab_allocs + _gclab_allocs + _plab_allocs) * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_tlab_allocs() const {
  return _tlab_allocs * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_gclab_allocs() const {
  return _gclab_allocs * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_plab_allocs() const {
  return _plab_allocs * HeapWordSize;
}

void ShenandoahHeapRegion::set_live_data(size_t s) {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = (s >> LogHeapWordSize);
}

void ShenandoahHeapRegion::print_on(outputStream* st) const {
  st->print("|");
  st->print("%5zu", this->_index);

  switch (state()) {
    case _empty_uncommitted:
      st->print("|EU ");
      break;
    case _empty_committed:
      st->print("|EC ");
      break;
    case _regular:
      st->print("|R  ");
      break;
    case _humongous_start:
      st->print("|H  ");
      break;
    case _pinned_humongous_start:
      st->print("|HP ");
      break;
    case _humongous_cont:
      st->print("|HC ");
      break;
    case _cset:
      st->print("|CS ");
      break;
    case _trash:
      st->print("|TR ");
      break;
    case _pinned:
      st->print("|P  ");
      break;
    case _pinned_cset:
      st->print("|CSP");
      break;
    default:
      ShouldNotReachHere();
  }

  st->print("|%s", shenandoah_affiliation_code(affiliation()));

#define SHR_PTR_FORMAT "%12" PRIxPTR

  st->print("|BTE " SHR_PTR_FORMAT ", " SHR_PTR_FORMAT ", " SHR_PTR_FORMAT,
            p2i(bottom()), p2i(top()), p2i(end()));
  st->print("|TAMS " SHR_PTR_FORMAT,
            p2i(ShenandoahHeap::heap()->marking_context()->top_at_mark_start(const_cast<ShenandoahHeapRegion*>(this))));
  st->print("|UWM " SHR_PTR_FORMAT,
            p2i(_update_watermark));
  st->print("|U %5zu%1s", byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));
  st->print("|T %5zu%1s", byte_size_in_proper_unit(get_tlab_allocs()), proper_unit_for_byte_size(get_tlab_allocs()));
  st->print("|G %5zu%1s", byte_size_in_proper_unit(get_gclab_allocs()), proper_unit_for_byte_size(get_gclab_allocs()));
  if (ShenandoahHeap::heap()->mode()->is_generational()) {
    st->print("|P %5zu%1s", byte_size_in_proper_unit(get_plab_allocs()), proper_unit_for_byte_size(get_plab_allocs()));
  }
  st->print("|S %5zu%1s", byte_size_in_proper_unit(get_shared_allocs()), proper_unit_for_byte_size(get_shared_allocs()));
  st->print("|L %5zu%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes()));
  st->print("|CP %3zu", pin_count());
  st->cr();

#undef SHR_PTR_FORMAT
}

// Coalesce and fill the dead objects in this region below TAMS; returns true if the
// pass completed without being cancelled.
bool ShenandoahHeapRegion::oop_coalesce_and_fill(bool cancellable) {
  assert(!is_humongous(), "No need to fill or coalesce humongous regions");
  if (!is_active()) {
    end_preemptible_coalesce_and_fill();
    return true;
  }

  ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
  ShenandoahMarkingContext* marking_context = heap->marking_context();

  // Expect marking to be completed before these threads invoke this service.
  assert(heap->gc_generation()->is_mark_complete(), "sanity");
  shenandoah_assert_generations_reconciled();

  // All objects above TAMS are considered live even though their mark bits will not be set. Note that young-
  // gen evacuations that interrupt a long-running old-gen concurrent mark may promote objects into old-gen
  // while the old-gen concurrent marking is ongoing. These newly promoted objects will reside above TAMS
  // and will be treated as live during the current old-gen marking pass, even though they will not be
  // explicitly marked.
  HeapWord* t = marking_context->top_at_mark_start(this);

  // Resume coalesce and fill from this address
  HeapWord* obj_addr = resume_coalesce_and_fill();

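  // Loop invariant (informal sketch): [bottom(), obj_addr) is fully coalesced,
  // i.e. every run of dead objects was replaced by a single filler object and
  // reported to the remembered set via coalesce_objects(); obj_addr always
  // points at an object start.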
  while (obj_addr < t) {
    oop obj = cast_to_oop(obj_addr);
    if (marking_context->is_marked(obj)) {
      assert(obj->klass() != nullptr, "klass should not be nullptr");
      obj_addr += obj->size();
    } else {
      // Object is not marked. Coalesce and fill dead object with dead neighbors.
      HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
      assert(next_marked_obj <= t, "next marked object cannot exceed top");
      size_t fill_size = next_marked_obj - obj_addr;
      assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
      ShenandoahHeap::fill_with_object(obj_addr, fill_size);
      heap->old_generation()->card_scan()->coalesce_objects(obj_addr, fill_size);
      obj_addr = next_marked_obj;
    }
    if (cancellable && heap->cancelled_gc()) {
      suspend_coalesce_and_fill(obj_addr);
      return false;
    }
  }
  // Mark that this region has been coalesced and filled
  end_preemptible_coalesce_and_fill();
  return true;
}

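// Worked example (assuming the default 512-byte cards, i.e. 64 heap words on
// 64-bit): a 1 MiB slice is 131072 words and spans 131072 / 64 = 2048 cards.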
size_t get_card_count(size_t words) {
  assert(words % CardTable::card_size_in_words() == 0, "Humongous iteration must span whole number of cards");
  assert(CardTable::card_size_in_words() * (words / CardTable::card_size_in_words()) == words,
         "slice must be integral number of cards");
  return words / CardTable::card_size_in_words();
}

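// Visit only the objects overlapping dirty cards within this humongous slice. The
// write_table flag selects which remembered-set card table to consult: the write
// table that mutators are still dirtying, or the stable read-table snapshot.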
void ShenandoahHeapRegion::oop_iterate_humongous_slice_dirty(OopIterateClosure* blk,
                                                             HeapWord* start, size_t words, bool write_table) const {
  assert(is_humongous(), "only humongous region here");

  ShenandoahHeapRegion* r = humongous_start_region();
  oop obj = cast_to_oop(r->bottom());
  size_t num_cards = get_card_count(words);

  ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
  ShenandoahScanRemembered* scanner = heap->old_generation()->card_scan();
  size_t card_index = scanner->card_index_for_addr(start);
  if (write_table) {
    while (num_cards-- > 0) {
      if (scanner->is_write_card_dirty(card_index++)) {
        obj->oop_iterate(blk, MemRegion(start, start + CardTable::card_size_in_words()));
      }
      start += CardTable::card_size_in_words();
    }
  } else {
    while (num_cards-- > 0) {
      if (scanner->is_card_dirty(card_index++)) {
        obj->oop_iterate(blk, MemRegion(start, start + CardTable::card_size_in_words()));
      }
      start += CardTable::card_size_in_words();
    }
  }
}

void ShenandoahHeapRegion::oop_iterate_humongous_slice_all(OopIterateClosure* cl, HeapWord* start, size_t words) const {
  assert(is_humongous(), "only humongous region here");

  ShenandoahHeapRegion* r = humongous_start_region();
  oop obj = cast_to_oop(r->bottom());

  // Scan all data, regardless of whether cards are dirty
  obj->oop_iterate(cl, MemRegion(start, start + words));
}

ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(is_humongous(), "Must be a part of the humongous region");
  size_t i = index();
  ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this);
  while (!r->is_humongous_start()) {
    assert(i > 0, "Sanity");
    i--;
    r = heap->get_region(i);
    assert(r->is_humongous(), "Must be a part of the humongous region");
  }
  assert(r->is_humongous_start(), "Must be");
  return r;
}

void ShenandoahHeapRegion::recycle_internal() {
  assert(_recycling.is_set() && is_trash(), "Wrong state");
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  set_top(bottom());
  clear_live_data();
  reset_alloc_metadata();
  heap->marking_context()->reset_top_at_mark_start(this);
  set_update_watermark(bottom());
  if (ZapUnusedHeapArea) {
    SpaceMangler::mangle_region(MemRegion(bottom(), end()));
  }

  make_empty();
  set_affiliation(FREE);
}

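// The _recycling flag arbitrates between the two recycling paths below: the
// heap-lock path (try_recycle_under_lock) and the concurrent mutator path
// (try_recycle). Whoever wins the try_set() race performs the recycle; on the
// locked path, losers spin until the winner finishes, so the region is never
// handed out half-recycled.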
void ShenandoahHeapRegion::try_recycle_under_lock() {
  shenandoah_assert_heaplocked();
  if (is_trash() && _recycling.try_set()) {
    if (is_trash()) {
      ShenandoahHeap* heap = ShenandoahHeap::heap();
      ShenandoahGeneration* generation = heap->generation_for(affiliation());

      heap->decrease_used(generation, used());
      generation->decrement_affiliated_region_count();

      recycle_internal();
    }
    _recycling.unset();
  } else {
    // Ensure recycling is unset before returning to the mutator, so it can continue memory allocation.
    while (_recycling.is_set()) {
      if (os::is_MP()) {
        SpinPause();
      } else {
        os::naked_yield();
      }
    }
  }
}

void ShenandoahHeapRegion::try_recycle() {
  shenandoah_assert_not_heaplocked();
  if (is_trash() && _recycling.try_set()) {
    // Double-check the region state after winning the race to set the recycling flag.
    if (is_trash()) {
      ShenandoahHeap* heap = ShenandoahHeap::heap();
      ShenandoahGeneration* generation = heap->generation_for(affiliation());
      heap->decrease_used(generation, used());
      generation->decrement_affiliated_region_count_without_lock();

      recycle_internal();
    }
    _recycling.unset();
  }
}

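// Shenandoah keeps no block-offset table, so block_start() below does a linear
// object walk from bottom(); this is assumed acceptable because callers (such as
// heap walking and verification) are infrequent.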
HeapWord* ShenandoahHeapRegion::block_start(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += cast_to_oop(cur)->size();
    }
    shenandoah_assert_correct(nullptr, cast_to_oop(last));
    return last;
  }
}

size_t ShenandoahHeapRegion::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p < top()) {
    return cast_to_oop(p)->size();
  } else {
    assert(p == top(), "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

size_t ShenandoahHeapRegion::setup_sizes(size_t max_heap_size) {
  // Absolute minimums we should not ever break.
  static const size_t MIN_REGION_SIZE = 256*K;

  if (FLAG_IS_DEFAULT(ShenandoahMinRegionSize)) {
    FLAG_SET_DEFAULT(ShenandoahMinRegionSize, MIN_REGION_SIZE);
  }

  // Generational Shenandoah needs this alignment for card tables.
  if (strcmp(ShenandoahGCMode, "generational") == 0) {
    max_heap_size = align_up(max_heap_size, CardTable::ct_max_alignment_constraint());
  }

  size_t region_size;
  if (FLAG_IS_DEFAULT(ShenandoahRegionSize)) {
    if (ShenandoahMinRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (%zu%s) is too low to afford the minimum number "
                      "of regions (%zu) of minimum region size (%zu%s).",
                      byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
                      MIN_NUM_REGIONS,
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
      err_msg message("%zu%s should not be lower than minimum region size (%zu%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(MIN_REGION_SIZE), proper_unit_for_byte_size(MIN_REGION_SIZE));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MinTLABSize) {
      err_msg message("%zu%s should not be lower than the minimum TLAB size (%zu%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(MinTLABSize), proper_unit_for_byte_size(MinTLABSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
      err_msg message("%zu%s should not be lower than min region size (%zu%s).",
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize),
                      byte_size_in_proper_unit(MIN_REGION_SIZE), proper_unit_for_byte_size(MIN_REGION_SIZE));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message);
    }
    if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Minimum (%zu%s) should not be larger than maximum (%zu%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message);
    }

    // We rapidly expand to max_heap_size in most scenarios, so that is the measure
    // for usual heap sizes. Do not depend on initial_heap_size here.
    region_size = max_heap_size / ShenandoahTargetNumRegions;

    // Now make sure that we don't go over or under our limits.
    region_size = MAX2(ShenandoahMinRegionSize, region_size);
    region_size = MIN2(ShenandoahMaxRegionSize, region_size);

  } else {
    if (ShenandoahRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (%zu%s) is too low to afford the minimum number "
                      "of regions (%zu) of requested size (%zu%s).",
                      byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
                      MIN_NUM_REGIONS,
                      byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    if (ShenandoahRegionSize < ShenandoahMinRegionSize) {
      err_msg message("Heap region size (%zu%s) should be larger than min region size (%zu%s).",
                      byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize),
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    if (ShenandoahRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Heap region size (%zu%s) should be lower than max region size (%zu%s).",
                      byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize),
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    region_size = ShenandoahRegionSize;
  }

  // Make sure region size and heap size are page aligned.
  // If large pages are used, we ensure that region size is aligned to large page size if
  // heap size is large enough to accommodate the minimal number of regions. Otherwise, we
  // align region size to regular page size.

  // Figure out the page size to use, and align the heap up to it.
  size_t page_size = os::vm_page_size();
  if (UseLargePages) {
    size_t large_page_size = os::large_page_size();
    max_heap_size = align_up(max_heap_size, large_page_size);
    if ((max_heap_size / align_up(region_size, large_page_size)) >= MIN_NUM_REGIONS) {
      page_size = large_page_size;
    } else {
      // Should have been checked during argument initialization
      assert(!ShenandoahUncommit, "Uncommit requires region size aligns to large page size");
    }
  } else {
    max_heap_size = align_up(max_heap_size, page_size);
  }

  // Align region size to page size
  region_size = align_up(region_size, page_size);

  int region_size_log = log2i(region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = size_t(1) << region_size_log;
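  // Illustrative example: with a 4 GiB max heap and the default
  // ShenandoahTargetNumRegions (2048), region_size starts at 2 MiB, which is
  // page-aligned and already a power of two, so the rounding above is a no-op.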

  // Now, set up the globals.
  guarantee(RegionSizeBytesShift == 0, "we should only set it once");
  RegionSizeBytesShift = (size_t)region_size_log;

  guarantee(RegionSizeWordsShift == 0, "we should only set it once");
  RegionSizeWordsShift = RegionSizeBytesShift - LogHeapWordSize;

  guarantee(RegionSizeBytes == 0, "we should only set it once");
  RegionSizeBytes = region_size;
  RegionSizeWords = RegionSizeBytes >> LogHeapWordSize;
  assert (RegionSizeWords*HeapWordSize == RegionSizeBytes, "sanity");

  guarantee(RegionSizeWordsMask == 0, "we should only set it once");
  RegionSizeWordsMask = RegionSizeWords - 1;

  guarantee(RegionSizeBytesMask == 0, "we should only set it once");
  RegionSizeBytesMask = RegionSizeBytes - 1;
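  // Illustrative: since RegionSizeBytes is a power of two, region math reduces to
  // shifts and masks. For some address addr within the heap (heap_base being the
  // start of the heap reservation; hypothetical names, the real accessors live in
  // ShenandoahHeap):
  //   size_t index  = ((uintptr_t)addr - heap_base) >> RegionSizeBytesShift;
  //   size_t offset = (uintptr_t)addr & RegionSizeBytesMask;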

  guarantee(RegionCount == 0, "we should only set it once");
  RegionCount = align_up(max_heap_size, RegionSizeBytes) / RegionSizeBytes;
  guarantee(RegionCount >= MIN_NUM_REGIONS, "Should have at least minimum regions");

  guarantee(MaxTLABSizeWords == 0, "we should only set it once");
  MaxTLABSizeWords = align_down(RegionSizeWords, MinObjAlignment);

  guarantee(MaxTLABSizeBytes == 0, "we should only set it once");
  MaxTLABSizeBytes = MaxTLABSizeWords * HeapWordSize;
  assert (MaxTLABSizeBytes > MinTLABSize, "should be larger");

  return max_heap_size;
}

void ShenandoahHeapRegion::do_commit() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->is_heap_region_special() && !os::commit_memory((char *) bottom(), RegionSizeBytes, false)) {
    report_java_out_of_memory("Unable to commit region");
  }
  if (!heap->commit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to commit bitmaps for region");
  }
  if (AlwaysPreTouch) {
    os::pretouch_memory(bottom(), end(), heap->pretouch_heap_page_size());
  }
  heap->increase_committed(ShenandoahHeapRegion::region_size_bytes());
}

void ShenandoahHeapRegion::do_uncommit() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->is_heap_region_special() && !os::uncommit_memory((char *) bottom(), RegionSizeBytes)) {
    report_java_out_of_memory("Unable to uncommit region");
  }
  if (!heap->uncommit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to uncommit bitmaps for region");
  }
  heap->decrease_committed(ShenandoahHeapRegion::region_size_bytes());
}

void ShenandoahHeapRegion::set_state(RegionState to) {
  EventShenandoahHeapRegionStateChange evt;
  if (evt.should_commit()) {
    evt.set_index((unsigned) index());
    evt.set_start((uintptr_t)bottom());
    evt.set_used(used());
    evt.set_from(state());
    evt.set_to(to);
    evt.commit();
  }
  Atomic::store(&_state, to);
}

void ShenandoahHeapRegion::record_pin() {
  Atomic::add(&_critical_pins, (size_t)1);
}

void ShenandoahHeapRegion::record_unpin() {
  assert(pin_count() > 0, "Region %zu should have non-zero pins", index());
  Atomic::sub(&_critical_pins, (size_t)1);
}

size_t ShenandoahHeapRegion::pin_count() const {
  return Atomic::load(&_critical_pins);
}

void ShenandoahHeapRegion::set_affiliation(ShenandoahAffiliation new_affiliation) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahAffiliation region_affiliation = heap->region_affiliation(this);
  ShenandoahMarkingContext* const ctx = heap->marking_context();
  {
    log_debug(gc)("Setting affiliation of Region %zu from %s to %s, top: " PTR_FORMAT ", TAMS: " PTR_FORMAT
                  ", watermark: " PTR_FORMAT ", top_bitmap: " PTR_FORMAT,
                  index(), shenandoah_affiliation_name(region_affiliation), shenandoah_affiliation_name(new_affiliation),
                  p2i(top()), p2i(ctx->top_at_mark_start(this)), p2i(_update_watermark), p2i(ctx->top_bitmap(this)));
  }

#ifdef ASSERT
  {
    size_t idx = this->index();
    HeapWord* top_bitmap = ctx->top_bitmap(this);

    assert(ctx->is_bitmap_range_within_region_clear(top_bitmap, _end),
           "Region %zu, bitmap should be clear between top_bitmap: " PTR_FORMAT " and end: " PTR_FORMAT, idx,
           p2i(top_bitmap), p2i(_end));
  }
#endif

  if (region_affiliation == new_affiliation) {
    return;
  }

  if (!heap->mode()->is_generational()) {
    log_trace(gc)("Changing affiliation of region %zu from %s to %s",
                  index(), affiliation_name(), shenandoah_affiliation_name(new_affiliation));
    heap->set_affiliation(this, new_affiliation);
    return;
  }

  switch (new_affiliation) {
    case FREE:
      assert(!has_live(), "Free region should not have live data");
      break;
    case YOUNG_GENERATION:
      reset_age();
      break;
    case OLD_GENERATION:
      break;
    default:
      ShouldNotReachHere();
      return;
  }
  heap->set_affiliation(this, new_affiliation);
}

void ShenandoahHeapRegion::decrement_humongous_waste() const {
  assert(is_humongous(), "Should only use this for humongous regions");
  size_t waste_bytes = free();
  if (waste_bytes > 0) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahGeneration* generation = heap->generation_for(affiliation());
    heap->decrease_humongous_waste(generation, waste_bytes);
  }
}