/*
 * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/powerOfTwo.hpp"
size_t ShenandoahHeapRegion::RegionCount = 0;
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
size_t ShenandoahHeapRegion::RegionSizeWords = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;
size_t ShenandoahHeapRegion::HumongousThresholdBytes = 0;
size_t ShenandoahHeapRegion::HumongousThresholdWords = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0;

ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed) :
  _index(index),
  _bottom(start),
  _end(start + RegionSizeWords),
  _new_top(nullptr),
  _empty_time(os::elapsedTime()),
  _state(committed ? _empty_committed : _empty_uncommitted),
  _top(start),
  _tlab_allocs(0),
  _gclab_allocs(0),
  _live_data(0),
  _critical_pins(0),
  _update_watermark(start) {

  assert(Universe::on_page_boundary(_bottom) && Universe::on_page_boundary(_end),
         "invalid space boundaries");
  if (ZapUnusedHeapArea && committed) {
    SpaceMangler::mangle_region(MemRegion(_bottom, _end));
  }
}

void ShenandoahHeapRegion::report_illegal_transition(const char *method) {
  stringStream ss;
  ss.print("Illegal region state transition from \"%s\", at %s\n ", region_state_to_string(_state), method);
  print_on(&ss);
  fatal("%s", ss.freeze());
}

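// Region state machine: each make_*() transition below is legal only from the
// states listed in its switch; anything else lands in report_illegal_transition().
// Several cases intentionally fall through, e.g. _empty_uncommitted first commits
// the backing memory and then continues into _empty_committed to become _regular.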
void ShenandoahHeapRegion::make_regular_allocation() {
  shenandoah_assert_heaplocked();

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
      set_state(_regular);
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular allocation");
  }
}

void ShenandoahHeapRegion::make_regular_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress() || ShenandoahHeap::heap()->is_degenerated_gc_in_progress(),
          "only for full or degen GC");

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
    case _cset:
    case _humongous_start:
    case _humongous_cont:
      set_state(_regular);
      return;
    case _pinned_cset:
      set_state(_pinned);
      return;
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular bypass");
  }
}

void ShenandoahHeapRegion::make_humongous_start() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start allocation");
  }
}

void ShenandoahHeapRegion::make_humongous_start_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start bypass");
  }
}

void ShenandoahHeapRegion::make_humongous_cont() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation allocation");
  }
}

void ShenandoahHeapRegion::make_humongous_cont_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation bypass");
  }
}

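// Pin bookkeeping: a region is pinned while raw pointers into it are exposed,
// typically by JNI critical sections. record_pin()/record_unpin() (at the end of
// this file) maintain the atomic pin count; make_pinned()/make_unpinned() only
// flip the region state, asserting on the current count.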
void ShenandoahHeapRegion::make_pinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() > 0, "Should have pins: " SIZE_FORMAT, pin_count());

  switch (_state) {
    case _regular:
      set_state(_pinned);
    case _pinned_cset:
    case _pinned:
      return;
    case _humongous_start:
      set_state(_pinned_humongous_start);
    case _pinned_humongous_start:
      return;
    case _cset:
      _state = _pinned_cset;
      return;
    default:
      report_illegal_transition("pinning");
  }
}

void ShenandoahHeapRegion::make_unpinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() == 0, "Should not have pins: " SIZE_FORMAT, pin_count());

  switch (_state) {
    case _pinned:
      set_state(_regular);
      return;
    case _regular:
    case _humongous_start:
      return;
    case _pinned_cset:
      set_state(_cset);
      return;
    case _pinned_humongous_start:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("unpinning");
  }
}

void ShenandoahHeapRegion::make_cset() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _regular:
      set_state(_cset);
    case _cset:
      return;
    default:
      report_illegal_transition("cset");
  }
}

void ShenandoahHeapRegion::make_trash() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _cset:
      // Reclaiming cset regions
    case _humongous_start:
    case _humongous_cont:
      // Reclaiming humongous regions
    case _regular:
      // Immediate region reclaim
      set_state(_trash);
      return;
    default:
      report_illegal_transition("trashing");
  }
}

void ShenandoahHeapRegion::make_trash_immediate() {
  make_trash();

  // On this path, we know there are no marked objects in the region;
  // tell the marking context about it to bypass bitmap resets.
  ShenandoahHeap::heap()->complete_marking_context()->reset_top_bitmap(this);
}

void ShenandoahHeapRegion::make_empty() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _trash:
      set_state(_empty_committed);
      _empty_time = os::elapsedTime();
      return;
    default:
      report_illegal_transition("emptying");
  }
}

void ShenandoahHeapRegion::make_uncommitted() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _empty_committed:
      do_uncommit();
      set_state(_empty_uncommitted);
      return;
    default:
      report_illegal_transition("uncommitting");
  }
}

void ShenandoahHeapRegion::make_committed_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      set_state(_empty_committed);
      return;
    default:
      report_illegal_transition("commit bypass");
  }
}

void ShenandoahHeapRegion::reset_alloc_metadata() {
  _tlab_allocs = 0;
  _gclab_allocs = 0;
}

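// Allocation counters are tracked in heap words and converted to bytes by the
// accessors below. "Shared" allocations are whatever portion of used() was not
// satisfied from TLABs or GCLABs.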
size_t ShenandoahHeapRegion::get_shared_allocs() const {
  return used() - (_tlab_allocs + _gclab_allocs) * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_tlab_allocs() const {
  return _tlab_allocs * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_gclab_allocs() const {
  return _gclab_allocs * HeapWordSize;
}

void ShenandoahHeapRegion::set_live_data(size_t s) {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = (s >> LogHeapWordSize);
}

void ShenandoahHeapRegion::print_on(outputStream* st) const {
  st->print("|");
  st->print(SIZE_FORMAT_W(5), this->_index);

  switch (_state) {
    case _empty_uncommitted:
      st->print("|EU ");
      break;
    case _empty_committed:
      st->print("|EC ");
      break;
    case _regular:
      st->print("|R  ");
      break;
    case _humongous_start:
      st->print("|H  ");
      break;
    case _pinned_humongous_start:
      st->print("|HP ");
      break;
    case _humongous_cont:
      st->print("|HC ");
      break;
    case _cset:
      st->print("|CS ");
      break;
    case _trash:
      st->print("|T  ");
      break;
    case _pinned:
      st->print("|P  ");
      break;
    case _pinned_cset:
      st->print("|CSP");
      break;
    default:
      ShouldNotReachHere();
  }

#define SHR_PTR_FORMAT "%12" PRIxPTR

  st->print("|BTE " SHR_PTR_FORMAT ", " SHR_PTR_FORMAT ", " SHR_PTR_FORMAT,
            p2i(bottom()), p2i(top()), p2i(end()));
  st->print("|TAMS " SHR_PTR_FORMAT,
            p2i(ShenandoahHeap::heap()->marking_context()->top_at_mark_start(const_cast<ShenandoahHeapRegion*>(this))));
  st->print("|UWM " SHR_PTR_FORMAT,
            p2i(_update_watermark));
  st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));
  st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()), proper_unit_for_byte_size(get_tlab_allocs()));
  st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()), proper_unit_for_byte_size(get_gclab_allocs()));
  st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()), proper_unit_for_byte_size(get_shared_allocs()));
  st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes()));
  st->print("|CP " SIZE_FORMAT_W(3), pin_count());
  st->cr();

#undef SHR_PTR_FORMAT
}

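// Iterate all oops in this region. Humongous regions delegate to the single
// object that spans them (found via the start region); regular regions walk
// objects linearly from bottom() to top().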
void ShenandoahHeapRegion::oop_iterate(OopIterateClosure* blk) {
  if (!is_active()) return;
  if (is_humongous()) {
    oop_iterate_humongous(blk);
  } else {
    oop_iterate_objects(blk);
  }
}

void ShenandoahHeapRegion::oop_iterate_objects(OopIterateClosure* blk) {
  assert(! is_humongous(), "no humongous region here");
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call objects iterate, but this is easier.
  while (obj_addr < t) {
    oop obj = cast_to_oop(obj_addr);
    obj_addr += obj->oop_iterate_size(blk);
  }
}

void ShenandoahHeapRegion::oop_iterate_humongous(OopIterateClosure* blk) {
  assert(is_humongous(), "only humongous region here");
  // Find head.
  ShenandoahHeapRegion* r = humongous_start_region();
  assert(r->is_humongous_start(), "need humongous head here");
  oop obj = cast_to_oop(r->bottom());
  obj->oop_iterate(blk, MemRegion(bottom(), top()));
}

ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(is_humongous(), "Must be a part of the humongous region");
  size_t i = index();
  ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this);
  while (!r->is_humongous_start()) {
    assert(i > 0, "Sanity");
    i--;
    r = heap->get_region(i);
    assert(r->is_humongous(), "Must be a part of the humongous region");
  }
  assert(r->is_humongous_start(), "Must be");
  return r;
}

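// Return a trashed region to service: reset top, live data, allocation metadata
// and marking bookkeeping (TAMS, update watermark), leaving the region in the
// _empty_committed state via make_empty().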
void ShenandoahHeapRegion::recycle() {
  set_top(bottom());
  clear_live_data();

  reset_alloc_metadata();

  ShenandoahHeap::heap()->marking_context()->reset_top_at_mark_start(this);
  set_update_watermark(bottom());

  make_empty();

  if (ZapUnusedHeapArea) {
    SpaceMangler::mangle_region(MemRegion(bottom(), end()));
  }
}

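// Shenandoah maintains no block-offset table, so block_start() walks linearly
// from bottom(), stepping over object sizes, until it finds the object covering p.
// This is O(objects in the region) and suits infrequent callers such as
// verification and debugging.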
HeapWord* ShenandoahHeapRegion::block_start(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += cast_to_oop(cur)->size();
    }
    shenandoah_assert_correct(nullptr, cast_to_oop(last));
    return last;
  }
}

size_t ShenandoahHeapRegion::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p < top()) {
    return cast_to_oop(p)->size();
  } else {
    assert(p == top(), "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

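// Validate the Shenandoah region-size flags against the maximum heap size and
// derive the region geometry published in the statics above (region size in
// bytes/words, shifts and masks, humongous threshold, max TLAB size). Called
// once during heap initialization.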
size_t ShenandoahHeapRegion::setup_sizes(size_t max_heap_size) {
  // Absolute minimums we should not ever break.
  static const size_t MIN_REGION_SIZE = 256*K;

  if (FLAG_IS_DEFAULT(ShenandoahMinRegionSize)) {
    FLAG_SET_DEFAULT(ShenandoahMinRegionSize, MIN_REGION_SIZE);
  }

  size_t region_size;
  if (FLAG_IS_DEFAULT(ShenandoahRegionSize)) {
    if (ShenandoahMinRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
                      MIN_NUM_REGIONS,
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than minimum region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(MIN_REGION_SIZE), proper_unit_for_byte_size(MIN_REGION_SIZE));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MinTLABSize) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than TLAB size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(MinTLABSize), proper_unit_for_byte_size(MinTLABSize));

// ... (the remainder of setup_sizes() through the start of set_state() is not
// present in this excerpt) ...

    evt.set_used(used());
    evt.set_from(_state);
    evt.set_to(to);
    evt.commit();
  }
  _state = to;
}

void ShenandoahHeapRegion::record_pin() {
  Atomic::add(&_critical_pins, (size_t)1);
}

void ShenandoahHeapRegion::record_unpin() {
  assert(pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", index());
  Atomic::sub(&_critical_pins, (size_t)1);
}

size_t ShenandoahHeapRegion::pin_count() const {
  return Atomic::load(&_critical_pins);
}

/*
 * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/shenandoahCardTable.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/powerOfTwo.hpp"

size_t ShenandoahHeapRegion::RegionCount = 0;
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
size_t ShenandoahHeapRegion::RegionSizeWords = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;
size_t ShenandoahHeapRegion::HumongousThresholdBytes = 0;
size_t ShenandoahHeapRegion::HumongousThresholdWords = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0;

ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed) :
  _index(index),
  _bottom(start),
  _end(start + RegionSizeWords),
  _new_top(nullptr),
  _empty_time(os::elapsedTime()),
  _state(committed ? _empty_committed : _empty_uncommitted),
  _top(start),
  _tlab_allocs(0),
  _gclab_allocs(0),
  _plab_allocs(0),
  _has_young_lab(false),
  _live_data(0),
  _critical_pins(0),
  _update_watermark(start),
  _age(0) {

  assert(Universe::on_page_boundary(_bottom) && Universe::on_page_boundary(_end),
         "invalid space boundaries");
  if (ZapUnusedHeapArea && committed) {
    SpaceMangler::mangle_region(MemRegion(_bottom, _end));
  }
}

void ShenandoahHeapRegion::report_illegal_transition(const char *method) {
  stringStream ss;
  ss.print("Illegal region state transition from \"%s\", at %s\n ", region_state_to_string(_state), method);
  print_on(&ss);
  fatal("%s", ss.freeze());
}

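// In generational mode, taking a region for regular allocation also tags it
// with the requesting generation (affiliation) and resets its age, so tenuring
// decisions start fresh for the new contents.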
void ShenandoahHeapRegion::make_regular_allocation(ShenandoahRegionAffiliation affiliation) {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
      set_affiliation(affiliation);
      set_state(_regular);
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular allocation");
  }
}

// Change affiliation to YOUNG_GENERATION if _state is not _pinned_cset, _regular, or _pinned. This implements
// behavior previously performed as a side effect of make_regular_bypass().
void ShenandoahHeapRegion::make_young_maybe() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _empty_uncommitted:
    case _empty_committed:
    case _cset:
    case _humongous_start:
    case _humongous_cont:
      set_affiliation(YOUNG_GENERATION);
      return;
    case _pinned_cset:
    case _regular:
    case _pinned:
      return;
    default:
      assert(false, "Unexpected _state in make_young_maybe");
  }
}

void ShenandoahHeapRegion::make_regular_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress() || ShenandoahHeap::heap()->is_degenerated_gc_in_progress(),
          "only for full or degen GC");
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
    case _cset:
    case _humongous_start:
    case _humongous_cont:
      set_state(_regular);
      return;
    case _pinned_cset:
      set_state(_pinned);
      return;
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular bypass");
  }
}

void ShenandoahHeapRegion::make_humongous_start() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start allocation");
  }
}

void ShenandoahHeapRegion::make_humongous_start_bypass(ShenandoahRegionAffiliation affiliation) {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
  set_affiliation(affiliation);
  reset_age();
  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start bypass");
  }
}

void ShenandoahHeapRegion::make_humongous_cont() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation allocation");
  }
}

void ShenandoahHeapRegion::make_humongous_cont_bypass(ShenandoahRegionAffiliation affiliation) {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
  set_affiliation(affiliation);
  reset_age();
  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation bypass");
  }
}

void ShenandoahHeapRegion::make_pinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() > 0, "Should have pins: " SIZE_FORMAT, pin_count());

  switch (_state) {
    case _regular:
      set_state(_pinned);
    case _pinned_cset:
    case _pinned:
      return;
    case _humongous_start:
      set_state(_pinned_humongous_start);
    case _pinned_humongous_start:
      return;
    case _cset:
      _state = _pinned_cset;
      return;
    default:
      report_illegal_transition("pinning");
  }
}

void ShenandoahHeapRegion::make_unpinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() == 0, "Should not have pins: " SIZE_FORMAT, pin_count());

  switch (_state) {
    case _pinned:
      assert(affiliation() != FREE, "Pinned region should not be FREE");
      set_state(_regular);
      return;
    case _regular:
    case _humongous_start:
      return;
    case _pinned_cset:
      set_state(_cset);
      return;
    case _pinned_humongous_start:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("unpinning");
  }
}

void ShenandoahHeapRegion::make_cset() {
  shenandoah_assert_heaplocked();
  // Leave age untouched. We need to consult the age when we are deciding whether to promote evacuated objects.
  switch (_state) {
    case _regular:
      set_state(_cset);
    case _cset:
      return;
    default:
      report_illegal_transition("cset");
  }
}

void ShenandoahHeapRegion::make_trash() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _cset:
      // Reclaiming cset regions
    case _humongous_start:
    case _humongous_cont:
      // Reclaiming humongous regions
    case _regular:
      // Immediate region reclaim
      set_state(_trash);
      return;
    default:
      report_illegal_transition("trashing");
  }
}

void ShenandoahHeapRegion::make_trash_immediate() {
  make_trash();

  // On this path, we know there are no marked objects in the region;
  // tell the marking context about it to bypass bitmap resets.
  assert(ShenandoahHeap::heap()->active_generation()->is_mark_complete(), "Marking should be complete here.");
  ShenandoahHeap::heap()->marking_context()->reset_top_bitmap(this);
}

void ShenandoahHeapRegion::make_empty() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _trash:
      set_state(_empty_committed);
      _empty_time = os::elapsedTime();
      return;
    default:
      report_illegal_transition("emptying");
  }
}

void ShenandoahHeapRegion::make_uncommitted() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _empty_committed:
      do_uncommit();
      set_state(_empty_uncommitted);
      return;
    default:
      report_illegal_transition("uncommitting");
  }
}

void ShenandoahHeapRegion::make_committed_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      set_state(_empty_committed);
      return;
    default:
      report_illegal_transition("commit bypass");
  }
}

void ShenandoahHeapRegion::reset_alloc_metadata() {
  _tlab_allocs = 0;
  _gclab_allocs = 0;
  _plab_allocs = 0;
}

size_t ShenandoahHeapRegion::get_shared_allocs() const {
  return used() - (_tlab_allocs + _gclab_allocs + _plab_allocs) * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_tlab_allocs() const {
  return _tlab_allocs * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_gclab_allocs() const {
  return _gclab_allocs * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_plab_allocs() const {
  return _plab_allocs * HeapWordSize;
}

void ShenandoahHeapRegion::set_live_data(size_t s) {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = (s >> LogHeapWordSize);
}

void ShenandoahHeapRegion::print_on(outputStream* st) const {
  st->print("|");
  st->print(SIZE_FORMAT_W(5), this->_index);

  switch (_state) {
    case _empty_uncommitted:
      st->print("|EU ");
      break;
    case _empty_committed:
      st->print("|EC ");
      break;
    case _regular:
      st->print("|R  ");
      break;
    case _humongous_start:
      st->print("|H  ");
      break;
    case _pinned_humongous_start:
      st->print("|HP ");
      break;
    case _humongous_cont:
      st->print("|HC ");
      break;
    case _cset:
      st->print("|CS ");
      break;
    case _trash:
      st->print("|T  ");
      break;
    case _pinned:
      st->print("|P  ");
      break;
    case _pinned_cset:
      st->print("|CSP");
      break;
    default:
      ShouldNotReachHere();
  }

  switch (ShenandoahHeap::heap()->region_affiliation(this)) {
    case ShenandoahRegionAffiliation::FREE:
      st->print("|F");
      break;
    case ShenandoahRegionAffiliation::YOUNG_GENERATION:
      st->print("|Y");
      break;
    case ShenandoahRegionAffiliation::OLD_GENERATION:
      st->print("|O");
      break;
    default:
      ShouldNotReachHere();
  }

#define SHR_PTR_FORMAT "%12" PRIxPTR

  st->print("|BTE " SHR_PTR_FORMAT ", " SHR_PTR_FORMAT ", " SHR_PTR_FORMAT,
            p2i(bottom()), p2i(top()), p2i(end()));
  st->print("|TAMS " SHR_PTR_FORMAT,
            p2i(ShenandoahHeap::heap()->marking_context()->top_at_mark_start(const_cast<ShenandoahHeapRegion*>(this))));
  st->print("|UWM " SHR_PTR_FORMAT,
            p2i(_update_watermark));
  st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));
  st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()), proper_unit_for_byte_size(get_tlab_allocs()));
  st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()), proper_unit_for_byte_size(get_gclab_allocs()));
  if (ShenandoahHeap::heap()->mode()->is_generational()) {
    st->print("|P " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_plab_allocs()), proper_unit_for_byte_size(get_plab_allocs()));
  }
  st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()), proper_unit_for_byte_size(get_shared_allocs()));
  st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes()));
  st->print("|CP " SIZE_FORMAT_W(3), pin_count());
  st->cr();

#undef SHR_PTR_FORMAT
}

// oop_iterate without closure and without cancellation; always returns true.
bool ShenandoahHeapRegion::oop_fill_and_coalesce_wo_cancel() {
  HeapWord* obj_addr = resume_coalesce_and_fill();

  assert(!is_humongous(), "No need to fill or coalesce humongous regions");
  if (!is_active()) {
    end_preemptible_coalesce_and_fill();
    return true;
  }

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahMarkingContext* marking_context = heap->marking_context();
  // All objects above TAMS are considered live even though their mark bits will not be set. Note that young-
  // gen evacuations that interrupt a long-running old-gen concurrent mark may promote objects into old-gen
  // while the old-gen concurrent marking is ongoing. These newly promoted objects will reside above TAMS
  // and will be treated as live during the current old-gen marking pass, even though they will not be
  // explicitly marked.
  HeapWord* t = marking_context->top_at_mark_start(this);

  // Expect marking to be completed before these threads invoke this service.
  assert(heap->active_generation()->is_mark_complete(), "sanity");
  while (obj_addr < t) {
    oop obj = cast_to_oop(obj_addr);
    if (marking_context->is_marked(obj)) {
      assert(obj->klass() != nullptr, "klass should not be nullptr");
      obj_addr += obj->size();
    } else {
      // Object is not marked. Coalesce and fill dead object with dead neighbors.
      HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
      assert(next_marked_obj <= t, "next marked object cannot exceed top");
      size_t fill_size = next_marked_obj - obj_addr;
      ShenandoahHeap::fill_with_object(obj_addr, fill_size);
      heap->card_scan()->coalesce_objects(obj_addr, fill_size);
      obj_addr = next_marked_obj;
    }
  }
  // Mark that this region has been coalesced and filled
  end_preemptible_coalesce_and_fill();
  return true;
}

// oop_iterate without closure; returns true if completed without cancellation.
bool ShenandoahHeapRegion::oop_fill_and_coalesce() {
  HeapWord* obj_addr = resume_coalesce_and_fill();
  // Consider yielding to cancel/preemption request after this many coalesce operations (skip marked, or coalesce free).
  const size_t preemption_stride = 128;

  assert(!is_humongous(), "No need to fill or coalesce humongous regions");
  if (!is_active()) {
    end_preemptible_coalesce_and_fill();
    return true;
  }

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahMarkingContext* marking_context = heap->marking_context();
  // All objects above TAMS are considered live even though their mark bits will not be set. Note that young-
  // gen evacuations that interrupt a long-running old-gen concurrent mark may promote objects into old-gen
  // while the old-gen concurrent marking is ongoing. These newly promoted objects will reside above TAMS
  // and will be treated as live during the current old-gen marking pass, even though they will not be
  // explicitly marked.
  HeapWord* t = marking_context->top_at_mark_start(this);

  // Expect marking to be completed before these threads invoke this service.
  assert(heap->active_generation()->is_mark_complete(), "sanity");

  size_t ops_before_preempt_check = preemption_stride;
  while (obj_addr < t) {
    oop obj = cast_to_oop(obj_addr);
    if (marking_context->is_marked(obj)) {
      assert(obj->klass() != nullptr, "klass should not be nullptr");
      obj_addr += obj->size();
    } else {
      // Object is not marked. Coalesce and fill dead object with dead neighbors.
      HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
      assert(next_marked_obj <= t, "next marked object cannot exceed top");
      size_t fill_size = next_marked_obj - obj_addr;
      ShenandoahHeap::fill_with_object(obj_addr, fill_size);
      heap->card_scan()->coalesce_objects(obj_addr, fill_size);
      obj_addr = next_marked_obj;
    }
    if (ops_before_preempt_check-- == 0) {
      if (heap->cancelled_gc()) {
        suspend_coalesce_and_fill(obj_addr);
        return false;
      }
      ops_before_preempt_check = preemption_stride;
    }
  }
  // Mark that this region has been coalesced and filled
  end_preemptible_coalesce_and_fill();
  return true;
}

void ShenandoahHeapRegion::global_oop_iterate_and_fill_dead(OopIterateClosure* blk) {
  if (!is_active()) return;
  if (is_humongous()) {
    // No need to fill dead within humongous regions. Either the entire region is dead, or the entire region is
    // unchanged. A humongous region holds no more than one humongous object.
    oop_iterate_humongous(blk);
  } else {
    global_oop_iterate_objects_and_fill_dead(blk);
  }
}

void ShenandoahHeapRegion::global_oop_iterate_objects_and_fill_dead(OopIterateClosure* blk) {
  assert(!is_humongous(), "no humongous region here");
  HeapWord* obj_addr = bottom();

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahMarkingContext* marking_context = heap->marking_context();
  RememberedScanner* rem_set_scanner = heap->card_scan();
  // Objects allocated above TAMS are not marked, but are considered live for purposes of current GC efforts.
  HeapWord* t = marking_context->top_at_mark_start(this);

  assert(heap->active_generation()->is_mark_complete(), "sanity");

  while (obj_addr < t) {
    oop obj = cast_to_oop(obj_addr);
    if (marking_context->is_marked(obj)) {
      assert(obj->klass() != nullptr, "klass should not be nullptr");
      // when promoting an entire region, we have to register the marked objects as well
      obj_addr += obj->oop_iterate_size(blk);
    } else {
      // Object is not marked. Coalesce and fill dead object with dead neighbors.
      HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
      assert(next_marked_obj <= t, "next marked object cannot exceed top");
      size_t fill_size = next_marked_obj - obj_addr;
      ShenandoahHeap::fill_with_object(obj_addr, fill_size);

      // coalesce_objects() unregisters all but first object subsumed within coalesced range.
      rem_set_scanner->coalesce_objects(obj_addr, fill_size);
      obj_addr = next_marked_obj;
    }
  }

  // Any object above TAMS and below top() is considered live.
  t = top();
  while (obj_addr < t) {
    oop obj = cast_to_oop(obj_addr);
    obj_addr += obj->oop_iterate_size(blk);
  }
}

// DO NOT CANCEL. If this worker thread has accepted responsibility for scanning a particular range of addresses, it
// must finish the work before it can be cancelled.
void ShenandoahHeapRegion::oop_iterate_humongous_slice(OopIterateClosure* blk, bool dirty_only,
                                                       HeapWord* start, size_t words, bool write_table) {
  assert(words % CardTable::card_size_in_words() == 0, "Humongous iteration must span whole number of cards");
  assert(is_humongous(), "only humongous region here");
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Find head.
  ShenandoahHeapRegion* r = humongous_start_region();
  assert(r->is_humongous_start(), "need humongous head here");
  assert(CardTable::card_size_in_words() * (words / CardTable::card_size_in_words()) == words,
         "slice must be integral number of cards");

  oop obj = cast_to_oop(r->bottom());
  RememberedScanner* scanner = ShenandoahHeap::heap()->card_scan();
  size_t card_index = scanner->card_index_for_addr(start);
  size_t num_cards = words / CardTable::card_size_in_words();

  if (dirty_only) {
    if (write_table) {
      while (num_cards-- > 0) {
        if (scanner->is_write_card_dirty(card_index++)) {
          obj->oop_iterate(blk, MemRegion(start, start + CardTable::card_size_in_words()));
        }
        start += CardTable::card_size_in_words();
      }
    } else {
      while (num_cards-- > 0) {
        if (scanner->is_card_dirty(card_index++)) {
          obj->oop_iterate(blk, MemRegion(start, start + CardTable::card_size_in_words()));
        }
        start += CardTable::card_size_in_words();
      }
    }
  } else {
    // Scan all data, regardless of whether cards are dirty
    obj->oop_iterate(blk, MemRegion(start, start + num_cards * CardTable::card_size_in_words()));
  }
}

void ShenandoahHeapRegion::oop_iterate_humongous(OopIterateClosure* blk, HeapWord* start, size_t words) {
  assert(is_humongous(), "only humongous region here");
  // Find head.
  ShenandoahHeapRegion* r = humongous_start_region();
  assert(r->is_humongous_start(), "need humongous head here");
  oop obj = cast_to_oop(r->bottom());
  obj->oop_iterate(blk, MemRegion(start, start + words));
}

void ShenandoahHeapRegion::oop_iterate_humongous(OopIterateClosure* blk) {
  assert(is_humongous(), "only humongous region here");
  // Find head.
  ShenandoahHeapRegion* r = humongous_start_region();
  assert(r->is_humongous_start(), "need humongous head here");
  oop obj = cast_to_oop(r->bottom());
  obj->oop_iterate(blk, MemRegion(bottom(), top()));
}

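// Walk backwards from this region's index until the humongous start region is
// found; every region passed on the way must itself be humongous.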
ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(is_humongous(), "Must be a part of the humongous region");
  size_t i = index();
  ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this);
  while (!r->is_humongous_start()) {
    assert(i > 0, "Sanity");
    i--;
    r = heap->get_region(i);
    assert(r->is_humongous(), "Must be a part of the humongous region");
  }
  assert(r->is_humongous_start(), "Must be");
  return r;
}

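// Generational recycle(): in addition to the reset done in the single-generation
// code, return this region's used bytes to its generation's accounting and
// detach it from any generation by setting the affiliation to FREE.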
void ShenandoahHeapRegion::recycle() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  shenandoah_assert_heaplocked();

  heap->generation_for(affiliation())->decrease_used(used());

  set_top(bottom());
  clear_live_data();

  reset_alloc_metadata();

  heap->marking_context()->reset_top_at_mark_start(this);
  set_update_watermark(bottom());

  make_empty();
  set_affiliation(FREE);

  if (ZapUnusedHeapArea) {
    SpaceMangler::mangle_region(MemRegion(bottom(), end()));
  }
}

HeapWord* ShenandoahHeapRegion::block_start(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += cast_to_oop(cur)->size();
    }
    shenandoah_assert_correct(nullptr, cast_to_oop(last));
    return last;
  }
}

size_t ShenandoahHeapRegion::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p < top()) {
    return cast_to_oop(p)->size();
  } else {
    assert(p == top(), "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

size_t ShenandoahHeapRegion::setup_sizes(size_t max_heap_size) {
  // Absolute minimums we should not ever break.
  static const size_t MIN_REGION_SIZE = 256*K;

  if (FLAG_IS_DEFAULT(ShenandoahMinRegionSize)) {
    FLAG_SET_DEFAULT(ShenandoahMinRegionSize, MIN_REGION_SIZE);
  }

  // Generational Shenandoah needs this alignment for card tables.
  if (strcmp(ShenandoahGCMode, "generational") == 0) {
    max_heap_size = align_up(max_heap_size, CardTable::ct_max_alignment_constraint());
  }

  size_t region_size;
  if (FLAG_IS_DEFAULT(ShenandoahRegionSize)) {
    if (ShenandoahMinRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
                      MIN_NUM_REGIONS,
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than minimum region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(MIN_REGION_SIZE), proper_unit_for_byte_size(MIN_REGION_SIZE));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MinTLABSize) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than TLAB size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(MinTLABSize), proper_unit_for_byte_size(MinTLABSize));

// ... (the remainder of setup_sizes() through the start of set_state() is not
// present in this excerpt) ...

    evt.set_used(used());
    evt.set_from(_state);
    evt.set_to(to);
    evt.commit();
  }
  _state = to;
}

void ShenandoahHeapRegion::record_pin() {
  Atomic::add(&_critical_pins, (size_t)1);
}

void ShenandoahHeapRegion::record_unpin() {
  assert(pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", index());
  Atomic::sub(&_critical_pins, (size_t)1);
}

size_t ShenandoahHeapRegion::pin_count() const {
  return Atomic::load(&_critical_pins);
}

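// Move this region between generations, keeping the per-generation
// affiliated-region counts consistent and (outside of Full GC) asserting that
// the target generation's adjusted capacity is not exceeded. In
// non-generational mode this reduces to recording the new affiliation.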
void ShenandoahHeapRegion::set_affiliation(ShenandoahRegionAffiliation new_affiliation) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahRegionAffiliation region_affiliation = heap->region_affiliation(this);
  {
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    log_debug(gc)("Setting affiliation of Region " SIZE_FORMAT " from %s to %s, top: " PTR_FORMAT ", TAMS: " PTR_FORMAT
                  ", watermark: " PTR_FORMAT ", top_bitmap: " PTR_FORMAT,
                  index(), affiliation_name(region_affiliation), affiliation_name(new_affiliation),
                  p2i(top()), p2i(ctx->top_at_mark_start(this)), p2i(_update_watermark), p2i(ctx->top_bitmap(this)));
  }

#ifdef ASSERT
  {
    // During full GC, heap->complete_marking_context() is not valid and may equal nullptr.
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    size_t idx = this->index();
    HeapWord* top_bitmap = ctx->top_bitmap(this);

    assert(ctx->is_bitmap_clear_range(top_bitmap, _end),
           "Region " SIZE_FORMAT ", bitmap should be clear between top_bitmap: " PTR_FORMAT " and end: " PTR_FORMAT, idx,
           p2i(top_bitmap), p2i(_end));
  }
#endif

  if (region_affiliation == new_affiliation) {
    return;
  }

  if (!heap->mode()->is_generational()) {
    heap->set_affiliation(this, new_affiliation);
    return;
  }

  log_trace(gc)("Changing affiliation of region %zu from %s to %s",
                index(), affiliation_name(region_affiliation), affiliation_name(new_affiliation));

  if (region_affiliation == ShenandoahRegionAffiliation::YOUNG_GENERATION) {
    heap->young_generation()->decrement_affiliated_region_count();
  } else if (region_affiliation == ShenandoahRegionAffiliation::OLD_GENERATION) {
    heap->old_generation()->decrement_affiliated_region_count();
  }

  size_t regions;
  switch (new_affiliation) {
    case FREE:
      assert(!has_live(), "Free region should not have live data");
      break;
    case YOUNG_GENERATION:
      reset_age();
      regions = heap->young_generation()->increment_affiliated_region_count();
      // During Full GC, we allow temporary violation of this requirement. We enforce that this condition is
      // restored upon completion of Full GC.
      assert(heap->is_full_gc_in_progress() ||
             (regions * ShenandoahHeapRegion::region_size_bytes() <= heap->young_generation()->adjusted_capacity()),
             "Number of young regions cannot exceed adjusted capacity");
      break;
    case OLD_GENERATION:
      regions = heap->old_generation()->increment_affiliated_region_count();
      // During Full GC, we allow temporary violation of this requirement. We enforce that this condition is
      // restored upon completion of Full GC.
      assert(heap->is_full_gc_in_progress() ||
             (regions * ShenandoahHeapRegion::region_size_bytes() <= heap->old_generation()->adjusted_capacity()),
             "Number of old regions cannot exceed adjusted capacity");
      break;
    default:
      ShouldNotReachHere();
      return;
  }
  heap->set_affiliation(this, new_affiliation);
}

// Returns number of regions promoted, or zero if we choose not to promote.
size_t ShenandoahHeapRegion::promote_humongous() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahMarkingContext* marking_context = heap->marking_context();
  assert(heap->active_generation()->is_mark_complete(), "sanity");
  assert(is_young(), "Only young regions can be promoted");
  assert(is_humongous_start(), "Should not promote humongous continuation in isolation");
  assert(age() >= InitialTenuringThreshold, "Only promote regions that are sufficiently aged");

  ShenandoahGeneration* old_generation = heap->old_generation();
  ShenandoahGeneration* young_generation = heap->young_generation();

  oop obj = cast_to_oop(bottom());
  assert(marking_context->is_marked(obj), "promoted humongous object should be alive");

  // TODO: Consider not promoting humongous objects that represent primitive arrays. Leaving a primitive array
  // (obj->is_typeArray()) in young-gen is harmless because these objects are never relocated and they are not
  // scanned. Leaving primitive arrays in young-gen memory allows their memory to be reclaimed more quickly when
  // it becomes garbage. Better to not make this change until sizes of young-gen and old-gen are completely
  // adaptive, as leaving primitive arrays in young-gen might be perceived as an "astonishing result" by someone
  // who has carefully analyzed the required sizes of an application's young-gen and old-gen.

  size_t spanned_regions = ShenandoahHeapRegion::required_regions(obj->size() * HeapWordSize);
  size_t index_limit = index() + spanned_regions;

  {
    // We need to grab the heap lock in order to avoid a race when changing the affiliations of spanned_regions from
    // young to old.
    ShenandoahHeapLocker locker(heap->lock());
    size_t available_old_regions = old_generation->adjusted_unaffiliated_regions();
    if (spanned_regions <= available_old_regions) {
      log_debug(gc)("promoting humongous region " SIZE_FORMAT ", spanning " SIZE_FORMAT, index(), spanned_regions);

      // For this region and each humongous continuation region spanned by this humongous object, change
      // affiliation to OLD_GENERATION and adjust the generation-use tallies. The remnant of memory
      // in the last humongous region that is not spanned by obj is currently not used.
      for (size_t i = index(); i < index_limit; i++) {
        ShenandoahHeapRegion* r = heap->get_region(i);
        log_debug(gc)("promoting humongous region " SIZE_FORMAT ", from " PTR_FORMAT " to " PTR_FORMAT,
                      r->index(), p2i(r->bottom()), p2i(r->top()));
        // We mark the entire humongous object's range as dirty after loop terminates, so no need to dirty the range here
        r->set_affiliation(OLD_GENERATION);
        old_generation->increase_used(r->used());
        young_generation->decrease_used(r->used());
      }
      // Then fall through to finish the promotion after releasing the heap lock.
    } else {
      // There are not enough available old regions to promote this humongous region at this time, so defer promotion.
      // TODO: Consider allowing the promotion now, with the expectation that we can resize and/or collect OLD
      // momentarily to address the transient violation of budgets. Some problems that need to be addressed in order
      // to allow transient violation of capacity budgets are:
      //  1. Various size_t subtractions assume usage is less than capacity, and thus assume there will be no
      //     arithmetic underflow when we subtract usage from capacity. The results of such size_t subtractions
      //     would need to be guarded and special handling provided.
      //  2. ShenandoahVerifier enforces that usage is less than capacity. If we are going to relax this constraint,
      //     we need to think about what conditions allow the constraint to be violated and document and implement the
      //     changes.
      return 0;
    }
  }

  // Since this region may have served previously as OLD, it may hold obsolete object range info.
  heap->card_scan()->reset_object_range(bottom(), bottom() + spanned_regions * ShenandoahHeapRegion::region_size_words());
  // Since the humongous region holds only one object, no lock is necessary for this register_object() invocation.
  heap->card_scan()->register_object_wo_lock(bottom());

  if (obj->is_typeArray()) {
    // Primitive arrays don't need to be scanned.
    log_debug(gc)("Clean cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT,
                  index(), p2i(bottom()), p2i(bottom() + obj->size()));
    heap->card_scan()->mark_range_as_clean(bottom(), obj->size());
  } else {
    log_debug(gc)("Dirty cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT,
                  index(), p2i(bottom()), p2i(bottom() + obj->size()));
    heap->card_scan()->mark_range_as_dirty(bottom(), obj->size());
  }
  return index_limit - index();
}