/*
 * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/powerOfTwo.hpp"

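// Region geometry is computed once, in setup_sizes(), and never changes afterwards
// (see the "we should only set it once" guarantees there).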
size_t ShenandoahHeapRegion::RegionCount = 0;
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
size_t ShenandoahHeapRegion::RegionSizeWords = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;
size_t ShenandoahHeapRegion::HumongousThresholdBytes = 0;
size_t ShenandoahHeapRegion::HumongousThresholdWords = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0;

ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed) :
  _index(index),
  _bottom(start),
  _end(start + RegionSizeWords),
  _new_top(nullptr),
  _empty_time(os::elapsedTime()),
  _state(committed ? _empty_committed : _empty_uncommitted),
  _top(start),
  _tlab_allocs(0),
  _gclab_allocs(0),
  _live_data(0),
  _critical_pins(0),
  _update_watermark(start) {

  assert(Universe::on_page_boundary(_bottom) && Universe::on_page_boundary(_end),
         "invalid space boundaries");
  if (ZapUnusedHeapArea && committed) {
    SpaceMangler::mangle_region(MemRegion(_bottom, _end));
  }
}

void ShenandoahHeapRegion::report_illegal_transition(const char *method) {
  stringStream ss;
  ss.print("Illegal region state transition from \"%s\", at %s\n ", region_state_to_string(_state), method);
  print_on(&ss);
  fatal("%s", ss.freeze());
}

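// The make_*() methods below implement the region state machine. Several of the
// switch statements rely on intentional fall-through: for example, an
// _empty_uncommitted region is first committed and then falls into the
// _empty_committed case to continue the transition. Missing break statements
// in these switches are deliberate.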
void ShenandoahHeapRegion::make_regular_allocation() {
  shenandoah_assert_heaplocked();

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
      set_state(_regular);
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular allocation");
  }
}

void ShenandoahHeapRegion::make_regular_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress() || ShenandoahHeap::heap()->is_degenerated_gc_in_progress(),
          "only for full or degen GC");

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
    case _cset:
    case _humongous_start:
    case _humongous_cont:
      set_state(_regular);
      return;
    case _pinned_cset:
      set_state(_pinned);
      return;
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular bypass");
  }
}

void ShenandoahHeapRegion::make_humongous_start() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start allocation");
  }
}

void ShenandoahHeapRegion::make_humongous_start_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start bypass");
  }
}

void ShenandoahHeapRegion::make_humongous_cont() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation allocation");
  }
}

void ShenandoahHeapRegion::make_humongous_cont_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation bypass");
  }
}

void ShenandoahHeapRegion::make_pinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() > 0, "Should have pins: " SIZE_FORMAT, pin_count());

  switch (_state) {
    case _regular:
      set_state(_pinned);
    case _pinned_cset:
    case _pinned:
      return;
    case _humongous_start:
      set_state(_pinned_humongous_start);
    case _pinned_humongous_start:
      return;
    case _cset:
      _state = _pinned_cset;
      return;
    default:
      report_illegal_transition("pinning");
  }
}

void ShenandoahHeapRegion::make_unpinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() == 0, "Should not have pins: " SIZE_FORMAT, pin_count());

  switch (_state) {
    case _pinned:
      set_state(_regular);
      return;
    case _regular:
    case _humongous_start:
      return;
    case _pinned_cset:
      set_state(_cset);
      return;
    case _pinned_humongous_start:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("unpinning");
  }
}

void ShenandoahHeapRegion::make_cset() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _regular:
      set_state(_cset);
    case _cset:
      return;
    default:
      report_illegal_transition("cset");
  }
}

void ShenandoahHeapRegion::make_trash() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _cset:
      // Reclaiming cset regions
    case _humongous_start:
    case _humongous_cont:
      // Reclaiming humongous regions
    case _regular:
      // Immediate region reclaim
      set_state(_trash);
      return;
    default:
      report_illegal_transition("trashing");
  }
}

void ShenandoahHeapRegion::make_trash_immediate() {
  make_trash();

  // On this path, we know there are no marked objects in the region;
  // tell the marking context about it to bypass bitmap resets.
  ShenandoahHeap::heap()->complete_marking_context()->reset_top_bitmap(this);
}

void ShenandoahHeapRegion::make_empty() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _trash:
      set_state(_empty_committed);
      _empty_time = os::elapsedTime();
      return;
    default:
      report_illegal_transition("emptying");
  }
}

void ShenandoahHeapRegion::make_uncommitted() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _empty_committed:
      do_uncommit();
      set_state(_empty_uncommitted);
      return;
    default:
      report_illegal_transition("uncommitting");
  }
}

void ShenandoahHeapRegion::make_committed_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      set_state(_empty_committed);
      return;
    default:
      report_illegal_transition("commit bypass");
  }
}

void ShenandoahHeapRegion::reset_alloc_metadata() {
  _tlab_allocs = 0;
  _gclab_allocs = 0;
}

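// Shared (non-LAB) allocations are whatever part of used() was not handed out
// through TLABs or GCLABs. The LAB counters are kept in heap words, hence the
// conversions below.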
size_t ShenandoahHeapRegion::get_shared_allocs() const {
  return used() - (_tlab_allocs + _gclab_allocs) * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_tlab_allocs() const {
  return _tlab_allocs * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_gclab_allocs() const {
  return _gclab_allocs * HeapWordSize;
}

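// Live data is stored in heap words; the argument is in bytes.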
void ShenandoahHeapRegion::set_live_data(size_t s) {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = (s >> LogHeapWordSize);
}

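// Single-line region dump: index, state tag, then BTE (bottom/top/end),
// TAMS (top-at-mark-start), UWM (update watermark), U (used), T (TLAB allocs),
// G (GCLAB allocs), S (shared allocs), L (live data), CP (critical pin count).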
void ShenandoahHeapRegion::print_on(outputStream* st) const {
  st->print("|");
  st->print(SIZE_FORMAT_W(5), this->_index);

  switch (_state) {
    case _empty_uncommitted:
      st->print("|EU ");
      break;
    case _empty_committed:
      st->print("|EC ");
      break;
    case _regular:
      st->print("|R  ");
      break;
    case _humongous_start:
      st->print("|H  ");
      break;
    case _pinned_humongous_start:
      st->print("|HP ");
      break;
    case _humongous_cont:
      st->print("|HC ");
      break;
    case _cset:
      st->print("|CS ");
      break;
    case _trash:
      st->print("|TR ");
      break;
    case _pinned:
      st->print("|P  ");
      break;
    case _pinned_cset:
      st->print("|CSP");
      break;
    default:
      ShouldNotReachHere();
  }

#define SHR_PTR_FORMAT "%12" PRIxPTR

  st->print("|BTE " SHR_PTR_FORMAT ", " SHR_PTR_FORMAT ", " SHR_PTR_FORMAT,
            p2i(bottom()), p2i(top()), p2i(end()));
  st->print("|TAMS " SHR_PTR_FORMAT,
            p2i(ShenandoahHeap::heap()->marking_context()->top_at_mark_start(const_cast<ShenandoahHeapRegion*>(this))));
  st->print("|UWM " SHR_PTR_FORMAT,
            p2i(_update_watermark));
  st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));
  st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()), proper_unit_for_byte_size(get_tlab_allocs()));
  st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()), proper_unit_for_byte_size(get_gclab_allocs()));
  st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()), proper_unit_for_byte_size(get_shared_allocs()));
  st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes()));
  st->print("|CP " SIZE_FORMAT_W(3), pin_count());
  st->cr();

#undef SHR_PTR_FORMAT
}

void ShenandoahHeapRegion::oop_iterate(OopIterateClosure* blk) {
  if (!is_active()) return;
  if (is_humongous()) {
    oop_iterate_humongous(blk);
  } else {
    oop_iterate_objects(blk);
  }
}

void ShenandoahHeapRegion::oop_iterate_objects(OopIterateClosure* blk) {
  assert(! is_humongous(), "no humongous region here");
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call the generic object iteration instead, but walking objects directly is easier.
  while (obj_addr < t) {
    oop obj = cast_to_oop(obj_addr);
    obj_addr += obj->oop_iterate_size(blk);
  }
}

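// A humongous object starts at the bottom of its start region and may span
// several continuation regions; iterate only the slice of it that overlaps
// this region.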
void ShenandoahHeapRegion::oop_iterate_humongous(OopIterateClosure* blk) {
  assert(is_humongous(), "only humongous region here");
  // Find head.
  ShenandoahHeapRegion* r = humongous_start_region();
  assert(r->is_humongous_start(), "need humongous head here");
  oop obj = cast_to_oop(r->bottom());
  obj->oop_iterate(blk, MemRegion(bottom(), top()));
}

ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(is_humongous(), "Must be a part of the humongous region");
  size_t i = index();
  ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this);
  while (!r->is_humongous_start()) {
    assert(i > 0, "Sanity");
    i--;
    r = heap->get_region(i);
    assert(r->is_humongous(), "Must be a part of the humongous region");
  }
  assert(r->is_humongous_start(), "Must be");
  return r;
}

void ShenandoahHeapRegion::recycle() {
  set_top(bottom());
  clear_live_data();

  reset_alloc_metadata();

  ShenandoahHeap::heap()->marking_context()->reset_top_at_mark_start(this);
  set_update_watermark(bottom());

  make_empty();

  if (ZapUnusedHeapArea) {
    SpaceMangler::mangle_region(MemRegion(bottom(), end()));
  }
}

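// There is no block-offset table for Shenandoah regions, so block_start() resolves
// an address by walking objects linearly from bottom(); the walk is bounded by a
// single region.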
HeapWord* ShenandoahHeapRegion::block_start(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += cast_to_oop(cur)->size();
    }
    shenandoah_assert_correct(nullptr, cast_to_oop(last));
    return last;
  }
}

size_t ShenandoahHeapRegion::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p < top()) {
    return cast_to_oop(p)->size();
  } else {
    assert(p == top(), "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

size_t ShenandoahHeapRegion::setup_sizes(size_t max_heap_size) {
  // Absolute minimums we should not ever break.
  static const size_t MIN_REGION_SIZE = 256*K;

  if (FLAG_IS_DEFAULT(ShenandoahMinRegionSize)) {
    FLAG_SET_DEFAULT(ShenandoahMinRegionSize, MIN_REGION_SIZE);
  }

  size_t region_size;
  if (FLAG_IS_DEFAULT(ShenandoahRegionSize)) {
    if (ShenandoahMinRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
                      MIN_NUM_REGIONS,
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than minimum region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(MIN_REGION_SIZE), proper_unit_for_byte_size(MIN_REGION_SIZE));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MinTLABSize) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than TLAB size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(MinTLABSize), proper_unit_for_byte_size(MinTLABSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than min region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize),
                      byte_size_in_proper_unit(MIN_REGION_SIZE), proper_unit_for_byte_size(MIN_REGION_SIZE));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message);
    }
    if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Minimum (" SIZE_FORMAT "%s) should not be larger than maximum (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message);
    }

    // We rapidly expand to max_heap_size in most scenarios, so that is the measure
    // for usual heap sizes. Do not depend on initial_heap_size here.
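    // For example, a 16G max heap with ShenandoahTargetNumRegions at 2048 yields
    // an 8M raw region size before the clamping below.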
    region_size = max_heap_size / ShenandoahTargetNumRegions;

    // Now make sure that we don't go over or under our limits.
    region_size = MAX2(ShenandoahMinRegionSize, region_size);
    region_size = MIN2(ShenandoahMaxRegionSize, region_size);

  } else {
    if (ShenandoahRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
                      MIN_NUM_REGIONS,
                      byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    if (ShenandoahRegionSize < ShenandoahMinRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "%s) should be larger than min region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize),
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    if (ShenandoahRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "%s) should be lower than max region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize),
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    region_size = ShenandoahRegionSize;
  }

  // Make sure region size and heap size are page aligned.
  // If large pages are used, we ensure that region size is aligned to large page size if
  // heap size is large enough to accommodate the minimal number of regions. Otherwise, we
  // align region size to regular page size.

  // Figure out the page size to use, and align the heap up to it.
  size_t page_size = os::vm_page_size();
  if (UseLargePages) {
    size_t large_page_size = os::large_page_size();
    max_heap_size = align_up(max_heap_size, large_page_size);
    if ((max_heap_size / align_up(region_size, large_page_size)) >= MIN_NUM_REGIONS) {
      page_size = large_page_size;
    } else {
      // Should have been checked during argument initialization
      assert(!ShenandoahUncommit, "Uncommit requires that region size aligns to large page size");
    }
  } else {
    max_heap_size = align_up(max_heap_size, page_size);
  }

  // Align region size to page size
  region_size = align_up(region_size, page_size);

  int region_size_log = log2i(region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
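  // For example, a computed region_size of 5M rounds down to 4M here.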
  region_size = size_t(1) << region_size_log;

  // Now, set up the globals.
  guarantee(RegionSizeBytesShift == 0, "we should only set it once");
  RegionSizeBytesShift = (size_t)region_size_log;

  guarantee(RegionSizeWordsShift == 0, "we should only set it once");
  RegionSizeWordsShift = RegionSizeBytesShift - LogHeapWordSize;

  guarantee(RegionSizeBytes == 0, "we should only set it once");
  RegionSizeBytes = region_size;
  RegionSizeWords = RegionSizeBytes >> LogHeapWordSize;
  assert (RegionSizeWords*HeapWordSize == RegionSizeBytes, "sanity");

  guarantee(RegionSizeWordsMask == 0, "we should only set it once");
  RegionSizeWordsMask = RegionSizeWords - 1;

  guarantee(RegionSizeBytesMask == 0, "we should only set it once");
  RegionSizeBytesMask = RegionSizeBytes - 1;

  guarantee(RegionCount == 0, "we should only set it once");
  RegionCount = align_up(max_heap_size, RegionSizeBytes) / RegionSizeBytes;
  guarantee(RegionCount >= MIN_NUM_REGIONS, "Should have at least minimum regions");

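  // ShenandoahHumongousThreshold is a percentage of the region size; objects
  // exceeding this many words are allocated via the humongous path.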
  guarantee(HumongousThresholdWords == 0, "we should only set it once");
  HumongousThresholdWords = RegionSizeWords * ShenandoahHumongousThreshold / 100;
  HumongousThresholdWords = align_down(HumongousThresholdWords, MinObjAlignment);
  assert (HumongousThresholdWords <= RegionSizeWords, "sanity");

  guarantee(HumongousThresholdBytes == 0, "we should only set it once");
  HumongousThresholdBytes = HumongousThresholdWords * HeapWordSize;
  assert (HumongousThresholdBytes <= RegionSizeBytes, "sanity");

  guarantee(MaxTLABSizeWords == 0, "we should only set it once");
  MaxTLABSizeWords = MIN2(RegionSizeWords, HumongousThresholdWords);
  MaxTLABSizeWords = align_down(MaxTLABSizeWords, MinObjAlignment);

  guarantee(MaxTLABSizeBytes == 0, "we should only set it once");
  MaxTLABSizeBytes = MaxTLABSizeWords * HeapWordSize;
  assert (MaxTLABSizeBytes > MinTLABSize, "should be larger");

  return max_heap_size;
}

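// Commit the region's backing memory and the matching slice of the marking bitmap.
// A failure of either is reported as a Java-level out-of-memory condition.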
void ShenandoahHeapRegion::do_commit() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->is_heap_region_special() && !os::commit_memory((char *) bottom(), RegionSizeBytes, false)) {
    report_java_out_of_memory("Unable to commit region");
  }
  if (!heap->commit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to commit bitmaps for region");
  }
  if (AlwaysPreTouch) {
    os::pretouch_memory(bottom(), end(), heap->pretouch_heap_page_size());
  }
  heap->increase_committed(ShenandoahHeapRegion::region_size_bytes());
}

void ShenandoahHeapRegion::do_uncommit() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->is_heap_region_special() && !os::uncommit_memory((char *) bottom(), RegionSizeBytes)) {
    report_java_out_of_memory("Unable to uncommit region");
  }
  if (!heap->uncommit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to uncommit bitmaps for region");
  }
  heap->decrease_committed(ShenandoahHeapRegion::region_size_bytes());
}

void ShenandoahHeapRegion::set_state(RegionState to) {
  EventShenandoahHeapRegionStateChange evt;
  if (evt.should_commit()) {
    evt.set_index((unsigned) index());
    evt.set_start((uintptr_t)bottom());
    evt.set_used(used());
    evt.set_from(_state);
    evt.set_to(to);
    evt.commit();
  }
  _state = to;
}

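// Pin bookkeeping uses atomic counters; see make_pinned()/make_unpinned() for the
// corresponding state transitions, which are taken under the heap lock.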
void ShenandoahHeapRegion::record_pin() {
  Atomic::add(&_critical_pins, (size_t)1);
}

void ShenandoahHeapRegion::record_unpin() {
  assert(pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", index());
  Atomic::sub(&_critical_pins, (size_t)1);
}

size_t ShenandoahHeapRegion::pin_count() const {
  return Atomic::load(&_critical_pins);
}