/*
 * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/shenandoahCardTable.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/powerOfTwo.hpp"

size_t ShenandoahHeapRegion::RegionCount = 0;
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
size_t ShenandoahHeapRegion::RegionSizeWords = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0;
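
// For orientation, a sketch of the common region state transitions implemented by the
// make_* methods below (not exhaustive; the switch statements are authoritative):
//
//   Empty (Uncommitted) <-> Empty (Committed) -> Regular -> Collection Set -> Trash -> Empty (Committed)
//   Regular <-> Pinned, Collection Set <-> Pinned CSet, Humongous Start <-> Pinned Humongous Start
//
// A humongous object occupies a Humongous Start region followed by zero or more
// Humongous Continuation regions.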

ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed) :
  _index(index),
  _bottom(start),
  _end(start + RegionSizeWords),
  _new_top(nullptr),
  _empty_time(os::elapsedTime()),
  _top_before_promoted(nullptr),
  _state(committed ? _empty_committed : _empty_uncommitted),
  _top(start),
  _tlab_allocs(0),
  _gclab_allocs(0),
  _plab_allocs(0),
  _live_data(0),
  _critical_pins(0),
  _update_watermark(start),
  _age(0)
#ifdef SHENANDOAH_CENSUS_NOISE
  , _youth(0)
#endif // SHENANDOAH_CENSUS_NOISE
  {

  assert(Universe::on_page_boundary(_bottom) && Universe::on_page_boundary(_end),
         "invalid space boundaries");
  if (ZapUnusedHeapArea && committed) {
    SpaceMangler::mangle_region(MemRegion(_bottom, _end));
  }
}

void ShenandoahHeapRegion::report_illegal_transition(const char *method) {
  stringStream ss;
  ss.print("Illegal region state transition from \"%s\", at %s\n ", region_state_to_string(_state), method);
  print_on(&ss);
  fatal("%s", ss.freeze());
}

void ShenandoahHeapRegion::make_regular_allocation(ShenandoahAffiliation affiliation) {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // fall through: a freshly committed region proceeds as empty-committed
    case _empty_committed:
      assert(this->affiliation() == affiliation, "Region affiliation should already be established");
      set_state(_regular);
      // fall through
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular allocation");
  }
}

// Change affiliation to YOUNG_GENERATION if _state is not _pinned_cset, _regular, or _pinned. This implements
// behavior previously performed as a side effect of make_regular_bypass(). This is used by Full GC in non-generational
// modes to transition regions from FREE. Note that all non-free regions in single-generational modes are young.
void ShenandoahHeapRegion::make_affiliated_maybe() {
  shenandoah_assert_heaplocked();
  assert(!ShenandoahHeap::heap()->mode()->is_generational(), "Only call if non-generational");
  switch (_state) {
    case _empty_uncommitted:
    case _empty_committed:
    case _cset:
    case _humongous_start:
    case _humongous_cont:
      if (affiliation() != YOUNG_GENERATION) {
        set_affiliation(YOUNG_GENERATION);
      }
      return;
    case _pinned_cset:
    case _regular:
    case _pinned:
      return;
    default:
      assert(false, "Unexpected _state in make_affiliated_maybe");
  }
}

void ShenandoahHeapRegion::make_regular_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress() || ShenandoahHeap::heap()->is_degenerated_gc_in_progress(),
          "only for full or degen GC");
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // fall through
    case _empty_committed:
    case _cset:
    case _humongous_start:
    case _humongous_cont:
      set_state(_regular);
      return;
    case _pinned_cset:
      set_state(_pinned);
      return;
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular bypass");
  }
}

void ShenandoahHeapRegion::make_humongous_start() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // fall through
    case _empty_committed:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start allocation");
  }
}
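
// Like make_regular_bypass() above, the *_bypass transitions below are reserved for
// Full GC, which rebuilds region states wholesale and therefore permits transitions
// that would be illegal during concurrent operation.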

void ShenandoahHeapRegion::make_humongous_start_bypass(ShenandoahAffiliation affiliation) {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
  // Don't bother to account for affiliated regions during Full GC. We recompute totals at end.
  set_affiliation(affiliation);
  reset_age();
  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start bypass");
  }
}

void ShenandoahHeapRegion::make_humongous_cont() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // fall through
    case _empty_committed:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation allocation");
  }
}

void ShenandoahHeapRegion::make_humongous_cont_bypass(ShenandoahAffiliation affiliation) {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
  set_affiliation(affiliation);
  // Don't bother to account for affiliated regions during Full GC. We recompute totals at end.
  reset_age();
  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation bypass");
  }
}

void ShenandoahHeapRegion::make_pinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() > 0, "Should have pins: " SIZE_FORMAT, pin_count());

  switch (_state) {
    case _regular:
      set_state(_pinned);
      // fall through
    case _pinned_cset:
    case _pinned:
      return;
    case _humongous_start:
      set_state(_pinned_humongous_start);
      // fall through
    case _pinned_humongous_start:
      return;
    case _cset:
      _state = _pinned_cset;
      return;
    default:
      report_illegal_transition("pinning");
  }
}

void ShenandoahHeapRegion::make_unpinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() == 0, "Should not have pins: " SIZE_FORMAT, pin_count());

  switch (_state) {
    case _pinned:
      assert(is_affiliated(), "Pinned region should be affiliated");
      set_state(_regular);
      return;
    case _regular:
    case _humongous_start:
      return;
    case _pinned_cset:
      set_state(_cset);
      return;
    case _pinned_humongous_start:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("unpinning");
  }
}

void ShenandoahHeapRegion::make_cset() {
  shenandoah_assert_heaplocked();
  // Leave age untouched. We need to consult the age when we are deciding whether to promote evacuated objects.
  switch (_state) {
    case _regular:
      set_state(_cset);
      // fall through
    case _cset:
      return;
    default:
      report_illegal_transition("cset");
  }
}

void ShenandoahHeapRegion::make_trash() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _humongous_start:
    case _humongous_cont:
    {
      // Reclaiming a humongous region; reclaim its humongous waste here. When the region is
      // eventually recycled, we will reclaim its used memory. At recycle time, we no longer
      // recognize it as humongous.
      decrement_humongous_waste();
    }
      // fall through
    case _cset:
      // Reclaiming cset regions
    case _regular:
      // Immediate region reclaim
      set_state(_trash);
      return;
    default:
      report_illegal_transition("trashing");
  }
}

void ShenandoahHeapRegion::make_trash_immediate() {
  make_trash();

  // On this path, we know there are no marked objects in the region,
  // tell marking context about it to bypass bitmap resets.
  assert(ShenandoahHeap::heap()->gc_generation()->is_mark_complete(), "Marking should be complete here.");
  shenandoah_assert_generations_reconciled();
  ShenandoahHeap::heap()->marking_context()->reset_top_bitmap(this);
}

void ShenandoahHeapRegion::make_empty() {
  shenandoah_assert_heaplocked();
  reset_age();
  CENSUS_NOISE(clear_youth();)
  switch (_state) {
    case _trash:
      set_state(_empty_committed);
      _empty_time = os::elapsedTime();
      return;
    default:
      report_illegal_transition("emptying");
  }
}

void ShenandoahHeapRegion::make_uncommitted() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _empty_committed:
      do_uncommit();
      set_state(_empty_uncommitted);
      return;
    default:
      report_illegal_transition("uncommitting");
  }
}

void ShenandoahHeapRegion::make_committed_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      set_state(_empty_committed);
      return;
    default:
      report_illegal_transition("commit bypass");
  }
}

void ShenandoahHeapRegion::reset_alloc_metadata() {
  _tlab_allocs = 0;
  _gclab_allocs = 0;
  _plab_allocs = 0;
}

size_t ShenandoahHeapRegion::get_shared_allocs() const {
  // Shared allocations are whatever portion of used() was not allocated through LABs.
  return used() - (_tlab_allocs + _gclab_allocs + _plab_allocs) * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_tlab_allocs() const {
  return _tlab_allocs * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_gclab_allocs() const {
  return _gclab_allocs * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_plab_allocs() const {
  return _plab_allocs * HeapWordSize;
}

void ShenandoahHeapRegion::set_live_data(size_t s) {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = (s >> LogHeapWordSize);
}
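
// One region per line. Columns, in order: region index; state code (EU, EC, R, H, HP,
// HC, CS, TR, P, CSP); affiliation code; BTE (bottom, top, end); TAMS; UWM (update
// watermark); U (used); T (TLAB allocs); G (GCLAB allocs); P (PLAB allocs, generational
// mode only); S (shared allocs); L (live data); CP (critical pin count).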
void ShenandoahHeapRegion::print_on(outputStream* st) const {
  st->print("|");
  st->print(SIZE_FORMAT_W(5), this->_index);

  switch (_state) {
    case _empty_uncommitted:
      st->print("|EU ");
      break;
    case _empty_committed:
      st->print("|EC ");
      break;
    case _regular:
      st->print("|R  ");
      break;
    case _humongous_start:
      st->print("|H  ");
      break;
    case _pinned_humongous_start:
      st->print("|HP ");
      break;
    case _humongous_cont:
      st->print("|HC ");
      break;
    case _cset:
      st->print("|CS ");
      break;
    case _trash:
      st->print("|TR ");
      break;
    case _pinned:
      st->print("|P  ");
      break;
    case _pinned_cset:
      st->print("|CSP");
      break;
    default:
      ShouldNotReachHere();
  }

  st->print("|%s", shenandoah_affiliation_code(affiliation()));

#define SHR_PTR_FORMAT "%12" PRIxPTR

  st->print("|BTE " SHR_PTR_FORMAT ", " SHR_PTR_FORMAT ", " SHR_PTR_FORMAT,
            p2i(bottom()), p2i(top()), p2i(end()));
  st->print("|TAMS " SHR_PTR_FORMAT,
            p2i(ShenandoahHeap::heap()->marking_context()->top_at_mark_start(const_cast<ShenandoahHeapRegion*>(this))));
  st->print("|UWM " SHR_PTR_FORMAT,
            p2i(_update_watermark));
  st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));
  st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()), proper_unit_for_byte_size(get_tlab_allocs()));
  st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()), proper_unit_for_byte_size(get_gclab_allocs()));
  if (ShenandoahHeap::heap()->mode()->is_generational()) {
    st->print("|P " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_plab_allocs()), proper_unit_for_byte_size(get_plab_allocs()));
  }
  st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()), proper_unit_for_byte_size(get_shared_allocs()));
  st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes()));
  st->print("|CP " SIZE_FORMAT_W(3), pin_count());
  st->cr();

#undef SHR_PTR_FORMAT
}
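
// Coalesce-and-fill: after old-gen marking completes, runs of dead objects are
// overwritten with filler objects so that remembered-set (card) scanning can parse
// old-generation regions linearly.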
// oop_iterate without closure; returns true if completed without cancellation.
bool ShenandoahHeapRegion::oop_coalesce_and_fill(bool cancellable) {

  assert(!is_humongous(), "No need to fill or coalesce humongous regions");
  if (!is_active()) {
    end_preemptible_coalesce_and_fill();
    return true;
  }

  ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
  ShenandoahMarkingContext* marking_context = heap->marking_context();

  // Expect marking to be completed before these threads invoke this service.
  assert(heap->gc_generation()->is_mark_complete(), "sanity");
  shenandoah_assert_generations_reconciled();

  // All objects above TAMS are considered live even though their mark bits will not be set. Note that young-
  // gen evacuations that interrupt a long-running old-gen concurrent mark may promote objects into old-gen
  // while the old-gen concurrent marking is ongoing. These newly promoted objects will reside above TAMS
  // and will be treated as live during the current old-gen marking pass, even though they will not be
  // explicitly marked.
  HeapWord* t = marking_context->top_at_mark_start(this);

  // Resume coalesce and fill from this address
  HeapWord* obj_addr = resume_coalesce_and_fill();

  while (obj_addr < t) {
    oop obj = cast_to_oop(obj_addr);
    if (marking_context->is_marked(obj)) {
      assert(obj->klass() != nullptr, "klass should not be nullptr");
      obj_addr += obj->size();
    } else {
      // Object is not marked. Coalesce and fill dead object with dead neighbors.
      HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
      assert(next_marked_obj <= t, "next marked object cannot exceed top");
      size_t fill_size = next_marked_obj - obj_addr;
      assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
      ShenandoahHeap::fill_with_object(obj_addr, fill_size);
      heap->old_generation()->card_scan()->coalesce_objects(obj_addr, fill_size);
      obj_addr = next_marked_obj;
    }
    if (cancellable && heap->cancelled_gc()) {
      suspend_coalesce_and_fill(obj_addr);
      return false;
    }
  }
  // Mark that this region has been coalesced and filled
  end_preemptible_coalesce_and_fill();
  return true;
}

size_t get_card_count(size_t words) {
  assert(words % CardTable::card_size_in_words() == 0, "Humongous iteration must span whole number of cards");
  assert(CardTable::card_size_in_words() * (words / CardTable::card_size_in_words()) == words,
         "slice must be integral number of cards");
  return words / CardTable::card_size_in_words();
}

void ShenandoahHeapRegion::oop_iterate_humongous_slice_dirty(OopIterateClosure* blk,
                                                             HeapWord* start, size_t words, bool write_table) const {
  assert(is_humongous(), "only humongous region here");

  ShenandoahHeapRegion* r = humongous_start_region();
  oop obj = cast_to_oop(r->bottom());
  size_t num_cards = get_card_count(words);

  ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
  ShenandoahScanRemembered* scanner = heap->old_generation()->card_scan();
  size_t card_index = scanner->card_index_for_addr(start);
  if (write_table) {
    while (num_cards-- > 0) {
      if (scanner->is_write_card_dirty(card_index++)) {
        obj->oop_iterate(blk, MemRegion(start, start + CardTable::card_size_in_words()));
      }
      start += CardTable::card_size_in_words();
    }
  } else {
    while (num_cards-- > 0) {
      if (scanner->is_card_dirty(card_index++)) {
        obj->oop_iterate(blk, MemRegion(start, start + CardTable::card_size_in_words()));
      }
      start += CardTable::card_size_in_words();
    }
  }
}

void ShenandoahHeapRegion::oop_iterate_humongous_slice_all(OopIterateClosure* cl, HeapWord* start, size_t words) const {
  assert(is_humongous(), "only humongous region here");

  ShenandoahHeapRegion* r = humongous_start_region();
  oop obj = cast_to_oop(r->bottom());

  // Scan all data, regardless of whether cards are dirty
  obj->oop_iterate(cl, MemRegion(start, start + words));
}

ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(is_humongous(), "Must be a part of the humongous region");
  size_t i = index();
  ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this);
  while (!r->is_humongous_start()) {
    assert(i > 0, "Sanity");
    i--;
    r = heap->get_region(i);
    assert(r->is_humongous(), "Must be a part of the humongous region");
  }
  assert(r->is_humongous_start(), "Must be");
  return r;
}
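
// recycle() returns a trash region to service: usage accounting is rolled back, marking
// metadata and watermarks are reset, and the region ends up empty and FREE-affiliated.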
void ShenandoahHeapRegion::recycle() {
  shenandoah_assert_heaplocked();
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahGeneration* generation = heap->generation_for(affiliation());

  heap->decrease_used(generation, used());
  generation->decrement_affiliated_region_count();

  set_top(bottom());
  clear_live_data();
  reset_alloc_metadata();

  heap->marking_context()->reset_top_at_mark_start(this);

  set_update_watermark(bottom());

  make_empty();

  set_affiliation(FREE);
  if (ZapUnusedHeapArea) {
    SpaceMangler::mangle_region(MemRegion(bottom(), end()));
  }
}

HeapWord* ShenandoahHeapRegion::block_start(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p >= top()) {
    return top();
  } else {
    // Linear walk from the bottom of the region; there is no block-offset table.
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += cast_to_oop(cur)->size();
    }
    shenandoah_assert_correct(nullptr, cast_to_oop(last));
    return last;
  }
}

size_t ShenandoahHeapRegion::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p < top()) {
    return cast_to_oop(p)->size();
  } else {
    assert(p == top(), "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

size_t ShenandoahHeapRegion::setup_sizes(size_t max_heap_size) {
  // Absolute minimums we should not ever break.
  static const size_t MIN_REGION_SIZE = 256*K;

  if (FLAG_IS_DEFAULT(ShenandoahMinRegionSize)) {
    FLAG_SET_DEFAULT(ShenandoahMinRegionSize, MIN_REGION_SIZE);
  }

  // Generational Shenandoah needs this alignment for card tables.
  if (strcmp(ShenandoahGCMode, "generational") == 0) {
    max_heap_size = align_up(max_heap_size, CardTable::ct_max_alignment_constraint());
  }

  size_t region_size;
  if (FLAG_IS_DEFAULT(ShenandoahRegionSize)) {
    if (ShenandoahMinRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
                      MIN_NUM_REGIONS,
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than minimum region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(MIN_REGION_SIZE), proper_unit_for_byte_size(MIN_REGION_SIZE));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MinTLABSize) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than TLAB size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(MinTLABSize), proper_unit_for_byte_size(MinTLABSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than min region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize),
                      byte_size_in_proper_unit(MIN_REGION_SIZE), proper_unit_for_byte_size(MIN_REGION_SIZE));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message);
    }
    if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Minimum (" SIZE_FORMAT "%s) should not be larger than maximum (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message);
    }

    // We rapidly expand to max_heap_size in most scenarios, so that is the measure
    // for usual heap sizes. Do not depend on initial_heap_size here.
    region_size = max_heap_size / ShenandoahTargetNumRegions;

    // Now make sure that we don't go over or under our limits.
    region_size = MAX2(ShenandoahMinRegionSize, region_size);
    region_size = MIN2(ShenandoahMaxRegionSize, region_size);

  } else {
    if (ShenandoahRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
                      MIN_NUM_REGIONS,
                      byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    if (ShenandoahRegionSize < ShenandoahMinRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "%s) should be larger than min region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize),
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    if (ShenandoahRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "%s) should be lower than max region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize),
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    region_size = ShenandoahRegionSize;
  }
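
  // Worked example, assuming the default target region count: with a 16G max heap and
  // ShenandoahTargetNumRegions = 2048, the initial guess is 16G / 2048 = 8M per region,
  // clamped to [ShenandoahMinRegionSize, ShenandoahMaxRegionSize]; below, it is further
  // aligned to page size and rounded down to a power of two.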

  // Make sure region size and heap size are page aligned.
  // If large pages are used, we ensure that region size is aligned to large page size if
  // heap size is large enough to accommodate minimal number of regions. Otherwise, we align
  // region size to regular page size.

  // Figure out the page size to use, and align the heap up to it.
  size_t page_size = os::vm_page_size();
  if (UseLargePages) {
    size_t large_page_size = os::large_page_size();
    max_heap_size = align_up(max_heap_size, large_page_size);
    if ((max_heap_size / align_up(region_size, large_page_size)) >= MIN_NUM_REGIONS) {
      page_size = large_page_size;
    } else {
      // Should have been checked during argument initialization
      assert(!ShenandoahUncommit, "Uncommit requires region size aligns to large page size");
    }
  } else {
    max_heap_size = align_up(max_heap_size, page_size);
  }

  // Align region size to page size
  region_size = align_up(region_size, page_size);

  int region_size_log = log2i(region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = size_t(1) << region_size_log;
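
  // Keeping the region size a power of two lets region arithmetic use the shift and
  // mask globals set below (RegionSizeBytesShift, RegionSizeBytesMask, and their Words
  // counterparts) instead of division and modulo.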

  // Now, set up the globals.
  guarantee(RegionSizeBytesShift == 0, "we should only set it once");
  RegionSizeBytesShift = (size_t)region_size_log;

  guarantee(RegionSizeWordsShift == 0, "we should only set it once");
  RegionSizeWordsShift = RegionSizeBytesShift - LogHeapWordSize;

  guarantee(RegionSizeBytes == 0, "we should only set it once");
  RegionSizeBytes = region_size;
  RegionSizeWords = RegionSizeBytes >> LogHeapWordSize;
  assert (RegionSizeWords*HeapWordSize == RegionSizeBytes, "sanity");

  guarantee(RegionSizeWordsMask == 0, "we should only set it once");
  RegionSizeWordsMask = RegionSizeWords - 1;

  guarantee(RegionSizeBytesMask == 0, "we should only set it once");
  RegionSizeBytesMask = RegionSizeBytes - 1;

  guarantee(RegionCount == 0, "we should only set it once");
  RegionCount = align_up(max_heap_size, RegionSizeBytes) / RegionSizeBytes;
  guarantee(RegionCount >= MIN_NUM_REGIONS, "Should have at least minimum regions");

  guarantee(MaxTLABSizeWords == 0, "we should only set it once");
  MaxTLABSizeWords = align_down(RegionSizeWords, MinObjAlignment);

  guarantee(MaxTLABSizeBytes == 0, "we should only set it once");
  MaxTLABSizeBytes = MaxTLABSizeWords * HeapWordSize;
  assert (MaxTLABSizeBytes > MinTLABSize, "should be larger");

  return max_heap_size;
}

void ShenandoahHeapRegion::do_commit() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->is_heap_region_special() && !os::commit_memory((char *) bottom(), RegionSizeBytes, false)) {
    report_java_out_of_memory("Unable to commit region");
  }
  if (!heap->commit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to commit bitmaps for region");
  }
  if (AlwaysPreTouch) {
    os::pretouch_memory(bottom(), end(), heap->pretouch_heap_page_size());
  }
  heap->increase_committed(ShenandoahHeapRegion::region_size_bytes());
}

void ShenandoahHeapRegion::do_uncommit() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->is_heap_region_special() && !os::uncommit_memory((char *) bottom(), RegionSizeBytes)) {
    report_java_out_of_memory("Unable to uncommit region");
  }
  if (!heap->uncommit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to uncommit bitmaps for region");
  }
  heap->decrease_committed(ShenandoahHeapRegion::region_size_bytes());
}
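
// State transitions generally funnel through set_state() so that an optional JFR event
// can record them; the make_* callers above assert that the heap lock is held.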
void ShenandoahHeapRegion::set_state(RegionState to) {
  EventShenandoahHeapRegionStateChange evt;
  if (evt.should_commit()) {
    evt.set_index((unsigned) index());
    evt.set_start((uintptr_t)bottom());
    evt.set_used(used());
    evt.set_from(_state);
    evt.set_to(to);
    evt.commit();
  }
  _state = to;
}

void ShenandoahHeapRegion::record_pin() {
  Atomic::add(&_critical_pins, (size_t)1);
}

void ShenandoahHeapRegion::record_unpin() {
  assert(pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", index());
  Atomic::sub(&_critical_pins, (size_t)1);
}

size_t ShenandoahHeapRegion::pin_count() const {
  return Atomic::load(&_critical_pins);
}

void ShenandoahHeapRegion::set_affiliation(ShenandoahAffiliation new_affiliation) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahAffiliation region_affiliation = heap->region_affiliation(this);
  {
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    log_debug(gc)("Setting affiliation of Region " SIZE_FORMAT " from %s to %s, top: " PTR_FORMAT ", TAMS: " PTR_FORMAT
                  ", watermark: " PTR_FORMAT ", top_bitmap: " PTR_FORMAT,
                  index(), shenandoah_affiliation_name(region_affiliation), shenandoah_affiliation_name(new_affiliation),
                  p2i(top()), p2i(ctx->top_at_mark_start(this)), p2i(_update_watermark), p2i(ctx->top_bitmap(this)));
  }

#ifdef ASSERT
  {
    // During Full GC, heap->complete_marking_context() may not be valid (it may be nullptr).
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    size_t idx = this->index();
    HeapWord* top_bitmap = ctx->top_bitmap(this);

    assert(ctx->is_bitmap_range_within_region_clear(top_bitmap, _end),
           "Region " SIZE_FORMAT ", bitmap should be clear between top_bitmap: " PTR_FORMAT " and end: " PTR_FORMAT, idx,
           p2i(top_bitmap), p2i(_end));
  }
#endif

  if (region_affiliation == new_affiliation) {
    return;
  }

  if (!heap->mode()->is_generational()) {
    log_trace(gc)("Changing affiliation of region %zu from %s to %s",
                  index(), affiliation_name(), shenandoah_affiliation_name(new_affiliation));
    heap->set_affiliation(this, new_affiliation);
    return;
  }

  switch (new_affiliation) {
    case FREE:
      assert(!has_live(), "Free region should not have live data");
      break;
    case YOUNG_GENERATION:
      reset_age();
      break;
    case OLD_GENERATION:
      break;
    default:
      ShouldNotReachHere();
      return;
  }
  heap->set_affiliation(this, new_affiliation);
}

void ShenandoahHeapRegion::decrement_humongous_waste() const {
  assert(is_humongous(), "Should only use this for humongous regions");
  size_t waste_bytes = free();
  if (waste_bytes > 0) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahGeneration* generation = heap->generation_for(affiliation());
    heap->decrease_humongous_waste(generation, waste_bytes);
  }
}