1 /*
   2  * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
   4  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "precompiled.hpp"
  28 #include "gc/shared/cardTable.hpp"
  29 #include "gc/shared/space.inline.hpp"
  30 #include "gc/shared/tlab_globals.hpp"
  31 #include "gc/shenandoah/shenandoahCardTable.hpp"
  32 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  33 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
  34 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  35 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  36 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  37 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  38 #include "gc/shenandoah/shenandoahGeneration.hpp"
  39 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  40 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
  41 #include "jfr/jfrEvents.hpp"
  42 #include "memory/allocation.hpp"
  43 #include "memory/iterator.inline.hpp"
  44 #include "memory/resourceArea.hpp"
  45 #include "memory/universe.hpp"
  46 #include "oops/oop.inline.hpp"
  47 #include "runtime/atomic.hpp"
  48 #include "runtime/globals_extension.hpp"
  49 #include "runtime/java.hpp"
  50 #include "runtime/mutexLocker.hpp"
  51 #include "runtime/os.hpp"
  52 #include "runtime/safepoint.hpp"
  53 #include "utilities/powerOfTwo.hpp"
  54 
  55 
  56 size_t ShenandoahHeapRegion::RegionCount = 0;
  57 size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
  58 size_t ShenandoahHeapRegion::RegionSizeWords = 0;
  59 size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;
  60 size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;
  61 size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;
  62 size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;
  63 size_t ShenandoahHeapRegion::HumongousThresholdBytes = 0;
  64 size_t ShenandoahHeapRegion::HumongousThresholdWords = 0;
  65 size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;
  66 size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0;
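     // Note: RegionSizeBytes is forced to a power of two in setup_sizes(), so the *Shift and
     // *Mask values above support cheap region-index and in-region-offset arithmetic on heap addresses.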
  67 
  68 ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed) :
  69   _index(index),
  70   _bottom(start),
  71   _end(start + RegionSizeWords),
  72   _new_top(nullptr),
  73   _empty_time(os::elapsedTime()),
  74   _top_before_promoted(nullptr),
  75   _state(committed ? _empty_committed : _empty_uncommitted),
  76   _top(start),
  77   _tlab_allocs(0),
  78   _gclab_allocs(0),
  79   _plab_allocs(0),
  80   _live_data(0),
  81   _critical_pins(0),
  82   _update_watermark(start),
  83   _age(0)
  84 #ifdef SHENANDOAH_CENSUS_NOISE
  85   , _youth(0)
  86 #endif // SHENANDOAH_CENSUS_NOISE
  87   {
  88 
  89   assert(Universe::on_page_boundary(_bottom) && Universe::on_page_boundary(_end),
  90          "invalid space boundaries");
  91   if (ZapUnusedHeapArea && committed) {
  92     SpaceMangler::mangle_region(MemRegion(_bottom, _end));
  93   }
  94 }
  95 
  96 void ShenandoahHeapRegion::report_illegal_transition(const char *method) {
  97   stringStream ss;
  98   ss.print("Illegal region state transition from \"%s\", at %s\n  ", region_state_to_string(_state), method);
  99   print_on(&ss);
 100   fatal("%s", ss.freeze());
 101 }
 102 
 103 void ShenandoahHeapRegion::make_regular_allocation(ShenandoahAffiliation affiliation) {
 104   shenandoah_assert_heaplocked();
 105   reset_age();
 106   switch (_state) {
 107     case _empty_uncommitted:
 108       do_commit();
 109     case _empty_committed:
 110       assert(this->affiliation() == affiliation, "Region affiliation should already be established");
 111       set_state(_regular);
 112     case _regular:
 113     case _pinned:
 114       return;
 115     default:
 116       report_illegal_transition("regular allocation");
 117   }
 118 }
 119 
 120 // Change affiliation to YOUNG_GENERATION if _state is not _pinned_cset, _regular, or _pinned.  This implements
 121 // behavior previously performed as a side effect of make_regular_bypass().
 122 void ShenandoahHeapRegion::make_young_maybe() {
 123   shenandoah_assert_heaplocked();
 124   switch (_state) {
 125     case _empty_uncommitted:
 126     case _empty_committed:
 127     case _cset:
 128     case _humongous_start:
 129     case _humongous_cont:
 130       if (affiliation() != YOUNG_GENERATION) {
 131         if (is_old()) {
 132           ShenandoahHeap::heap()->old_generation()->decrement_affiliated_region_count();
 133         }
 134         set_affiliation(YOUNG_GENERATION);
 135         ShenandoahHeap::heap()->young_generation()->increment_affiliated_region_count();
 136       }
 137       return;
 138     case _pinned_cset:
 139     case _regular:
 140     case _pinned:
 141       return;
 142     default:
 143       assert(false, "Unexpected _state in make_young_maybe");
 144   }
 145 }
 146 
 147 void ShenandoahHeapRegion::make_regular_bypass() {
 148   shenandoah_assert_heaplocked();
 149   assert (ShenandoahHeap::heap()->is_full_gc_in_progress() || ShenandoahHeap::heap()->is_degenerated_gc_in_progress(),
 150           "only for full or degen GC");
 151   reset_age();
 152   switch (_state) {
 153     case _empty_uncommitted:
 154       do_commit();
 155     case _empty_committed:
 156     case _cset:
 157     case _humongous_start:
 158     case _humongous_cont:
 159       set_state(_regular);
 160       return;
 161     case _pinned_cset:
 162       set_state(_pinned);
 163       return;
 164     case _regular:
 165     case _pinned:
 166       return;
 167     default:
 168       report_illegal_transition("regular bypass");
 169   }
 170 }
 171 
 172 void ShenandoahHeapRegion::make_humongous_start() {
 173   shenandoah_assert_heaplocked();
 174   reset_age();
 175   switch (_state) {
 176     case _empty_uncommitted:
 177       do_commit();
 178     case _empty_committed:
 179       set_state(_humongous_start);
 180       return;
 181     default:
 182       report_illegal_transition("humongous start allocation");
 183   }
 184 }
 185 
 186 void ShenandoahHeapRegion::make_humongous_start_bypass(ShenandoahAffiliation affiliation) {
 187   shenandoah_assert_heaplocked();
 188   assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
 189   // Don't bother to account for affiliated regions during Full GC.  We recompute totals at end.
 190   set_affiliation(affiliation);
 191   reset_age();
 192   switch (_state) {
 193     case _empty_committed:
 194     case _regular:
 195     case _humongous_start:
 196     case _humongous_cont:
 197       set_state(_humongous_start);
 198       return;
 199     default:
 200       report_illegal_transition("humongous start bypass");
 201   }
 202 }
 203 
 204 void ShenandoahHeapRegion::make_humongous_cont() {
 205   shenandoah_assert_heaplocked();
 206   reset_age();
 207   switch (_state) {
 208     case _empty_uncommitted:
 209       do_commit();
 210     case _empty_committed:
 211       set_state(_humongous_cont);
 212       return;
 213     default:
 214       report_illegal_transition("humongous continuation allocation");
 215   }
 216 }
 217 
 218 void ShenandoahHeapRegion::make_humongous_cont_bypass(ShenandoahAffiliation affiliation) {
 219   shenandoah_assert_heaplocked();
 220   assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
 221   set_affiliation(affiliation);
 222   // Don't bother to account for affiliated regions during Full GC.  We recompute totals at end.
 223   reset_age();
 224   switch (_state) {
 225     case _empty_committed:
 226     case _regular:
 227     case _humongous_start:
 228     case _humongous_cont:
 229       set_state(_humongous_cont);
 230       return;
 231     default:
 232       report_illegal_transition("humongous continuation bypass");
 233   }
 234 }
 235 
 236 void ShenandoahHeapRegion::make_pinned() {
 237   shenandoah_assert_heaplocked();
 238   assert(pin_count() > 0, "Should have pins: " SIZE_FORMAT, pin_count());
 239 
 240   switch (_state) {
 241     case _regular:
 242       set_state(_pinned);
 243     case _pinned_cset:
 244     case _pinned:
 245       return;
 246     case _humongous_start:
 247       set_state(_pinned_humongous_start);
 248     case _pinned_humongous_start:
 249       return;
 250     case _cset:
 251       _state = _pinned_cset;
 252       return;
 253     default:
 254       report_illegal_transition("pinning");
 255   }
 256 }
 257 
 258 void ShenandoahHeapRegion::make_unpinned() {
 259   shenandoah_assert_heaplocked();
 260   assert(pin_count() == 0, "Should not have pins: " SIZE_FORMAT, pin_count());
 261 
 262   switch (_state) {
 263     case _pinned:
 264       assert(is_affiliated(), "Pinned region should be affiliated");
 265       set_state(_regular);
 266       return;
 267     case _regular:
 268     case _humongous_start:
 269       return;
 270     case _pinned_cset:
 271       set_state(_cset);
 272       return;
 273     case _pinned_humongous_start:
 274       set_state(_humongous_start);
 275       return;
 276     default:
 277       report_illegal_transition("unpinning");
 278   }
 279 }
 280 
 281 void ShenandoahHeapRegion::make_cset() {
 282   shenandoah_assert_heaplocked();
 283   // Leave age untouched.  We need to consult the age when we are deciding whether to promote evacuated objects.
 284   switch (_state) {
 285     case _regular:
 286       set_state(_cset);
 287     case _cset:
 288       return;
 289     default:
 290       report_illegal_transition("cset");
 291   }
 292 }
 293 
 294 void ShenandoahHeapRegion::make_trash() {
 295   shenandoah_assert_heaplocked();
 296   reset_age();
 297   switch (_state) {
 298     case _humongous_start:
 299     case _humongous_cont:
 300     {
 301       // Reclaiming a humongous region also reclaims its humongous waste.  When this region is eventually recycled,
 302       // we'll reclaim its used memory.  At recycle time, we no longer recognize this as a humongous region.
 303       decrement_humongous_waste();
 304     }
 305     case _cset:
 306       // Reclaiming cset regions
 307     case _regular:
 308       // Immediate region reclaim
 309       set_state(_trash);
 310       return;
 311     default:
 312       report_illegal_transition("trashing");
 313   }
 314 }
 315 
 316 void ShenandoahHeapRegion::make_trash_immediate() {
 317   make_trash();
 318 
 319   // On this path, we know there are no marked objects in the region,
 320   // so tell the marking context about it to bypass bitmap resets.
 321   assert(ShenandoahHeap::heap()->active_generation()->is_mark_complete(), "Marking should be complete here.");
 322   ShenandoahHeap::heap()->marking_context()->reset_top_bitmap(this);
 323 }
 324 
 325 void ShenandoahHeapRegion::make_empty() {
 326   shenandoah_assert_heaplocked();
 327   reset_age();
 328   CENSUS_NOISE(clear_youth();)
 329   switch (_state) {
 330     case _trash:
 331       set_state(_empty_committed);
 332       _empty_time = os::elapsedTime();
 333       return;
 334     default:
 335       report_illegal_transition("emptying");
 336   }
 337 }
 338 
 339 void ShenandoahHeapRegion::make_uncommitted() {
 340   shenandoah_assert_heaplocked();
 341   switch (_state) {
 342     case _empty_committed:
 343       do_uncommit();
 344       set_state(_empty_uncommitted);
 345       return;
 346     default:
 347       report_illegal_transition("uncommitting");
 348   }
 349 }
 350 
 351 void ShenandoahHeapRegion::make_committed_bypass() {
 352   shenandoah_assert_heaplocked();
 353   assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
 354 
 355   switch (_state) {
 356     case _empty_uncommitted:
 357       do_commit();
 358       set_state(_empty_committed);
 359       return;
 360     default:
 361       report_illegal_transition("commit bypass");
 362   }
 363 }
 364 
 365 void ShenandoahHeapRegion::reset_alloc_metadata() {
 366   _tlab_allocs = 0;
 367   _gclab_allocs = 0;
 368   _plab_allocs = 0;
 369 }
 370 
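     // Shared (non-LAB) allocations are whatever portion of used() was not satisfied from TLABs,
     // GCLABs, or PLABs.  The per-LAB counters are tracked in heap words, hence the HeapWordSize
     // scaling in these getters.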
 371 size_t ShenandoahHeapRegion::get_shared_allocs() const {
 372   return used() - (_tlab_allocs + _gclab_allocs + _plab_allocs) * HeapWordSize;
 373 }
 374 
 375 size_t ShenandoahHeapRegion::get_tlab_allocs() const {
 376   return _tlab_allocs * HeapWordSize;
 377 }
 378 
 379 size_t ShenandoahHeapRegion::get_gclab_allocs() const {
 380   return _gclab_allocs * HeapWordSize;
 381 }
 382 
 383 size_t ShenandoahHeapRegion::get_plab_allocs() const {
 384   return _plab_allocs * HeapWordSize;
 385 }
 386 
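     // Live data is tracked internally in heap words; callers pass a size in bytes.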
 387 void ShenandoahHeapRegion::set_live_data(size_t s) {
 388   assert(Thread::current()->is_VM_thread(), "by VM thread");
 389   _live_data = (s >> LogHeapWordSize);
 390 }
 391 
 392 void ShenandoahHeapRegion::print_on(outputStream* st) const {
 393   st->print("|");
 394   st->print(SIZE_FORMAT_W(5), this->_index);
 395 
 396   switch (_state) {
 397     case _empty_uncommitted:
 398       st->print("|EU ");
 399       break;
 400     case _empty_committed:
 401       st->print("|EC ");
 402       break;
 403     case _regular:
 404       st->print("|R  ");
 405       break;
 406     case _humongous_start:
 407       st->print("|H  ");
 408       break;
 409     case _pinned_humongous_start:
 410       st->print("|HP ");
 411       break;
 412     case _humongous_cont:
 413       st->print("|HC ");
 414       break;
 415     case _cset:
 416       st->print("|CS ");
 417       break;
 418     case _trash:
 419       st->print("|TR ");
 420       break;
 421     case _pinned:
 422       st->print("|P  ");
 423       break;
 424     case _pinned_cset:
 425       st->print("|CSP");
 426       break;
 427     default:
 428       ShouldNotReachHere();
 429   }
 430 
 431   st->print("|%s", shenandoah_affiliation_code(affiliation()));
 432 
 433 #define SHR_PTR_FORMAT "%12" PRIxPTR
 434 
 435   st->print("|BTE " SHR_PTR_FORMAT  ", " SHR_PTR_FORMAT ", " SHR_PTR_FORMAT,
 436             p2i(bottom()), p2i(top()), p2i(end()));
 437   st->print("|TAMS " SHR_PTR_FORMAT,
 438             p2i(ShenandoahHeap::heap()->marking_context()->top_at_mark_start(const_cast<ShenandoahHeapRegion*>(this))));
 439   st->print("|UWM " SHR_PTR_FORMAT,
 440             p2i(_update_watermark));
 441   st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()),                proper_unit_for_byte_size(used()));
 442   st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()),     proper_unit_for_byte_size(get_tlab_allocs()));
 443   st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()),    proper_unit_for_byte_size(get_gclab_allocs()));
 444   if (ShenandoahHeap::heap()->mode()->is_generational()) {
 445     st->print("|P " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_plab_allocs()),   proper_unit_for_byte_size(get_plab_allocs()));
 446   }
 447   st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()),   proper_unit_for_byte_size(get_shared_allocs()));
 448   st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes()));
 449   st->print("|CP " SIZE_FORMAT_W(3), pin_count());
 450   st->cr();
 451 
 452 #undef SHR_PTR_FORMAT
 453 }
 454 
 455 // Coalesce-and-fill pass, without a closure and without cancellation checks.  Always returns true.
 456 bool ShenandoahHeapRegion::oop_fill_and_coalesce_without_cancel() {
 457   HeapWord* obj_addr = resume_coalesce_and_fill();
 458 
 459   assert(!is_humongous(), "No need to fill or coalesce humongous regions");
 460   if (!is_active()) {
 461     end_preemptible_coalesce_and_fill();
 462     return true;
 463   }
 464 
 465   ShenandoahHeap* heap = ShenandoahHeap::heap();
 466   ShenandoahMarkingContext* marking_context = heap->marking_context();
 467   // All objects above TAMS are considered live even though their mark bits will not be set.  Note that young-
 468   // gen evacuations that interrupt a long-running old-gen concurrent mark may promote objects into old-gen
 469   // while the old-gen concurrent marking is ongoing.  These newly promoted objects will reside above TAMS
 470   // and will be treated as live during the current old-gen marking pass, even though they will not be
 471   // explicitly marked.
 472   HeapWord* t = marking_context->top_at_mark_start(this);
 473 
 474   // Expect marking to be completed before these threads invoke this service.
 475   assert(heap->active_generation()->is_mark_complete(), "sanity");
 476   while (obj_addr < t) {
 477     oop obj = cast_to_oop(obj_addr);
 478     if (marking_context->is_marked(obj)) {
 479       assert(obj->klass() != nullptr, "klass should not be nullptr");
 480       obj_addr += obj->size();
 481     } else {
 482       // Object is not marked.  Coalesce and fill dead object with dead neighbors.
 483       HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
 484       assert(next_marked_obj <= t, "next marked object cannot exceed top");
 485       size_t fill_size = next_marked_obj - obj_addr;
 486       assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated objects known to be larger than min_size");
 487       ShenandoahHeap::fill_with_object(obj_addr, fill_size);
 488       heap->card_scan()->coalesce_objects(obj_addr, fill_size);
 489       obj_addr = next_marked_obj;
 490     }
 491   }
 492   // Mark that this region has been coalesced and filled
 493   end_preemptible_coalesce_and_fill();
 494   return true;
 495 }
 496 
 497 // Coalesce-and-fill pass without a closure; returns true if it completed without cancellation.
 498 bool ShenandoahHeapRegion::oop_fill_and_coalesce() {
 499   HeapWord* obj_addr = resume_coalesce_and_fill();
 500   // Consider yielding to cancel/preemption request after this many coalesce operations (skip marked, or coalesce free).
 501   const size_t preemption_stride = 128;
 502 
 503   assert(!is_humongous(), "No need to fill or coalesce humongous regions");
 504   if (!is_active()) {
 505     end_preemptible_coalesce_and_fill();
 506     return true;
 507   }
 508 
 509   ShenandoahHeap* heap = ShenandoahHeap::heap();
 510   ShenandoahMarkingContext* marking_context = heap->marking_context();
 511   // All objects above TAMS are considered live even though their mark bits will not be set.  Note that young-
 512   // gen evacuations that interrupt a long-running old-gen concurrent mark may promote objects into old-gen
 513   // while the old-gen concurrent marking is ongoing.  These newly promoted objects will reside above TAMS
 514   // and will be treated as live during the current old-gen marking pass, even though they will not be
 515   // explicitly marked.
 516   HeapWord* t = marking_context->top_at_mark_start(this);
 517 
 518   // Expect marking to be completed before these threads invoke this service.
 519   assert(heap->active_generation()->is_mark_complete(), "sanity");
 520 
 521   size_t ops_before_preempt_check = preemption_stride;
 522   while (obj_addr < t) {
 523     oop obj = cast_to_oop(obj_addr);
 524     if (marking_context->is_marked(obj)) {
 525       assert(obj->klass() != nullptr, "klass should not be nullptr");
 526       obj_addr += obj->size();
 527     } else {
 528       // Object is not marked.  Coalesce and fill dead object with dead neighbors.
 529       HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
 530       assert(next_marked_obj <= t, "next marked object cannot exceed top");
 531       size_t fill_size = next_marked_obj - obj_addr;
 532       assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
 533       ShenandoahHeap::fill_with_object(obj_addr, fill_size);
 534       heap->card_scan()->coalesce_objects(obj_addr, fill_size);
 535       obj_addr = next_marked_obj;
 536     }
 537     if (ops_before_preempt_check-- == 0) {
 538       if (heap->cancelled_gc()) {
 539         suspend_coalesce_and_fill(obj_addr);
 540         return false;
 541       }
 542       ops_before_preempt_check = preemption_stride;
 543     }
 544   }
 545   // Mark that this region has been coalesced and filled
 546   end_preemptible_coalesce_and_fill();
 547   return true;
 548 }
 549 
 550 void ShenandoahHeapRegion::global_oop_iterate_and_fill_dead(OopIterateClosure* blk) {
 551   if (!is_active()) return;
 552   if (is_humongous()) {
 553     // No need to fill dead within humongous regions.  Either the entire region is dead, or the entire region is
 554     // unchanged.  A humongous region holds no more than one humongous object.
 555     oop_iterate_humongous(blk);
 556   } else {
 557     global_oop_iterate_objects_and_fill_dead(blk);
 558   }
 559 }
 560 
 561 void ShenandoahHeapRegion::global_oop_iterate_objects_and_fill_dead(OopIterateClosure* blk) {
 562   assert(!is_humongous(), "no humongous region here");
 563   HeapWord* obj_addr = bottom();
 564 
 565   ShenandoahHeap* heap = ShenandoahHeap::heap();
 566   ShenandoahMarkingContext* marking_context = heap->marking_context();
 567   RememberedScanner* rem_set_scanner = heap->card_scan();
 568   // Objects allocated above TAMS are not marked, but are considered live for purposes of current GC efforts.
 569   HeapWord* t = marking_context->top_at_mark_start(this);
 570 
 571   assert(heap->active_generation()->is_mark_complete(), "sanity");
 572 
 573   while (obj_addr < t) {
 574     oop obj = cast_to_oop(obj_addr);
 575     if (marking_context->is_marked(obj)) {
 576       assert(obj->klass() != nullptr, "klass should not be nullptr");
 577       // when promoting an entire region, we have to register the marked objects as well
 578       obj_addr += obj->oop_iterate_size(blk);
 579     } else {
 580       // Object is not marked.  Coalesce and fill dead object with dead neighbors.
 581       HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
 582       assert(next_marked_obj <= t, "next marked object cannot exceed top");
 583       size_t fill_size = next_marked_obj - obj_addr;
 584       assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated objects known to be larger than min_size");
 585       ShenandoahHeap::fill_with_object(obj_addr, fill_size);
 586       // coalesce_objects() unregisters all but first object subsumed within coalesced range.
 587       rem_set_scanner->coalesce_objects(obj_addr, fill_size);
 588       obj_addr = next_marked_obj;
 589     }
 590   }
 591 
 592   // Any object above TAMS and below top() is considered live.
 593   t = top();
 594   while (obj_addr < t) {
 595     oop obj = cast_to_oop(obj_addr);
 596     obj_addr += obj->oop_iterate_size(blk);
 597   }
 598 }
 599 
 600 // DO NOT CANCEL.  If this worker thread has accepted responsibility for scanning a particular range of addresses, it
 601 // must finish the work before it can be cancelled.
 602 void ShenandoahHeapRegion::oop_iterate_humongous_slice(OopIterateClosure* blk, bool dirty_only,
 603                                                        HeapWord* start, size_t words, bool write_table) {
 604   assert(words % CardTable::card_size_in_words() == 0, "Humongous iteration must span whole number of cards");
 605   assert(is_humongous(), "only humongous region here");
 606   ShenandoahHeap* heap = ShenandoahHeap::heap();
 607 
 608   // Find head.
 609   ShenandoahHeapRegion* r = humongous_start_region();
 610   assert(r->is_humongous_start(), "need humongous head here");
 611   assert(CardTable::card_size_in_words() * (words / CardTable::card_size_in_words()) == words,
 612          "slice must be integral number of cards");
 613 
 614   oop obj = cast_to_oop(r->bottom());
 615   RememberedScanner* scanner = ShenandoahHeap::heap()->card_scan();
 616   size_t card_index = scanner->card_index_for_addr(start);
 617   size_t num_cards = words / CardTable::card_size_in_words();
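       // Illustrative arithmetic (assuming the default 512-byte card size on a 64-bit VM):
       // card_size_in_words() is 64, so a slice of 4096 words spans 64 cards.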
 618 
 619   if (dirty_only) {
 620     if (write_table) {
 621       while (num_cards-- > 0) {
 622         if (scanner->is_write_card_dirty(card_index++)) {
 623           obj->oop_iterate(blk, MemRegion(start, start + CardTable::card_size_in_words()));
 624         }
 625         start += CardTable::card_size_in_words();
 626       }
 627     } else {
 628       while (num_cards-- > 0) {
 629         if (scanner->is_card_dirty(card_index++)) {
 630           obj->oop_iterate(blk, MemRegion(start, start + CardTable::card_size_in_words()));
 631         }
 632         start += CardTable::card_size_in_words();
 633       }
 634     }
 635   } else {
 636     // Scan all data, regardless of whether cards are dirty
 637     obj->oop_iterate(blk, MemRegion(start, start + num_cards * CardTable::card_size_in_words()));
 638   }
 639 }
 640 
 641 void ShenandoahHeapRegion::oop_iterate_humongous(OopIterateClosure* blk, HeapWord* start, size_t words) {
 642   assert(is_humongous(), "only humongous region here");
 643   // Find head.
 644   ShenandoahHeapRegion* r = humongous_start_region();
 645   assert(r->is_humongous_start(), "need humongous head here");
 646   oop obj = cast_to_oop(r->bottom());
 647   obj->oop_iterate(blk, MemRegion(start, start + words));
 648 }
 649 
 650 void ShenandoahHeapRegion::oop_iterate_humongous(OopIterateClosure* blk) {
 651   assert(is_humongous(), "only humongous region here");
 652   // Find head.
 653   ShenandoahHeapRegion* r = humongous_start_region();
 654   assert(r->is_humongous_start(), "need humongous head here");
 655   oop obj = cast_to_oop(r->bottom());
 656   obj->oop_iterate(blk, MemRegion(bottom(), top()));
 657 }
 658 
 659 ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
 660   ShenandoahHeap* heap = ShenandoahHeap::heap();
 661   assert(is_humongous(), "Must be a part of the humongous region");
 662   size_t i = index();
 663   ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this);
 664   while (!r->is_humongous_start()) {
 665     assert(i > 0, "Sanity");
 666     i--;
 667     r = heap->get_region(i);
 668     assert(r->is_humongous(), "Must be a part of the humongous region");
 669   }
 670   assert(r->is_humongous_start(), "Must be");
 671   return r;
 672 }
 673 
 674 void ShenandoahHeapRegion::recycle() {
 675   shenandoah_assert_heaplocked();
 676   ShenandoahHeap* heap = ShenandoahHeap::heap();
 677   ShenandoahGeneration* generation = heap->generation_for(affiliation());
 678   heap->decrease_used(generation, used());
 679 
 680   set_top(bottom());
 681   clear_live_data();
 682 
 683   reset_alloc_metadata();
 684 
 685   heap->marking_context()->reset_top_at_mark_start(this);
 686   set_update_watermark(bottom());
 687 
 688   make_empty();
 689   ShenandoahHeap::heap()->generation_for(affiliation())->decrement_affiliated_region_count();
 690   set_affiliation(FREE);
 691   if (ZapUnusedHeapArea) {
 692     SpaceMangler::mangle_region(MemRegion(bottom(), end()));
 693   }
 694 }
 695 
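     // block_start() walks objects linearly from bottom() to find the start of the object
     // (or filler) covering p; addresses at or above top() simply map to top().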
 696 HeapWord* ShenandoahHeapRegion::block_start(const void* p) const {
 697   assert(MemRegion(bottom(), end()).contains(p),
 698          "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
 699          p2i(p), p2i(bottom()), p2i(end()));
 700   if (p >= top()) {
 701     return top();
 702   } else {
 703     HeapWord* last = bottom();
 704     HeapWord* cur = last;
 705     while (cur <= p) {
 706       last = cur;
 707       cur += cast_to_oop(cur)->size();
 708     }
 709     shenandoah_assert_correct(nullptr, cast_to_oop(last));
 710     return last;
 711   }
 712 }
 713 
 714 size_t ShenandoahHeapRegion::block_size(const HeapWord* p) const {
 715   assert(MemRegion(bottom(), end()).contains(p),
 716          "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
 717          p2i(p), p2i(bottom()), p2i(end()));
 718   if (p < top()) {
 719     return cast_to_oop(p)->size();
 720   } else {
 721     assert(p == top(), "just checking");
 722     return pointer_delta(end(), (HeapWord*) p);
 723   }
 724 }
 725 
 726 size_t ShenandoahHeapRegion::setup_sizes(size_t max_heap_size) {
 727   // Absolute minimums we should not ever break.
 728   static const size_t MIN_REGION_SIZE = 256*K;
 729 
 730   if (FLAG_IS_DEFAULT(ShenandoahMinRegionSize)) {
 731     FLAG_SET_DEFAULT(ShenandoahMinRegionSize, MIN_REGION_SIZE);
 732   }
 733 
 734   // Generational Shenandoah needs this alignment for card tables.
 735   if (strcmp(ShenandoahGCMode, "generational") == 0) {
 736     max_heap_size = align_up(max_heap_size , CardTable::ct_max_alignment_constraint());
 737   }
 738 
 739   size_t region_size;
 740   if (FLAG_IS_DEFAULT(ShenandoahRegionSize)) {
 741     if (ShenandoahMinRegionSize > max_heap_size / MIN_NUM_REGIONS) {
 742       err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
 743                       "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "%s).",
 744                       byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
 745                       MIN_NUM_REGIONS,
 746                       byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize));
 747       vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
 748     }
 749     if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
 750       err_msg message("" SIZE_FORMAT "%s should not be lower than minimum region size (" SIZE_FORMAT "%s).",
 751                       byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
 752                       byte_size_in_proper_unit(MIN_REGION_SIZE),         proper_unit_for_byte_size(MIN_REGION_SIZE));
 753       vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
 754     }
 755     if (ShenandoahMinRegionSize < MinTLABSize) {
 756       err_msg message("" SIZE_FORMAT "%s should not be lower than the minimum TLAB size (" SIZE_FORMAT "%s).",
 757                       byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
 758                       byte_size_in_proper_unit(MinTLABSize),             proper_unit_for_byte_size(MinTLABSize));
 759       vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
 760     }
 761     if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
 762       err_msg message("" SIZE_FORMAT "%s should not be lower than min region size (" SIZE_FORMAT "%s).",
 763                       byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize),
 764                       byte_size_in_proper_unit(MIN_REGION_SIZE),         proper_unit_for_byte_size(MIN_REGION_SIZE));
 765       vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message);
 766     }
 767     if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
 768       err_msg message("Minimum (" SIZE_FORMAT "%s) should not be larger than maximum (" SIZE_FORMAT "%s).",
 769                       byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
 770                       byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize));
 771       vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message);
 772     }
 773 
 774     // We rapidly expand to max_heap_size in most scenarios, so that is the measure
 775     // for usual heap sizes. Do not depend on initial_heap_size here.
 776     region_size = max_heap_size / ShenandoahTargetNumRegions;
 777 
 778     // Now make sure that we don't go over or under our limits.
 779     region_size = MAX2(ShenandoahMinRegionSize, region_size);
 780     region_size = MIN2(ShenandoahMaxRegionSize, region_size);
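         // Illustrative example: with a target of 2048 regions (the usual ShenandoahTargetNumRegions
         // default), an 8 GB max heap gives 4 MB regions, which the clamps above then keep within
         // [ShenandoahMinRegionSize, ShenandoahMaxRegionSize].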
 781 
 782   } else {
 783     if (ShenandoahRegionSize > max_heap_size / MIN_NUM_REGIONS) {
 784       err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
 785                               "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "%s).",
 786                       byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
 787                       MIN_NUM_REGIONS,
 788                       byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize));
 789       vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
 790     }
 791     if (ShenandoahRegionSize < ShenandoahMinRegionSize) {
 792       err_msg message("Heap region size (" SIZE_FORMAT "%s) should be larger than min region size (" SIZE_FORMAT "%s).",
 793                       byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize),
 794                       byte_size_in_proper_unit(ShenandoahMinRegionSize),  proper_unit_for_byte_size(ShenandoahMinRegionSize));
 795       vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
 796     }
 797     if (ShenandoahRegionSize > ShenandoahMaxRegionSize) {
 798       err_msg message("Heap region size (" SIZE_FORMAT "%s) should be lower than max region size (" SIZE_FORMAT "%s).",
 799                       byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize),
 800                       byte_size_in_proper_unit(ShenandoahMaxRegionSize),  proper_unit_for_byte_size(ShenandoahMaxRegionSize));
 801       vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
 802     }
 803     region_size = ShenandoahRegionSize;
 804   }
 805 
 806   // Make sure region size and heap size are page aligned.
 807   // If large pages are used, we ensure that region size is aligned to large page size if
 808   // heap size is large enough to accommodate minimal number of regions. Otherwise, we align
 809   // region size to regular page size.
 810 
 811   // Figure out the page size to use, and align the heap up to it
 812   size_t page_size = os::vm_page_size();
 813   if (UseLargePages) {
 814     size_t large_page_size = os::large_page_size();
 815     max_heap_size = align_up(max_heap_size, large_page_size);
 816     if ((max_heap_size / align_up(region_size, large_page_size)) >= MIN_NUM_REGIONS) {
 817       page_size = large_page_size;
 818     } else {
 819       // Should have been checked during argument initialization
 820       assert(!ShenandoahUncommit, "Uncommit requires that region size be aligned to the large page size");
 821     }
 822   } else {
 823     max_heap_size = align_up(max_heap_size, page_size);
 824   }
 825 
 826   // Align region size to page size
 827   region_size = align_up(region_size, page_size);
 828 
 829   int region_size_log = log2i(region_size);
 830   // Recalculate the region size to make sure it's a power of
 831   // 2. This means that region_size is the largest power of 2 that's
 832   // <= what we've calculated so far.
 833   region_size = size_t(1) << region_size_log;
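       // For example, a computed region_size of 5 MB has log2i() == 22 and is therefore rounded
       // down to 4 MB by the shift above.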
 834 
 835   // Now, set up the globals.
 836   guarantee(RegionSizeBytesShift == 0, "we should only set it once");
 837   RegionSizeBytesShift = (size_t)region_size_log;
 838 
 839   guarantee(RegionSizeWordsShift == 0, "we should only set it once");
 840   RegionSizeWordsShift = RegionSizeBytesShift - LogHeapWordSize;
 841 
 842   guarantee(RegionSizeBytes == 0, "we should only set it once");
 843   RegionSizeBytes = region_size;
 844   RegionSizeWords = RegionSizeBytes >> LogHeapWordSize;
 845   assert (RegionSizeWords*HeapWordSize == RegionSizeBytes, "sanity");
 846 
 847   guarantee(RegionSizeWordsMask == 0, "we should only set it once");
 848   RegionSizeWordsMask = RegionSizeWords - 1;
 849 
 850   guarantee(RegionSizeBytesMask == 0, "we should only set it once");
 851   RegionSizeBytesMask = RegionSizeBytes - 1;
 852 
 853   guarantee(RegionCount == 0, "we should only set it once");
 854   RegionCount = align_up(max_heap_size, RegionSizeBytes) / RegionSizeBytes;
 855   guarantee(RegionCount >= MIN_NUM_REGIONS, "Should have at least minimum regions");
 856 
 857   guarantee(HumongousThresholdWords == 0, "we should only set it once");
 858   HumongousThresholdWords = RegionSizeWords * ShenandoahHumongousThreshold / 100;
 859   HumongousThresholdWords = align_down(HumongousThresholdWords, MinObjAlignment);
 860   assert (HumongousThresholdWords <= RegionSizeWords, "sanity");
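       // With ShenandoahHumongousThreshold at its usual default of 100, HumongousThresholdWords equals
       // the full region size, so only allocations larger than one region are treated as humongous.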
 861 
 862   guarantee(HumongousThresholdBytes == 0, "we should only set it once");
 863   HumongousThresholdBytes = HumongousThresholdWords * HeapWordSize;
 864   assert (HumongousThresholdBytes <= RegionSizeBytes, "sanity");
 865 
 866   // The rationale for trimming the TLAB sizes has to do with the raciness of the
 867   // TLAB allocation machinery. It may happen that the TLAB sizing policy polls Shenandoah
 868   // about the next free size, gets the answer for region #N, goes away for a while, then
 869   // tries to allocate in region #N, and fails because some other thread has claimed part
 870   // of region #N; the freeset allocation code then has to retire region #N
 871   // before moving the allocation to region #N+1.
 872   //
 873   // The worst case is realized when the "answer" is "region size", which means a TLAB could
 874   // prematurely retire an entire region. Having smaller TLABs does not fix that
 875   // completely, but it reduces the probability of overly wasteful region retirement.
 876   // With the current divisor, we will waste no more than 1/8 of the region size in the worst
 877   // case. This also has a secondary effect on collection set selection: even under
 878   // the race, the regions would be at least 7/8 used, which allows relying on
 879   // "used" - "live" for cset selection. Otherwise, we could end up with a fragmented region
 880   // below the garbage threshold that would never be considered for collection.
 881   //
 882   // The whole thing is mitigated if Elastic TLABs are enabled.
 883   //
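       // Illustrative arithmetic: with 4 MB regions and non-elastic TLABs, the divisor of 8 below
       // caps MaxTLABSizeWords at the word count of 512 KB (further bounded by HumongousThresholdWords
       // and aligned down to MinObjAlignment).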
 884   guarantee(MaxTLABSizeWords == 0, "we should only set it once");
 885   MaxTLABSizeWords = MIN2(ShenandoahElasticTLAB ? RegionSizeWords : (RegionSizeWords / 8), HumongousThresholdWords);
 886   MaxTLABSizeWords = align_down(MaxTLABSizeWords, MinObjAlignment);
 887 
 888   guarantee(MaxTLABSizeBytes == 0, "we should only set it once");
 889   MaxTLABSizeBytes = MaxTLABSizeWords * HeapWordSize;
 890   assert (MaxTLABSizeBytes > MinTLABSize, "should be larger");
 891 
 892   return max_heap_size;
 893 }
 894 
 895 void ShenandoahHeapRegion::do_commit() {
 896   ShenandoahHeap* heap = ShenandoahHeap::heap();
 897   if (!heap->is_heap_region_special() && !os::commit_memory((char *) bottom(), RegionSizeBytes, false)) {
 898     report_java_out_of_memory("Unable to commit region");
 899   }
 900   if (!heap->commit_bitmap_slice(this)) {
 901     report_java_out_of_memory("Unable to commit bitmaps for region");
 902   }
 903   if (AlwaysPreTouch) {
 904     os::pretouch_memory(bottom(), end(), heap->pretouch_heap_page_size());
 905   }
 906   heap->increase_committed(ShenandoahHeapRegion::region_size_bytes());
 907 }
 908 
 909 void ShenandoahHeapRegion::do_uncommit() {
 910   ShenandoahHeap* heap = ShenandoahHeap::heap();
 911   if (!heap->is_heap_region_special() && !os::uncommit_memory((char *) bottom(), RegionSizeBytes)) {
 912     report_java_out_of_memory("Unable to uncommit region");
 913   }
 914   if (!heap->uncommit_bitmap_slice(this)) {
 915     report_java_out_of_memory("Unable to uncommit bitmaps for region");
 916   }
 917   heap->decrease_committed(ShenandoahHeapRegion::region_size_bytes());
 918 }
 919 
 920 void ShenandoahHeapRegion::set_state(RegionState to) {
 921   EventShenandoahHeapRegionStateChange evt;
 922   if (evt.should_commit()) {
 923     evt.set_index((unsigned) index());
 924     evt.set_start((uintptr_t)bottom());
 925     evt.set_used(used());
 926     evt.set_from(_state);
 927     evt.set_to(to);
 928     evt.commit();
 929   }
 930   _state = to;
 931 }
 932 
 933 void ShenandoahHeapRegion::record_pin() {
 934   Atomic::add(&_critical_pins, (size_t)1);
 935 }
 936 
 937 void ShenandoahHeapRegion::record_unpin() {
 938   assert(pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", index());
 939   Atomic::sub(&_critical_pins, (size_t)1);
 940 }
 941 
 942 size_t ShenandoahHeapRegion::pin_count() const {
 943   return Atomic::load(&_critical_pins);
 944 }
 945 
 946 void ShenandoahHeapRegion::set_affiliation(ShenandoahAffiliation new_affiliation) {
 947   ShenandoahHeap* heap = ShenandoahHeap::heap();
 948 
 949   ShenandoahAffiliation region_affiliation = heap->region_affiliation(this);
 950   {
 951     ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
 952     log_debug(gc)("Setting affiliation of Region " SIZE_FORMAT " from %s to %s, top: " PTR_FORMAT ", TAMS: " PTR_FORMAT
 953                   ", watermark: " PTR_FORMAT ", top_bitmap: " PTR_FORMAT,
 954                   index(), shenandoah_affiliation_name(region_affiliation), shenandoah_affiliation_name(new_affiliation),
 955                   p2i(top()), p2i(ctx->top_at_mark_start(this)), p2i(_update_watermark), p2i(ctx->top_bitmap(this)));
 956   }
 957 
 958 #ifdef ASSERT
 959   {
 960     // During Full GC, heap->complete_marking_context() is not valid and may equal nullptr.
 961     ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
 962     size_t idx = this->index();
 963     HeapWord* top_bitmap = ctx->top_bitmap(this);
 964 
 965     assert(ctx->is_bitmap_clear_range(top_bitmap, _end),
 966            "Region " SIZE_FORMAT ", bitmap should be clear between top_bitmap: " PTR_FORMAT " and end: " PTR_FORMAT, idx,
 967            p2i(top_bitmap), p2i(_end));
 968   }
 969 #endif
 970 
 971   if (region_affiliation == new_affiliation) {
 972     return;
 973   }
 974 
 975   if (!heap->mode()->is_generational()) {
 976     log_trace(gc)("Changing affiliation of region %zu from %s to %s",
 977                   index(), affiliation_name(), shenandoah_affiliation_name(new_affiliation));
 978     heap->set_affiliation(this, new_affiliation);
 979     return;
 980   }
 981 
 982   switch (new_affiliation) {
 983     case FREE:
 984       assert(!has_live(), "Free region should not have live data");
 985       break;
 986     case YOUNG_GENERATION:
 987       reset_age();
 988       break;
 989     case OLD_GENERATION:
 990       // TODO: should we reset_age() for OLD as well?  Examine invocations of set_affiliation(). Some contexts redundantly
 991       //       invoke reset_age().
 992       break;
 993     default:
 994       ShouldNotReachHere();
 995       return;
 996   }
 997   heap->set_affiliation(this, new_affiliation);
 998 }
 999 
1000 // When we promote a region in place, we can continue to use the established marking context to guide subsequent remembered
1001 // set scans of this region's content.  The region will be coalesced and filled prior to the next old-gen marking effort.
1002 // We identify the entirety of the region as DIRTY to force the next remembered set scan to identify the "interesting pointers"
1003 // contained herein.
1004 void ShenandoahHeapRegion::promote_in_place() {
1005   ShenandoahHeap* heap = ShenandoahHeap::heap();
1006   ShenandoahMarkingContext* marking_context = heap->marking_context();
1007   HeapWord* tams = marking_context->top_at_mark_start(this);
1008   assert(heap->active_generation()->is_mark_complete(), "sanity");
1009   assert(!heap->is_concurrent_old_mark_in_progress(), "Cannot promote in place during old marking");
1010   assert(is_young(), "Only young regions can be promoted");
1011   assert(is_regular(), "Use different service to promote humongous regions");
1012   assert(age() >= heap->age_census()->tenuring_threshold(), "Only promote regions that are sufficiently aged");
1013 
1014   ShenandoahOldGeneration* old_gen = heap->old_generation();
1015   ShenandoahYoungGeneration* young_gen = heap->young_generation();
1016   size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1017 
1018   assert(get_top_before_promote() == tams, "Cannot promote regions in place if top has advanced beyond TAMS");
1019 
1020   // Rebuild the remembered set information and mark the entire range as DIRTY.  We do NOT scan the content of this
1021   // range to determine which cards need to be DIRTY.  That would force us to scan the region twice, once now, and
1022   // once during the subsequent remembered set scan.  Instead, we blindly (conservatively) mark everything as DIRTY
1023 // now and then sort out the CLEAN cards during the next remembered set scan.
1024   //
1025   // Rebuilding the remembered set consists of clearing all object registrations (reset_object_range()) here,
1026   // then registering every live object and every coalesced range of free objects in the loop that follows.
1027   heap->card_scan()->reset_object_range(bottom(), end());
1028   heap->card_scan()->mark_range_as_dirty(bottom(), get_top_before_promote() - bottom());
1029 
1030   // TODO: use an existing coalesce-and-fill function rather than replicating the code here.
1031   HeapWord* obj_addr = bottom();
1032   while (obj_addr < tams) {
1033     oop obj = cast_to_oop(obj_addr);
1034     if (marking_context->is_marked(obj)) {
1035       assert(obj->klass() != nullptr, "klass should not be nullptr");
1036       // This thread is responsible for registering all objects in this region.  No need for lock.
1037       heap->card_scan()->register_object_without_lock(obj_addr);
1038       obj_addr += obj->size();
1039     } else {
1040       HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, tams);
1041       assert(next_marked_obj <= tams, "next marked object cannot exceed tams");
1042       size_t fill_size = next_marked_obj - obj_addr;
1043       assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated objects known to be larger than min_size");
1044       ShenandoahHeap::fill_with_object(obj_addr, fill_size);
1045       heap->card_scan()->register_object_without_lock(obj_addr);
1046       obj_addr = next_marked_obj;
1047     }
1048   }
1049   // We do not need to scan above TAMS because restored top equals tams
1050   assert(obj_addr == tams, "Expect loop to terminate when obj_addr equals tams");
1051 
1052   {
1053     ShenandoahHeapLocker locker(heap->lock());
1054 
1055     HeapWord* update_watermark = get_update_watermark();
1056 
1057     // Now that this region is affiliated with old, we can allow it to receive allocations, though it may not be in the
1058     // is_collector_free range.
1059     restore_top_before_promote();
1060 
1061     size_t region_capacity = free();
1062     size_t region_used = used();
1063 
1064     // The update_watermark was likely established while we had the artificially high value of top.  Make it sane now.
1065     assert(update_watermark >= top(), "original top cannot exceed preserved update_watermark");
1066     set_update_watermark(top());
1067 
1068     // Unconditionally transfer one region from young to old to represent the newly promoted region.
1069     // This expands old and shrinks young by the size of one region.  Strictly, we do not "need" to expand old
1070     // if there are already enough unaffiliated regions in old to account for this newly promoted region.
1071     // However, if we do not transfer the capacities, we end up reducing the amount of memory that would have
1072     // otherwise been available to hold old evacuations, because old available is max_capacity - used and now
1073     // we would be trading a fully empty region for a partially used region.
1074 
1075     young_gen->decrease_used(region_used);
1076     young_gen->decrement_affiliated_region_count();
1077 
1078     // transfer_to_old() increases capacity of old and decreases capacity of young
1079     heap->generation_sizer()->force_transfer_to_old(1);
1080     set_affiliation(OLD_GENERATION);
1081 
1082     old_gen->increment_affiliated_region_count();
1083     old_gen->increase_used(region_used);
1084 
1085     // add_old_collector_free_region() increases promoted_reserve() if available space exceeds PLAB::min_size()
1086     heap->free_set()->add_old_collector_free_region(this);
1087   }
1088 }
1089 
1090 void ShenandoahHeapRegion::promote_humongous() {
1091   ShenandoahHeap* heap = ShenandoahHeap::heap();
1092   ShenandoahMarkingContext* marking_context = heap->marking_context();
1093   assert(heap->active_generation()->is_mark_complete(), "sanity");
1094   assert(is_young(), "Only young regions can be promoted");
1095   assert(is_humongous_start(), "Should not promote humongous continuation in isolation");
1096   assert(age() >= heap->age_census()->tenuring_threshold(), "Only promote regions that are sufficiently aged");
1097 
1098   ShenandoahGeneration* old_generation = heap->old_generation();
1099   ShenandoahGeneration* young_generation = heap->young_generation();
1100 
1101   oop obj = cast_to_oop(bottom());
1102   assert(marking_context->is_marked(obj), "promoted humongous object should be alive");
1103 
1104   // TODO: Consider not promoting humongous objects that represent primitive arrays.  Leaving a primitive array
1105   // (obj->is_typeArray()) in young-gen is harmless because these objects are never relocated and they are not
1106   // scanned.  Leaving primitive arrays in young-gen memory allows their memory to be reclaimed more quickly when
1107   // it becomes garbage.  Better to not make this change until sizes of young-gen and old-gen are completely
1108   // adaptive, as leaving primitive arrays in young-gen might be perceived as an "astonishing result" by someone
1109   // who has carefully analyzed the required sizes of an application's young-gen and old-gen.
1110   size_t used_bytes = obj->size() * HeapWordSize;
1111   size_t spanned_regions = ShenandoahHeapRegion::required_regions(used_bytes);
1112   size_t humongous_waste = spanned_regions * ShenandoahHeapRegion::region_size_bytes() - obj->size() * HeapWordSize;
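       // Illustrative arithmetic: a 5 MB humongous object in 4 MB regions spans two regions (8 MB),
       // leaving 3 MB of humongous waste in the trailing region.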
1113   size_t index_limit = index() + spanned_regions;
1114   {
1115     // We need to grab the heap lock in order to avoid a race when changing the affiliations of spanned_regions from
1116     // young to old.
1117     ShenandoahHeapLocker locker(heap->lock());
1118 
1119     // We promote humongous objects unconditionally, without checking for availability.  We adjust
1120     // usage totals, including humongous waste, after evacuation is done.
1121     log_debug(gc)("promoting humongous region " SIZE_FORMAT ", spanning " SIZE_FORMAT, index(), spanned_regions);
1122 
1123     young_generation->decrease_used(used_bytes);
1124     young_generation->decrease_humongous_waste(humongous_waste);
1125     young_generation->decrease_affiliated_region_count(spanned_regions);
1126 
1127     // transfer_to_old() increases capacity of old and decreases capacity of young
1128     heap->generation_sizer()->force_transfer_to_old(spanned_regions);
1129 
1130     // For this region and each humongous continuation region spanned by this humongous object, change
1131     // affiliation to OLD_GENERATION and adjust the generation-use tallies.  The remnant of memory
1132     // in the last humongous region that is not spanned by obj is currently not used.
1133     for (size_t i = index(); i < index_limit; i++) {
1134       ShenandoahHeapRegion* r = heap->get_region(i);
1135       log_debug(gc)("promoting humongous region " SIZE_FORMAT ", from " PTR_FORMAT " to " PTR_FORMAT,
1136                     r->index(), p2i(r->bottom()), p2i(r->top()));
1137       // We mark the entire humongous object's range as dirty after loop terminates, so no need to dirty the range here
1138       r->set_affiliation(OLD_GENERATION);
1139     }
1140 
1141     old_generation->increase_affiliated_region_count(spanned_regions);
1142     old_generation->increase_used(used_bytes);
1143     old_generation->increase_humongous_waste(humongous_waste);
1144   }
1145 
1146   // Since this region may have served previously as OLD, it may hold obsolete object range info.
1147   heap->card_scan()->reset_object_range(bottom(), bottom() + spanned_regions * ShenandoahHeapRegion::region_size_words());
1148   // Since the humongous region holds only one object, no lock is necessary for this register_object() invocation.
1149   heap->card_scan()->register_object_without_lock(bottom());
1150 
1151   if (obj->is_typeArray()) {
1152     // Primitive arrays don't need to be scanned.
1153     log_debug(gc)("Clean cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT,
1154                   index(), p2i(bottom()), p2i(bottom() + obj->size()));
1155     heap->card_scan()->mark_range_as_clean(bottom(), obj->size());
1156   } else {
1157     log_debug(gc)("Dirty cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT,
1158                   index(), p2i(bottom()), p2i(bottom() + obj->size()));
1159     heap->card_scan()->mark_range_as_dirty(bottom(), obj->size());
1160   }
1161 }
1162 
1163 void ShenandoahHeapRegion::decrement_humongous_waste() const {
1164   assert(is_humongous(), "Should only use this for humongous regions");
1165   size_t waste_bytes = free();
1166   if (waste_bytes > 0) {
1167     ShenandoahHeap* heap = ShenandoahHeap::heap();
1168     ShenandoahGeneration* generation = heap->generation_for(affiliation());
1169     heap->decrease_humongous_waste(generation, waste_bytes);
1170   }
1171 }