/*
 * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/shenandoahCardTable.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/powerOfTwo.hpp"


size_t ShenandoahHeapRegion::RegionCount = 0;
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
size_t ShenandoahHeapRegion::RegionSizeWords = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;
size_t ShenandoahHeapRegion::HumongousThresholdBytes = 0;
size_t ShenandoahHeapRegion::HumongousThresholdWords = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0;

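// Construct the metadata for the region spanning [start, start + RegionSizeWords). The region starts out
// empty (top == bottom) and is recorded as committed or uncommitted according to the caller's request.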
ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed) :
  _index(index),
  _bottom(start),
  _end(start + RegionSizeWords),
  _new_top(nullptr),
  _empty_time(os::elapsedTime()),
  _state(committed ? _empty_committed : _empty_uncommitted),
  _top(start),
  _tlab_allocs(0),
  _gclab_allocs(0),
  _plab_allocs(0),
  _has_young_lab(false),
  _live_data(0),
  _critical_pins(0),
  _update_watermark(start),
  _age(0) {

  assert(Universe::on_page_boundary(_bottom) && Universe::on_page_boundary(_end),
         "invalid space boundaries");
  if (ZapUnusedHeapArea && committed) {
    SpaceMangler::mangle_region(MemRegion(_bottom, _end));
  }
}

void ShenandoahHeapRegion::report_illegal_transition(const char *method) {
  stringStream ss;
  ss.print("Illegal region state transition from \"%s\", at %s\n  ", region_state_to_string(_state), method);
  print_on(&ss);
  fatal("%s", ss.freeze());
}

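// Note on the state-transition methods below: several of the switch statements fall through on purpose
// (for example, _empty_uncommitted commits the region and then continues into the _empty_committed case),
// so the absence of break statements between those cases is deliberate.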
void ShenandoahHeapRegion::make_regular_allocation(ShenandoahAffiliation affiliation) {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
      assert(this->affiliation() == affiliation, "Region affiliation should already be established");
      set_state(_regular);
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular allocation");
  }
}

// Change affiliation to YOUNG_GENERATION if _state is not _pinned_cset, _regular, or _pinned.  This implements
// behavior previously performed as a side effect of make_regular_bypass().
void ShenandoahHeapRegion::make_young_maybe() {
  shenandoah_assert_heaplocked();
  switch (_state) {
   case _empty_uncommitted:
   case _empty_committed:
   case _cset:
   case _humongous_start:
   case _humongous_cont:
     if (affiliation() != YOUNG_GENERATION) {
       if (is_old()) {
         ShenandoahHeap::heap()->old_generation()->decrement_affiliated_region_count();
       }
       set_affiliation(YOUNG_GENERATION);
       ShenandoahHeap::heap()->young_generation()->increment_affiliated_region_count();
     }
     return;
   case _pinned_cset:
   case _regular:
   case _pinned:
     return;
   default:
     assert(false, "Unexpected _state in make_young_maybe");
  }
}

void ShenandoahHeapRegion::make_regular_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress() || ShenandoahHeap::heap()->is_degenerated_gc_in_progress(),
          "only for full or degen GC");
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
    case _cset:
    case _humongous_start:
    case _humongous_cont:
      set_state(_regular);
      return;
    case _pinned_cset:
      set_state(_pinned);
      return;
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular bypass");
  }
}

void ShenandoahHeapRegion::make_humongous_start() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start allocation");
  }
}

void ShenandoahHeapRegion::make_humongous_start_bypass(ShenandoahAffiliation affiliation) {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
  // Don't bother to account for affiliated regions during Full GC.  We recompute totals at end.
  set_affiliation(affiliation);
  reset_age();
  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start bypass");
  }
}

void ShenandoahHeapRegion::make_humongous_cont() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation allocation");
  }
}

void ShenandoahHeapRegion::make_humongous_cont_bypass(ShenandoahAffiliation affiliation) {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
  set_affiliation(affiliation);
  // Don't bother to account for affiliated regions during Full GC.  We recompute totals at end.
  reset_age();
  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation bypass");
  }
}

void ShenandoahHeapRegion::make_pinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() > 0, "Should have pins: " SIZE_FORMAT, pin_count());

  switch (_state) {
    case _regular:
      set_state(_pinned);
    case _pinned_cset:
    case _pinned:
      return;
    case _humongous_start:
      set_state(_pinned_humongous_start);
    case _pinned_humongous_start:
      return;
    case _cset:
      _state = _pinned_cset;
      return;
    default:
      report_illegal_transition("pinning");
  }
}

void ShenandoahHeapRegion::make_unpinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() == 0, "Should not have pins: " SIZE_FORMAT, pin_count());

  switch (_state) {
    case _pinned:
      assert(is_affiliated(), "Pinned region should be affiliated");
      set_state(_regular);
      return;
    case _regular:
    case _humongous_start:
      return;
    case _pinned_cset:
      set_state(_cset);
      return;
    case _pinned_humongous_start:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("unpinning");
  }
}

void ShenandoahHeapRegion::make_cset() {
  shenandoah_assert_heaplocked();
  // Leave age untouched.  We need to consult the age when we are deciding whether to promote evacuated objects.
  switch (_state) {
    case _regular:
      set_state(_cset);
    case _cset:
      return;
    default:
      report_illegal_transition("cset");
  }
}

void ShenandoahHeapRegion::make_trash() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _humongous_start:
    case _humongous_cont:
    {
      // Reclaiming a humongous region also reclaims its humongous waste.  When this region is eventually recycled,
      // we'll reclaim its used memory.  At recycle time, we no longer recognize this as a humongous region.
      decrement_humongous_waste();
    }
    case _cset:
      // Reclaiming cset regions
    case _regular:
      // Immediate region reclaim
      set_state(_trash);
      return;
    default:
      report_illegal_transition("trashing");
  }
}

void ShenandoahHeapRegion::make_trash_immediate() {
  make_trash();

  // On this path, we know there are no marked objects in the region,
  // so tell the marking context about it to bypass bitmap resets.
  assert(ShenandoahHeap::heap()->active_generation()->is_mark_complete(), "Marking should be complete here.");
  ShenandoahHeap::heap()->marking_context()->reset_top_bitmap(this);
}

void ShenandoahHeapRegion::make_empty() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _trash:
      set_state(_empty_committed);
      _empty_time = os::elapsedTime();
      return;
    default:
      report_illegal_transition("emptying");
  }
}

void ShenandoahHeapRegion::make_uncommitted() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _empty_committed:
      do_uncommit();
      set_state(_empty_uncommitted);
      return;
    default:
      report_illegal_transition("uncommitting");
  }
}

void ShenandoahHeapRegion::make_committed_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      set_state(_empty_committed);
      return;
    default:
      report_illegal_transition("commit bypass");
  }
}

void ShenandoahHeapRegion::reset_alloc_metadata() {
  _tlab_allocs = 0;
  _gclab_allocs = 0;
  _plab_allocs = 0;
}

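// Shared allocations are whatever portion of used() was not satisfied from TLABs, GCLABs, or PLABs.
// The lab counters are tracked in words, so convert them to bytes before subtracting.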
size_t ShenandoahHeapRegion::get_shared_allocs() const {
  return used() - (_tlab_allocs + _gclab_allocs + _plab_allocs) * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_tlab_allocs() const {
  return _tlab_allocs * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_gclab_allocs() const {
  return _gclab_allocs * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_plab_allocs() const {
  return _plab_allocs * HeapWordSize;
}

void ShenandoahHeapRegion::set_live_data(size_t s) {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = (s >> LogHeapWordSize);
}

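// Print a one-line summary of this region: index, state code, affiliation, the bottom/top/end pointers,
// TAMS and update watermark, plus used, lab, shared, and live sizes and the pin count.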
void ShenandoahHeapRegion::print_on(outputStream* st) const {
  st->print("|");
  st->print(SIZE_FORMAT_W(5), this->_index);

  switch (_state) {
    case _empty_uncommitted:
      st->print("|EU ");
      break;
    case _empty_committed:
      st->print("|EC ");
      break;
    case _regular:
      st->print("|R  ");
      break;
    case _humongous_start:
      st->print("|H  ");
      break;
    case _pinned_humongous_start:
      st->print("|HP ");
      break;
    case _humongous_cont:
      st->print("|HC ");
      break;
    case _cset:
      st->print("|CS ");
      break;
    case _trash:
      st->print("|T  ");
      break;
    case _pinned:
      st->print("|P  ");
      break;
    case _pinned_cset:
      st->print("|CSP");
      break;
    default:
      ShouldNotReachHere();
  }

  st->print("|%s", shenandoah_affiliation_code(affiliation()));

#define SHR_PTR_FORMAT "%12" PRIxPTR

  st->print("|BTE " SHR_PTR_FORMAT  ", " SHR_PTR_FORMAT ", " SHR_PTR_FORMAT,
            p2i(bottom()), p2i(top()), p2i(end()));
  st->print("|TAMS " SHR_PTR_FORMAT,
            p2i(ShenandoahHeap::heap()->marking_context()->top_at_mark_start(const_cast<ShenandoahHeapRegion*>(this))));
  st->print("|UWM " SHR_PTR_FORMAT,
            p2i(_update_watermark));
  st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()),                proper_unit_for_byte_size(used()));
  st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()),     proper_unit_for_byte_size(get_tlab_allocs()));
  st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()),    proper_unit_for_byte_size(get_gclab_allocs()));
  if (ShenandoahHeap::heap()->mode()->is_generational()) {
    st->print("|P " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_plab_allocs()),   proper_unit_for_byte_size(get_plab_allocs()));
  }
  st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()),   proper_unit_for_byte_size(get_shared_allocs()));
  st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes()));
  st->print("|CP " SIZE_FORMAT_W(3), pin_count());
  st->cr();

#undef SHR_PTR_FORMAT
}

// oop_iterate without closure and without cancellation.  Always returns true.
bool ShenandoahHeapRegion::oop_fill_and_coalesce_without_cancel() {
  HeapWord* obj_addr = resume_coalesce_and_fill();

  assert(!is_humongous(), "No need to fill or coalesce humongous regions");
  if (!is_active()) {
    end_preemptible_coalesce_and_fill();
    return true;
  }

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahMarkingContext* marking_context = heap->marking_context();
  // All objects above TAMS are considered live even though their mark bits will not be set.  Note that young-
  // gen evacuations that interrupt a long-running old-gen concurrent mark may promote objects into old-gen
  // while the old-gen concurrent marking is ongoing.  These newly promoted objects will reside above TAMS
  // and will be treated as live during the current old-gen marking pass, even though they will not be
  // explicitly marked.
  HeapWord* t = marking_context->top_at_mark_start(this);

  // Expect marking to be completed before these threads invoke this service.
  assert(heap->active_generation()->is_mark_complete(), "sanity");
  while (obj_addr < t) {
    oop obj = cast_to_oop(obj_addr);
    if (marking_context->is_marked(obj)) {
      assert(obj->klass() != nullptr, "klass should not be nullptr");
      obj_addr += obj->size();
    } else {
      // Object is not marked.  Coalesce and fill dead object with dead neighbors.
      HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
      assert(next_marked_obj <= t, "next marked object cannot exceed top");
      size_t fill_size = next_marked_obj - obj_addr;
      assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated objects known to be larger than min_size");
      ShenandoahHeap::fill_with_object(obj_addr, fill_size);
      heap->card_scan()->coalesce_objects(obj_addr, fill_size);
      obj_addr = next_marked_obj;
    }
  }
  // Mark that this region has been coalesced and filled
  end_preemptible_coalesce_and_fill();
  return true;
}

// oop_iterate without closure; returns true if completed without cancellation.
bool ShenandoahHeapRegion::oop_fill_and_coalesce() {
  HeapWord* obj_addr = resume_coalesce_and_fill();
  // Consider yielding to cancel/preemption request after this many coalesce operations (skip marked, or coalesce free).
  const size_t preemption_stride = 128;

  assert(!is_humongous(), "No need to fill or coalesce humongous regions");
  if (!is_active()) {
    end_preemptible_coalesce_and_fill();
    return true;
  }

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahMarkingContext* marking_context = heap->marking_context();
  // All objects above TAMS are considered live even though their mark bits will not be set.  Note that young-
  // gen evacuations that interrupt a long-running old-gen concurrent mark may promote objects into old-gen
  // while the old-gen concurrent marking is ongoing.  These newly promoted objects will reside above TAMS
  // and will be treated as live during the current old-gen marking pass, even though they will not be
  // explicitly marked.
  HeapWord* t = marking_context->top_at_mark_start(this);

  // Expect marking to be completed before these threads invoke this service.
  assert(heap->active_generation()->is_mark_complete(), "sanity");

  size_t ops_before_preempt_check = preemption_stride;
  while (obj_addr < t) {
    oop obj = cast_to_oop(obj_addr);
    if (marking_context->is_marked(obj)) {
      assert(obj->klass() != nullptr, "klass should not be nullptr");
      obj_addr += obj->size();
    } else {
      // Object is not marked.  Coalesce and fill dead object with dead neighbors.
      HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
      assert(next_marked_obj <= t, "next marked object cannot exceed top");
      size_t fill_size = next_marked_obj - obj_addr;
      assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
      ShenandoahHeap::fill_with_object(obj_addr, fill_size);
      heap->card_scan()->coalesce_objects(obj_addr, fill_size);
      obj_addr = next_marked_obj;
    }
    if (ops_before_preempt_check-- == 0) {
      if (heap->cancelled_gc()) {
        suspend_coalesce_and_fill(obj_addr);
        return false;
      }
      ops_before_preempt_check = preemption_stride;
    }
  }
  // Mark that this region has been coalesced and filled
  end_preemptible_coalesce_and_fill();
  return true;
}

void ShenandoahHeapRegion::global_oop_iterate_and_fill_dead(OopIterateClosure* blk) {
  if (!is_active()) return;
  if (is_humongous()) {
    // No need to fill dead within humongous regions.  Either the entire region is dead, or the entire region is
    // unchanged.  A humongous region holds no more than one humongous object.
    oop_iterate_humongous(blk);
  } else {
    global_oop_iterate_objects_and_fill_dead(blk);
  }
}

void ShenandoahHeapRegion::global_oop_iterate_objects_and_fill_dead(OopIterateClosure* blk) {
  assert(!is_humongous(), "no humongous region here");
  HeapWord* obj_addr = bottom();

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahMarkingContext* marking_context = heap->marking_context();
  RememberedScanner* rem_set_scanner = heap->card_scan();
  // Objects allocated above TAMS are not marked, but are considered live for purposes of current GC efforts.
  HeapWord* t = marking_context->top_at_mark_start(this);

  assert(heap->active_generation()->is_mark_complete(), "sanity");

  while (obj_addr < t) {
    oop obj = cast_to_oop(obj_addr);
    if (marking_context->is_marked(obj)) {
      assert(obj->klass() != nullptr, "klass should not be nullptr");
      // when promoting an entire region, we have to register the marked objects as well
      obj_addr += obj->oop_iterate_size(blk);
    } else {
      // Object is not marked.  Coalesce and fill dead object with dead neighbors.
      HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
      assert(next_marked_obj <= t, "next marked object cannot exceed top");
      size_t fill_size = next_marked_obj - obj_addr;
      assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated objects known to be larger than min_size");
      ShenandoahHeap::fill_with_object(obj_addr, fill_size);
      // coalesce_objects() unregisters all but first object subsumed within coalesced range.
      rem_set_scanner->coalesce_objects(obj_addr, fill_size);
      obj_addr = next_marked_obj;
    }
  }

  // Any object above TAMS and below top() is considered live.
  t = top();
  while (obj_addr < t) {
    oop obj = cast_to_oop(obj_addr);
    obj_addr += obj->oop_iterate_size(blk);
  }
}

// DO NOT CANCEL.  If this worker thread has accepted responsibility for scanning a particular range of addresses, it
// must finish the work before it can be cancelled.
void ShenandoahHeapRegion::oop_iterate_humongous_slice(OopIterateClosure* blk, bool dirty_only,
                                                       HeapWord* start, size_t words, bool write_table) {
  assert(words % CardTable::card_size_in_words() == 0, "Humongous iteration must span whole number of cards");
  assert(is_humongous(), "only humongous region here");
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Find head.
  ShenandoahHeapRegion* r = humongous_start_region();
  assert(r->is_humongous_start(), "need humongous head here");
  assert(CardTable::card_size_in_words() * (words / CardTable::card_size_in_words()) == words,
         "slice must be integral number of cards");

  oop obj = cast_to_oop(r->bottom());
  RememberedScanner* scanner = ShenandoahHeap::heap()->card_scan();
  size_t card_index = scanner->card_index_for_addr(start);
  size_t num_cards = words / CardTable::card_size_in_words();

  if (dirty_only) {
    if (write_table) {
      while (num_cards-- > 0) {
        if (scanner->is_write_card_dirty(card_index++)) {
          obj->oop_iterate(blk, MemRegion(start, start + CardTable::card_size_in_words()));
        }
        start += CardTable::card_size_in_words();
      }
    } else {
      while (num_cards-- > 0) {
        if (scanner->is_card_dirty(card_index++)) {
          obj->oop_iterate(blk, MemRegion(start, start + CardTable::card_size_in_words()));
        }
        start += CardTable::card_size_in_words();
      }
    }
  } else {
    // Scan all data, regardless of whether cards are dirty
    obj->oop_iterate(blk, MemRegion(start, start + num_cards * CardTable::card_size_in_words()));
  }
}

void ShenandoahHeapRegion::oop_iterate_humongous(OopIterateClosure* blk, HeapWord* start, size_t words) {
  assert(is_humongous(), "only humongous region here");
  // Find head.
  ShenandoahHeapRegion* r = humongous_start_region();
  assert(r->is_humongous_start(), "need humongous head here");
  oop obj = cast_to_oop(r->bottom());
  obj->oop_iterate(blk, MemRegion(start, start + words));
}

void ShenandoahHeapRegion::oop_iterate_humongous(OopIterateClosure* blk) {
  assert(is_humongous(), "only humongous region here");
  // Find head.
  ShenandoahHeapRegion* r = humongous_start_region();
  assert(r->is_humongous_start(), "need humongous head here");
  oop obj = cast_to_oop(r->bottom());
  obj->oop_iterate(blk, MemRegion(bottom(), top()));
}

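// Walk backwards from this region until the humongous start region that anchors this humongous object is found.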
ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(is_humongous(), "Must be a part of the humongous region");
  size_t i = index();
  ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this);
  while (!r->is_humongous_start()) {
    assert(i > 0, "Sanity");
    i--;
    r = heap->get_region(i);
    assert(r->is_humongous(), "Must be a part of the humongous region");
  }
  assert(r->is_humongous_start(), "Must be");
  return r;
}

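// Return a trashed region to the free state: reset top, liveness, allocation counters, TAMS, and the update
// watermark, then mark the region empty and unaffiliated so the free set can reuse it.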
void ShenandoahHeapRegion::recycle() {
  shenandoah_assert_heaplocked();
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahGeneration* generation = heap->generation_for(affiliation());
  heap->decrease_used(generation, used());

  set_top(bottom());
  clear_live_data();

  reset_alloc_metadata();

  heap->marking_context()->reset_top_at_mark_start(this);
  set_update_watermark(bottom());

  make_empty();
  ShenandoahHeap::heap()->generation_for(affiliation())->decrement_affiliated_region_count();
  set_affiliation(FREE);
  if (ZapUnusedHeapArea) {
    SpaceMangler::mangle_region(MemRegion(bottom(), end()));
  }
}

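// Find the start of the block (object) containing p by walking object sizes forward from bottom();
// addresses at or above top() simply return top().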
HeapWord* ShenandoahHeapRegion::block_start(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += cast_to_oop(cur)->size();
    }
    shenandoah_assert_correct(nullptr, cast_to_oop(last));
    return last;
  }
}

size_t ShenandoahHeapRegion::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p < top()) {
    return cast_to_oop(p)->size();
  } else {
    assert(p == top(), "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

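// Compute and publish the region geometry (RegionSizeBytes/Words with their shifts and masks), the humongous
// threshold, and the maximum TLAB size, based on the maximum heap size and the Shenandoah region-size flags.
// Returns the maximum heap size, possibly adjusted for page or card-table alignment.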
size_t ShenandoahHeapRegion::setup_sizes(size_t max_heap_size) {
  // Absolute minimums we should not ever break.
  static const size_t MIN_REGION_SIZE = 256*K;

  if (FLAG_IS_DEFAULT(ShenandoahMinRegionSize)) {
    FLAG_SET_DEFAULT(ShenandoahMinRegionSize, MIN_REGION_SIZE);
  }

  // Generational Shenandoah needs this alignment for card tables.
  if (strcmp(ShenandoahGCMode, "generational") == 0) {
    max_heap_size = align_up(max_heap_size, CardTable::ct_max_alignment_constraint());
  }

  size_t region_size;
  if (FLAG_IS_DEFAULT(ShenandoahRegionSize)) {
    if (ShenandoahMinRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
                      MIN_NUM_REGIONS,
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than minimum region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(MIN_REGION_SIZE),         proper_unit_for_byte_size(MIN_REGION_SIZE));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MinTLABSize) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than TLAB size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(MinTLABSize),             proper_unit_for_byte_size(MinTLABSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than min region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize),
                      byte_size_in_proper_unit(MIN_REGION_SIZE),         proper_unit_for_byte_size(MIN_REGION_SIZE));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message);
    }
    if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Minimum (" SIZE_FORMAT "%s) should be larger than maximum (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message);
    }

    // We rapidly expand to max_heap_size in most scenarios, so that is the measure
    // for usual heap sizes. Do not depend on initial_heap_size here.
    region_size = max_heap_size / ShenandoahTargetNumRegions;

    // Now make sure that we don't go over or under our limits.
    region_size = MAX2(ShenandoahMinRegionSize, region_size);
    region_size = MIN2(ShenandoahMaxRegionSize, region_size);

  } else {
    if (ShenandoahRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
                              "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
                      MIN_NUM_REGIONS,
                      byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    if (ShenandoahRegionSize < ShenandoahMinRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "%s) should be larger than min region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize),
                      byte_size_in_proper_unit(ShenandoahMinRegionSize),  proper_unit_for_byte_size(ShenandoahMinRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    if (ShenandoahRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "%s) should be lower than max region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize),
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize),  proper_unit_for_byte_size(ShenandoahMaxRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    region_size = ShenandoahRegionSize;
  }

  // Make sure region size and heap size are page aligned.
  // If large pages are used, we ensure that region size is aligned to large page size if
  // heap size is large enough to accommodate minimal number of regions. Otherwise, we align
  // region size to regular page size.

  // Figure out the page size to use, and align the heap up to that page size.
  size_t page_size = os::vm_page_size();
  if (UseLargePages) {
    size_t large_page_size = os::large_page_size();
    max_heap_size = align_up(max_heap_size, large_page_size);
    if ((max_heap_size / align_up(region_size, large_page_size)) >= MIN_NUM_REGIONS) {
      page_size = large_page_size;
    } else {
      // Should have been checked during argument initialization
      assert(!ShenandoahUncommit, "Uncommit requires region size aligns to large page size");
    }
  } else {
    max_heap_size = align_up(max_heap_size, page_size);
  }

  // Align region size to page size
  region_size = align_up(region_size, page_size);

  int region_size_log = log2i(region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = size_t(1) << region_size_log;

  // Now, set up the globals.
  guarantee(RegionSizeBytesShift == 0, "we should only set it once");
  RegionSizeBytesShift = (size_t)region_size_log;

  guarantee(RegionSizeWordsShift == 0, "we should only set it once");
  RegionSizeWordsShift = RegionSizeBytesShift - LogHeapWordSize;

  guarantee(RegionSizeBytes == 0, "we should only set it once");
  RegionSizeBytes = region_size;
  RegionSizeWords = RegionSizeBytes >> LogHeapWordSize;
  assert (RegionSizeWords*HeapWordSize == RegionSizeBytes, "sanity");

  guarantee(RegionSizeWordsMask == 0, "we should only set it once");
  RegionSizeWordsMask = RegionSizeWords - 1;

  guarantee(RegionSizeBytesMask == 0, "we should only set it once");
  RegionSizeBytesMask = RegionSizeBytes - 1;

  guarantee(RegionCount == 0, "we should only set it once");
  RegionCount = align_up(max_heap_size, RegionSizeBytes) / RegionSizeBytes;
  guarantee(RegionCount >= MIN_NUM_REGIONS, "Should have at least minimum regions");

  guarantee(HumongousThresholdWords == 0, "we should only set it once");
  HumongousThresholdWords = RegionSizeWords * ShenandoahHumongousThreshold / 100;
  HumongousThresholdWords = align_down(HumongousThresholdWords, MinObjAlignment);
  assert (HumongousThresholdWords <= RegionSizeWords, "sanity");

  guarantee(HumongousThresholdBytes == 0, "we should only set it once");
  HumongousThresholdBytes = HumongousThresholdWords * HeapWordSize;
  assert (HumongousThresholdBytes <= RegionSizeBytes, "sanity");

  // The rationale for trimming the TLAB sizes has to do with the raciness in
  // TLAB allocation machinery. It may happen that the TLAB sizing policy polls Shenandoah
  // about the next free size, gets the answer for region #N, goes away for a while, then
  // tries to allocate in region #N, and fails because some other thread has claimed part
  // of region #N; the freeset allocation code then has to retire region #N
  // before moving the allocation to region #N+1.
  //
  // The worst case is realized when the "answer" is "region size", which means a thread could
  // prematurely retire an entire region. Having smaller TLABs does not fix that
  // completely, but reduces the probability of too wasteful region retirement.
  // With the current divisor, we will waste no more than 1/8 of the region size in the worst
  // case. This also has a secondary effect on collection set selection: even under
  // the race, the regions would be at least 7/8 used, which allows relying on
  // "used" - "live" for cset selection. Otherwise, we could get a fragmented region
  // below the garbage threshold that would never be considered for collection.
  //
  // The whole thing is mitigated if Elastic TLABs are enabled.
  //
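  // Illustrative example (not tied to any particular configuration): with 2 MB regions on a 64-bit VM,
  // RegionSizeWords is 262144; without elastic TLABs the divisor of 8 caps MaxTLABSizeWords at 32768 words
  // (256 KB), which bounds the worst-case waste from prematurely retiring a region at 1/8 of the region.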
  guarantee(MaxTLABSizeWords == 0, "we should only set it once");
  MaxTLABSizeWords = MIN2(ShenandoahElasticTLAB ? RegionSizeWords : (RegionSizeWords / 8), HumongousThresholdWords);
  MaxTLABSizeWords = align_down(MaxTLABSizeWords, MinObjAlignment);

  guarantee(MaxTLABSizeBytes == 0, "we should only set it once");
  MaxTLABSizeBytes = MaxTLABSizeWords * HeapWordSize;
  assert (MaxTLABSizeBytes > MinTLABSize, "should be larger");

  return max_heap_size;
}

void ShenandoahHeapRegion::do_commit() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->is_heap_region_special() && !os::commit_memory((char *) bottom(), RegionSizeBytes, false)) {
    report_java_out_of_memory("Unable to commit region");
  }
  if (!heap->commit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to commit bitmaps for region");
  }
  if (AlwaysPreTouch) {
    os::pretouch_memory(bottom(), end(), heap->pretouch_heap_page_size());
  }
  heap->increase_committed(ShenandoahHeapRegion::region_size_bytes());
}

void ShenandoahHeapRegion::do_uncommit() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->is_heap_region_special() && !os::uncommit_memory((char *) bottom(), RegionSizeBytes)) {
    report_java_out_of_memory("Unable to uncommit region");
  }
  if (!heap->uncommit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to uncommit bitmaps for region");
  }
  heap->decrease_committed(ShenandoahHeapRegion::region_size_bytes());
}

void ShenandoahHeapRegion::set_state(RegionState to) {
  EventShenandoahHeapRegionStateChange evt;
  if (evt.should_commit()) {
    evt.set_index((unsigned) index());
    evt.set_start((uintptr_t)bottom());
    evt.set_used(used());
    evt.set_from(_state);
    evt.set_to(to);
    evt.commit();
  }
  _state = to;
}

void ShenandoahHeapRegion::record_pin() {
  Atomic::add(&_critical_pins, (size_t)1);
}

void ShenandoahHeapRegion::record_unpin() {
  assert(pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", index());
  Atomic::sub(&_critical_pins, (size_t)1);
}

size_t ShenandoahHeapRegion::pin_count() const {
  return Atomic::load(&_critical_pins);
}

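// Change the generation affiliation of this region. In generational mode this also enforces some sanity
// checks (e.g. a FREE region must not hold live data) and resets the age when a region becomes YOUNG;
// accounting of per-generation used and affiliated-region totals is left to the callers.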
void ShenandoahHeapRegion::set_affiliation(ShenandoahAffiliation new_affiliation) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahAffiliation region_affiliation = heap->region_affiliation(this);
  {
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    log_debug(gc)("Setting affiliation of Region " SIZE_FORMAT " from %s to %s, top: " PTR_FORMAT ", TAMS: " PTR_FORMAT
                  ", watermark: " PTR_FORMAT ", top_bitmap: " PTR_FORMAT,
                  index(), shenandoah_affiliation_name(region_affiliation), shenandoah_affiliation_name(new_affiliation),
                  p2i(top()), p2i(ctx->top_at_mark_start(this)), p2i(_update_watermark), p2i(ctx->top_bitmap(this)));
  }

#ifdef ASSERT
  {
    // During full GC, heap->complete_marking_context() is not valid and may equal nullptr.
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    size_t idx = this->index();
    HeapWord* top_bitmap = ctx->top_bitmap(this);

    assert(ctx->is_bitmap_clear_range(top_bitmap, _end),
           "Region " SIZE_FORMAT ", bitmap should be clear between top_bitmap: " PTR_FORMAT " and end: " PTR_FORMAT, idx,
           p2i(top_bitmap), p2i(_end));
  }
#endif

  if (region_affiliation == new_affiliation) {
    return;
  }

  if (!heap->mode()->is_generational()) {
    log_trace(gc)("Changing affiliation of region %zu from %s to %s",
                  index(), affiliation_name(), shenandoah_affiliation_name(new_affiliation));
    heap->set_affiliation(this, new_affiliation);
    return;
  }

  switch (new_affiliation) {
    case FREE:
      assert(!has_live(), "Free region should not have live data");
      break;
    case YOUNG_GENERATION:
      reset_age();
      break;
    case OLD_GENERATION:
      // TODO: should we reset_age() for OLD as well?  Examine invocations of set_affiliation(). Some contexts redundantly
      //       invoke reset_age().
      break;
    default:
      ShouldNotReachHere();
      return;
  }
  heap->set_affiliation(this, new_affiliation);
}

// When we promote a region in place, we can continue to use the established marking context to guide subsequent remembered
// set scans of this region's content.  The region will be coalesced and filled prior to the next old-gen marking effort.
// We identify the entirety of the region as DIRTY to force the next remembered set scan to identify the "interesting pointers"
// contained herein.
void ShenandoahHeapRegion::promote_in_place() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahMarkingContext* marking_context = heap->marking_context();
  HeapWord* tams = marking_context->top_at_mark_start(this);
  assert(heap->active_generation()->is_mark_complete(), "sanity");
  assert(is_young(), "Only young regions can be promoted");
  assert(is_regular(), "Use different service to promote humongous regions");
  assert(age() >= InitialTenuringThreshold, "Only promote regions that are sufficiently aged");

  ShenandoahOldGeneration* old_gen = heap->old_generation();
  ShenandoahYoungGeneration* young_gen = heap->young_generation();
  size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  {
    ShenandoahHeapLocker locker(heap->lock());

    HeapWord* update_watermark = get_update_watermark();

    // Now that this region is affiliated with old, we can allow it to receive allocations, though it may not be in the
    // is_collector_free range.
    restore_top_before_promote();

    size_t region_capacity = free();
    size_t region_used = used();

    // The update_watermark was likely established while we had the artificially high value of top.  Make it sane now.
    assert(update_watermark >= top(), "original top cannot exceed preserved update_watermark");
    set_update_watermark(top());

    // Unconditionally transfer one region from young to old to represent the newly promoted region.
    // This expands old and shrinks new by the size of one region.  Strictly, we do not "need" to expand old
    // if there are already enough unaffiliated regions in old to account for this newly promoted region.
    // However, if we do not transfer the capacities, we end up reducing the amount of memory that would have
    // otherwise been available to hold old evacuations, because old available is max_capacity - used and now
    // we would be trading a fully empty region for a partially used region.

    young_gen->decrease_used(region_used);
    young_gen->decrement_affiliated_region_count();

    // transfer_to_old() increases capacity of old and decreases capacity of young
    heap->generation_sizer()->force_transfer_to_old(1);
    set_affiliation(OLD_GENERATION);

    old_gen->increment_affiliated_region_count();
    old_gen->increase_used(region_used);

    // add_old_collector_free_region() increases promoted_reserve() if available space exceeds PLAB::min_size()
    heap->free_set()->add_old_collector_free_region(this);
  }

  assert(top() == tams, "Cannot promote regions in place if top has advanced beyond TAMS");

  // Since this region may have served previously as OLD, it may hold obsolete object range info.
  heap->card_scan()->reset_object_range(bottom(), end());
  heap->card_scan()->mark_range_as_dirty(bottom(), top() - bottom());

  // TODO: use an existing coalesce-and-fill function rather than
  // replicating the code here.
  HeapWord* obj_addr = bottom();
  while (obj_addr < tams) {
    oop obj = cast_to_oop(obj_addr);
    if (marking_context->is_marked(obj)) {
      assert(obj->klass() != nullptr, "klass should not be nullptr");
      // This thread is responsible for registering all objects in this region.  No need for lock.
      heap->card_scan()->register_object_without_lock(obj_addr);
      obj_addr += obj->size();
    } else {
      HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, tams);
      assert(next_marked_obj <= tams, "next marked object cannot exceed tams");
      size_t fill_size = next_marked_obj - obj_addr;
      assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated objects known to be larger than min_size");
      ShenandoahHeap::fill_with_object(obj_addr, fill_size);
      heap->card_scan()->register_object_without_lock(obj_addr);
      obj_addr = next_marked_obj;
    }
  }

  // We do not need to scan above TAMS because top equals tams
  assert(obj_addr == tams, "Expect loop to terminate when obj_addr equals tams");
}

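// Promote a humongous object (the start region plus all of its continuation regions) from young to old in
// place: adjust the generation accounting, transfer region capacity from young to old, re-register the object
// with the remembered set, and mark its cards clean or dirty depending on whether it can hold interesting
// pointers.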
void ShenandoahHeapRegion::promote_humongous() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahMarkingContext* marking_context = heap->marking_context();
  assert(heap->active_generation()->is_mark_complete(), "sanity");
  assert(is_young(), "Only young regions can be promoted");
  assert(is_humongous_start(), "Should not promote humongous continuation in isolation");
  assert(age() >= InitialTenuringThreshold, "Only promote regions that are sufficiently aged");

  ShenandoahGeneration* old_generation = heap->old_generation();
  ShenandoahGeneration* young_generation = heap->young_generation();

  oop obj = cast_to_oop(bottom());
  assert(marking_context->is_marked(obj), "promoted humongous object should be alive");

  // TODO: Consider not promoting humongous objects that represent primitive arrays.  Leaving a primitive array
  // (obj->is_typeArray()) in young-gen is harmless because these objects are never relocated and they are not
  // scanned.  Leaving primitive arrays in young-gen memory allows their memory to be reclaimed more quickly when
  // it becomes garbage.  Better to not make this change until sizes of young-gen and old-gen are completely
  // adaptive, as leaving primitive arrays in young-gen might be perceived as an "astonishing result" by someone
  // who has carefully analyzed the required sizes of an application's young-gen and old-gen.
  size_t used_bytes = obj->size() * HeapWordSize;
  size_t spanned_regions = ShenandoahHeapRegion::required_regions(used_bytes);
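  // Humongous waste is the unused tail of the last spanned region: the capacity of all spanned regions
  // minus the footprint of the humongous object itself.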
  size_t humongous_waste = spanned_regions * ShenandoahHeapRegion::region_size_bytes() - obj->size() * HeapWordSize;
  size_t index_limit = index() + spanned_regions;
  {
    // We need to grab the heap lock in order to avoid a race when changing the affiliations of spanned_regions from
    // young to old.
    ShenandoahHeapLocker locker(heap->lock());

    // We promote humongous objects unconditionally, without checking for availability.  We adjust
    // usage totals, including humongous waste, after evacuation is done.
    log_debug(gc)("promoting humongous region " SIZE_FORMAT ", spanning " SIZE_FORMAT, index(), spanned_regions);

    young_generation->decrease_used(used_bytes);
    young_generation->decrease_humongous_waste(humongous_waste);
    young_generation->decrease_affiliated_region_count(spanned_regions);

    // transfer_to_old() increases capacity of old and decreases capacity of young
    heap->generation_sizer()->force_transfer_to_old(spanned_regions);

    // For this region and each humongous continuation region spanned by this humongous object, change
    // affiliation to OLD_GENERATION and adjust the generation-use tallies.  The remnant of memory
    // in the last humongous region that is not spanned by obj is currently not used.
    for (size_t i = index(); i < index_limit; i++) {
      ShenandoahHeapRegion* r = heap->get_region(i);
      log_debug(gc)("promoting humongous region " SIZE_FORMAT ", from " PTR_FORMAT " to " PTR_FORMAT,
                    r->index(), p2i(r->bottom()), p2i(r->top()));
      // We mark the entire humongous object's range as dirty after loop terminates, so no need to dirty the range here
      r->set_affiliation(OLD_GENERATION);
    }

    old_generation->increase_affiliated_region_count(spanned_regions);
    old_generation->increase_used(used_bytes);
    old_generation->increase_humongous_waste(humongous_waste);
  }

  // Since this region may have served previously as OLD, it may hold obsolete object range info.
  heap->card_scan()->reset_object_range(bottom(), bottom() + spanned_regions * ShenandoahHeapRegion::region_size_words());
  // Since the humongous region holds only one object, no lock is necessary for this register_object() invocation.
  heap->card_scan()->register_object_without_lock(bottom());

  if (obj->is_typeArray()) {
    // Primitive arrays don't need to be scanned.
    log_debug(gc)("Clean cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT,
                  index(), p2i(bottom()), p2i(bottom() + obj->size()));
    heap->card_scan()->mark_range_as_clean(bottom(), obj->size());
  } else {
    log_debug(gc)("Dirty cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT,
                  index(), p2i(bottom()), p2i(bottom() + obj->size()));
    heap->card_scan()->mark_range_as_dirty(bottom(), obj->size());
  }
}

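// Subtract this humongous region's unused space (if any) from its generation's humongous-waste tally.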
void ShenandoahHeapRegion::decrement_humongous_waste() const {
  assert(is_humongous(), "Should only use this for humongous regions");
  size_t waste_bytes = free();
  if (waste_bytes > 0) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahGeneration* generation = heap->generation_for(affiliation());
    heap->decrease_humongous_waste(generation, waste_bytes);
  }
}