/*
 * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/shenandoahCardTable.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/powerOfTwo.hpp"

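// The sizing parameters below are process-wide constants: setup_sizes() computes
// each of them exactly once during heap initialization (guarded there by
// "we should only set it once" guarantees), and they are read-only afterwards.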
size_t ShenandoahHeapRegion::RegionCount = 0;
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
size_t ShenandoahHeapRegion::RegionSizeWords = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;
size_t ShenandoahHeapRegion::HumongousThresholdBytes = 0;
size_t ShenandoahHeapRegion::HumongousThresholdWords = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0;

ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed) :
  _index(index),
  _bottom(start),
  _end(start + RegionSizeWords),
  _new_top(NULL),
  _empty_time(os::elapsedTime()),
  _state(committed ? _empty_committed : _empty_uncommitted),
  _top(start),
  _tlab_allocs(0),
  _gclab_allocs(0),
  _plab_allocs(0),
  _has_young_lab(false),
  _live_data(0),
  _critical_pins(0),
  _update_watermark(start),
  _age(0) {

  assert(Universe::on_page_boundary(_bottom) && Universe::on_page_boundary(_end),
         "invalid space boundaries");
  if (ZapUnusedHeapArea && committed) {
    SpaceMangler::mangle_region(MemRegion(_bottom, _end));
  }
}

void ShenandoahHeapRegion::report_illegal_transition(const char *method) {
  stringStream ss;
  ss.print("Illegal region state transition from \"%s\", at %s\n  ", region_state_to_string(_state), method);
  print_on(&ss);
  fatal("%s", ss.freeze());
}

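// A summary of the region state machine implemented by the make_*() methods
// below (every transition happens under the heap lock; anything else trips
// report_illegal_transition()):
//
//   _empty_uncommitted <-> _empty_committed -> _regular | _humongous_start | _humongous_cont
//   _regular -> _cset
//   _regular | _cset | _humongous_start | _humongous_cont -> _trash -> _empty_committed
//
// Pinning overlays these states: _regular <-> _pinned, _cset <-> _pinned_cset,
// and _humongous_start <-> _pinned_humongous_start.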
void ShenandoahHeapRegion::make_regular_allocation(ShenandoahRegionAffiliation affiliation) {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // fall through
    case _empty_committed:
      set_affiliation(affiliation);
      set_state(_regular);
      // fall through
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular allocation");
  }
}

// Change affiliation to YOUNG_GENERATION if _state is not _pinned_cset, _regular, or _pinned.  This implements
// behavior previously performed as a side effect of make_regular_bypass().
void ShenandoahHeapRegion::make_young_maybe() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _empty_uncommitted:
    case _empty_committed:
    case _cset:
    case _humongous_start:
    case _humongous_cont:
      set_affiliation(YOUNG_GENERATION);
      return;
    case _pinned_cset:
    case _regular:
    case _pinned:
      return;
    default:
      assert(false, "Unexpected _state in make_young_maybe");
  }
}

void ShenandoahHeapRegion::make_regular_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress() || ShenandoahHeap::heap()->is_degenerated_gc_in_progress(),
          "only for full or degen GC");
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // fall through
    case _empty_committed:
    case _cset:
    case _humongous_start:
    case _humongous_cont:
      set_state(_regular);
      return;
    case _pinned_cset:
      set_state(_pinned);
      return;
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular bypass");
  }
}

void ShenandoahHeapRegion::make_humongous_start() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // fall through
    case _empty_committed:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start allocation");
  }
}

void ShenandoahHeapRegion::make_humongous_start_bypass(ShenandoahRegionAffiliation affiliation) {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
  set_affiliation(affiliation);
  reset_age();
  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start bypass");
  }
}

void ShenandoahHeapRegion::make_humongous_cont() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      // fall through
    case _empty_committed:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation allocation");
  }
}

void ShenandoahHeapRegion::make_humongous_cont_bypass(ShenandoahRegionAffiliation affiliation) {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
  set_affiliation(affiliation);
  reset_age();
  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation bypass");
  }
}

void ShenandoahHeapRegion::make_pinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() > 0, "Should have pins: " SIZE_FORMAT, pin_count());

  switch (_state) {
    case _regular:
      set_state(_pinned);
      // fall through
    case _pinned_cset:
    case _pinned:
      return;
    case _humongous_start:
      set_state(_pinned_humongous_start);
      // fall through
    case _pinned_humongous_start:
      return;
    case _cset:
      _state = _pinned_cset;
      return;
    default:
      report_illegal_transition("pinning");
  }
}

void ShenandoahHeapRegion::make_unpinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() == 0, "Should not have pins: " SIZE_FORMAT, pin_count());

  switch (_state) {
    case _pinned:
      assert(affiliation() != FREE, "Pinned region should not be FREE");
      set_state(_regular);
      return;
    case _regular:
    case _humongous_start:
      return;
    case _pinned_cset:
      set_state(_cset);
      return;
    case _pinned_humongous_start:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("unpinning");
  }
}

void ShenandoahHeapRegion::make_cset() {
  shenandoah_assert_heaplocked();
  // Leave age untouched.  We need to consult the age when we are deciding whether to promote evacuated objects.
  switch (_state) {
    case _regular:
      set_state(_cset);
      // fall through
    case _cset:
      return;
    default:
      report_illegal_transition("cset");
  }
}

void ShenandoahHeapRegion::make_trash() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _cset:
      // Reclaiming cset regions
    case _humongous_start:
    case _humongous_cont:
      // Reclaiming humongous regions
    case _regular:
      // Immediate region reclaim
      set_state(_trash);
      return;
    default:
      report_illegal_transition("trashing");
  }
}

void ShenandoahHeapRegion::make_trash_immediate() {
  make_trash();

  // On this path, we know there are no marked objects in the region,
  // so tell the marking context about it to bypass bitmap resets.
  assert(ShenandoahHeap::heap()->active_generation()->is_mark_complete(), "Marking should be complete here.");
  ShenandoahHeap::heap()->marking_context()->reset_top_bitmap(this);
}

void ShenandoahHeapRegion::make_empty() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _trash:
      set_state(_empty_committed);
      _empty_time = os::elapsedTime();
      return;
    default:
      report_illegal_transition("emptying");
  }
}

void ShenandoahHeapRegion::make_uncommitted() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _empty_committed:
      do_uncommit();
      set_state(_empty_uncommitted);
      return;
    default:
      report_illegal_transition("uncommitting");
  }
}

void ShenandoahHeapRegion::make_committed_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      set_state(_empty_committed);
      return;
    default:
      report_illegal_transition("commit bypass");
  }
}

void ShenandoahHeapRegion::reset_alloc_metadata() {
  _tlab_allocs = 0;
  _gclab_allocs = 0;
  _plab_allocs = 0;
}

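// Shared (non-LAB) allocations are not tallied directly: get_shared_allocs()
// infers them by subtracting the TLAB/GCLAB/PLAB word counts, scaled to bytes,
// from the region's total usage.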
size_t ShenandoahHeapRegion::get_shared_allocs() const {
  return used() - (_tlab_allocs + _gclab_allocs + _plab_allocs) * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_tlab_allocs() const {
  return _tlab_allocs * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_gclab_allocs() const {
  return _gclab_allocs * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_plab_allocs() const {
  return _plab_allocs * HeapWordSize;
}

void ShenandoahHeapRegion::set_live_data(size_t s) {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = (s >> LogHeapWordSize);
}

void ShenandoahHeapRegion::print_on(outputStream* st) const {
  st->print("|");
  st->print(SIZE_FORMAT_W(5), this->_index);

  switch (_state) {
    case _empty_uncommitted:
      st->print("|EU ");
      break;
    case _empty_committed:
      st->print("|EC ");
      break;
    case _regular:
      st->print("|R  ");
      break;
    case _humongous_start:
      st->print("|H  ");
      break;
    case _pinned_humongous_start:
      st->print("|HP ");
      break;
    case _humongous_cont:
      st->print("|HC ");
      break;
    case _cset:
      st->print("|CS ");
      break;
    case _trash:
      st->print("|T  ");
      break;
    case _pinned:
      st->print("|P  ");
      break;
    case _pinned_cset:
      st->print("|CSP");
      break;
    default:
      ShouldNotReachHere();
  }

  switch (ShenandoahHeap::heap()->region_affiliation(this)) {
    case ShenandoahRegionAffiliation::FREE:
      st->print("|F");
      break;
    case ShenandoahRegionAffiliation::YOUNG_GENERATION:
      st->print("|Y");
      break;
    case ShenandoahRegionAffiliation::OLD_GENERATION:
      st->print("|O");
      break;
    default:
      ShouldNotReachHere();
  }

#define SHR_PTR_FORMAT "%12" PRIxPTR

  st->print("|BTE " SHR_PTR_FORMAT  ", " SHR_PTR_FORMAT ", " SHR_PTR_FORMAT,
            p2i(bottom()), p2i(top()), p2i(end()));
  st->print("|TAMS " SHR_PTR_FORMAT,
            p2i(ShenandoahHeap::heap()->marking_context()->top_at_mark_start(const_cast<ShenandoahHeapRegion*>(this))));
  st->print("|UWM " SHR_PTR_FORMAT,
            p2i(_update_watermark));
  st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()),                proper_unit_for_byte_size(used()));
  st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()),     proper_unit_for_byte_size(get_tlab_allocs()));
  st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()),    proper_unit_for_byte_size(get_gclab_allocs()));
  if (ShenandoahHeap::heap()->mode()->is_generational()) {
    st->print("|P " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_plab_allocs()),   proper_unit_for_byte_size(get_plab_allocs()));
  }
  st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()),   proper_unit_for_byte_size(get_shared_allocs()));
  st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes()));
  st->print("|CP " SIZE_FORMAT_W(3), pin_count());
  st->cr();

#undef SHR_PTR_FORMAT
}
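// A printed row reads, left to right: region index, state tag (EU/EC/R/H/HP/
// HC/CS/T/P/CSP), affiliation (F/Y/O), bottom/top/end ("BTE"), TAMS, update
// watermark ("UWM"), then the usage breakdown: used (U), TLAB (T), GCLAB (G),
// PLAB (P, generational mode only), shared (S), live (L), and pin count (CP).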

// Like oop_iterate(), but without a closure and without cancellation checks.  Always returns true.
bool ShenandoahHeapRegion::oop_fill_and_coalesce_wo_cancel() {
  HeapWord* obj_addr = resume_coalesce_and_fill();

  assert(!is_humongous(), "No need to fill or coalesce humongous regions");
  if (!is_active()) {
    end_preemptible_coalesce_and_fill();
    return true;
  }

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahMarkingContext* marking_context = heap->marking_context();
  // All objects above TAMS are considered live even though their mark bits will not be set.  Note that young-
  // gen evacuations that interrupt a long-running old-gen concurrent mark may promote objects into old-gen
  // while the old-gen concurrent marking is ongoing.  These newly promoted objects will reside above TAMS
  // and will be treated as live during the current old-gen marking pass, even though they will not be
  // explicitly marked.
  HeapWord* t = marking_context->top_at_mark_start(this);

  // Expect marking to be completed before these threads invoke this service.
  assert(heap->active_generation()->is_mark_complete(), "sanity");
  while (obj_addr < t) {
    oop obj = cast_to_oop(obj_addr);
    if (marking_context->is_marked(obj)) {
      assert(obj->klass() != NULL, "klass should not be NULL");
      obj_addr += obj->size();
    } else {
      // Object is not marked.  Coalesce and fill dead object with dead neighbors.
      HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
      assert(next_marked_obj <= t, "next marked object cannot exceed top");
      size_t fill_size = next_marked_obj - obj_addr;
      ShenandoahHeap::fill_with_object(obj_addr, fill_size);
      heap->card_scan()->coalesce_objects(obj_addr, fill_size);
      obj_addr = next_marked_obj;
    }
  }
  // Mark that this region has been coalesced and filled
  end_preemptible_coalesce_and_fill();
  return true;
}
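// Note: oop_fill_and_coalesce() below performs the same walk; the only
// difference is that it periodically polls for GC cancellation, whereas this
// variant always runs the pass to completion.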

// Like oop_iterate(), but without a closure; returns true if it completed without being cancelled.
bool ShenandoahHeapRegion::oop_fill_and_coalesce() {
  HeapWord* obj_addr = resume_coalesce_and_fill();
  // Consider yielding to cancel/preemption request after this many coalesce operations (skip marked, or coalesce free).
  const size_t preemption_stride = 128;

  assert(!is_humongous(), "No need to fill or coalesce humongous regions");
  if (!is_active()) {
    end_preemptible_coalesce_and_fill();
    return true;
  }

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahMarkingContext* marking_context = heap->marking_context();
  // All objects above TAMS are considered live even though their mark bits will not be set.  Note that young-
  // gen evacuations that interrupt a long-running old-gen concurrent mark may promote objects into old-gen
  // while the old-gen concurrent marking is ongoing.  These newly promoted objects will reside above TAMS
  // and will be treated as live during the current old-gen marking pass, even though they will not be
  // explicitly marked.
  HeapWord* t = marking_context->top_at_mark_start(this);

  // Expect marking to be completed before these threads invoke this service.
  assert(heap->active_generation()->is_mark_complete(), "sanity");

  size_t ops_before_preempt_check = preemption_stride;
  while (obj_addr < t) {
    oop obj = cast_to_oop(obj_addr);
    if (marking_context->is_marked(obj)) {
      assert(obj->klass() != NULL, "klass should not be NULL");
      obj_addr += obj->size();
    } else {
      // Object is not marked.  Coalesce and fill dead object with dead neighbors.
      HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
      assert(next_marked_obj <= t, "next marked object cannot exceed top");
      size_t fill_size = next_marked_obj - obj_addr;
      ShenandoahHeap::fill_with_object(obj_addr, fill_size);
      heap->card_scan()->coalesce_objects(obj_addr, fill_size);
      obj_addr = next_marked_obj;
    }
    if (ops_before_preempt_check-- == 0) {
      if (heap->cancelled_gc()) {
        suspend_coalesce_and_fill(obj_addr);
        return false;
      }
      ops_before_preempt_check = preemption_stride;
    }
  }
  // Mark that this region has been coalesced and filled
  end_preemptible_coalesce_and_fill();
  return true;
}

void ShenandoahHeapRegion::global_oop_iterate_and_fill_dead(OopIterateClosure* blk) {
  if (!is_active()) return;
  if (is_humongous()) {
    // No need to fill dead within humongous regions.  Either the entire region is dead, or the entire region is
    // unchanged.  A humongous region holds no more than one humongous object.
    oop_iterate_humongous(blk);
  } else {
    global_oop_iterate_objects_and_fill_dead(blk);
  }
}

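// Walks the region in two phases: below TAMS, only marked objects are visited
// and runs of dead objects are coalesced into filler objects; from TAMS to
// top(), every object is implicitly live and is visited unconditionally.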
void ShenandoahHeapRegion::global_oop_iterate_objects_and_fill_dead(OopIterateClosure* blk) {
  assert(!is_humongous(), "no humongous region here");
  HeapWord* obj_addr = bottom();

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahMarkingContext* marking_context = heap->marking_context();
  RememberedScanner* rem_set_scanner = heap->card_scan();
  // Objects allocated above TAMS are not marked, but are considered live for purposes of current GC efforts.
  HeapWord* t = marking_context->top_at_mark_start(this);

  assert(heap->active_generation()->is_mark_complete(), "sanity");

  while (obj_addr < t) {
    oop obj = cast_to_oop(obj_addr);
    if (marking_context->is_marked(obj)) {
      assert(obj->klass() != NULL, "klass should not be NULL");
      // When promoting an entire region, we have to register the marked objects as well.
      obj_addr += obj->oop_iterate_size(blk);
    } else {
      // Object is not marked.  Coalesce and fill dead object with dead neighbors.
      HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
      assert(next_marked_obj <= t, "next marked object cannot exceed top");
      size_t fill_size = next_marked_obj - obj_addr;
      ShenandoahHeap::fill_with_object(obj_addr, fill_size);

      // coalesce_objects() unregisters all but the first object subsumed within the coalesced range.
      rem_set_scanner->coalesce_objects(obj_addr, fill_size);
      obj_addr = next_marked_obj;
    }
  }

  // Any object above TAMS and below top() is considered live.
  t = top();
  while (obj_addr < t) {
    oop obj = cast_to_oop(obj_addr);
    obj_addr += obj->oop_iterate_size(blk);
  }
}

// DO NOT CANCEL.  If this worker thread has accepted responsibility for scanning a particular range of addresses, it
// must finish the work before it can be cancelled.
void ShenandoahHeapRegion::oop_iterate_humongous_slice(OopIterateClosure* blk, bool dirty_only,
                                                       HeapWord* start, size_t words, bool write_table, bool is_concurrent) {
  assert(words % CardTable::card_size_in_words() == 0, "Humongous iteration must span a whole number of cards");
  assert(is_humongous(), "only humongous region here");
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Find head.
  ShenandoahHeapRegion* r = humongous_start_region();
  assert(r->is_humongous_start(), "need humongous head here");

  oop obj = cast_to_oop(r->bottom());
  RememberedScanner* scanner = heap->card_scan();
  size_t card_index = scanner->card_index_for_addr(start);
  size_t num_cards = words / CardTable::card_size_in_words();

  if (dirty_only) {
    if (write_table) {
      while (num_cards-- > 0) {
        if (scanner->is_write_card_dirty(card_index++)) {
          obj->oop_iterate(blk, MemRegion(start, start + CardTable::card_size_in_words()));
        }
        start += CardTable::card_size_in_words();
      }
    } else {
      while (num_cards-- > 0) {
        if (scanner->is_card_dirty(card_index++)) {
          obj->oop_iterate(blk, MemRegion(start, start + CardTable::card_size_in_words()));
        }
        start += CardTable::card_size_in_words();
      }
    }
  } else {
    // Scan all data, regardless of whether cards are dirty
    obj->oop_iterate(blk, MemRegion(start, start + num_cards * CardTable::card_size_in_words()));
  }
}

void ShenandoahHeapRegion::oop_iterate_humongous(OopIterateClosure* blk, HeapWord* start, size_t words) {
  assert(is_humongous(), "only humongous region here");
  // Find head.
  ShenandoahHeapRegion* r = humongous_start_region();
  assert(r->is_humongous_start(), "need humongous head here");
  oop obj = cast_to_oop(r->bottom());
  obj->oop_iterate(blk, MemRegion(start, start + words));
}

void ShenandoahHeapRegion::oop_iterate_humongous(OopIterateClosure* blk) {
  assert(is_humongous(), "only humongous region here");
  // Find head.
  ShenandoahHeapRegion* r = humongous_start_region();
  assert(r->is_humongous_start(), "need humongous head here");
  oop obj = cast_to_oop(r->bottom());
  obj->oop_iterate(blk, MemRegion(bottom(), top()));
}

ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(is_humongous(), "Must be a part of the humongous region");
  size_t i = index();
  ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this);
  while (!r->is_humongous_start()) {
    assert(i > 0, "Sanity");
    i--;
    r = heap->get_region(i);
    assert(r->is_humongous(), "Must be a part of the humongous region");
  }
  assert(r->is_humongous_start(), "Must be");
  return r;
}

void ShenandoahHeapRegion::recycle() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  shenandoah_assert_heaplocked();

  if (affiliation() == YOUNG_GENERATION) {
    heap->young_generation()->decrease_used(used());
  } else if (affiliation() == OLD_GENERATION) {
    heap->old_generation()->decrease_used(used());
  }

  set_top(bottom());
  clear_live_data();

  reset_alloc_metadata();

  heap->marking_context()->reset_top_at_mark_start(this);
  set_update_watermark(bottom());

  make_empty();
  set_affiliation(FREE);

  if (ZapUnusedHeapArea) {
    SpaceMangler::mangle_region(MemRegion(bottom(), end()));
  }
}
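// Note on the block_start()/block_size() queries below: resolving an arbitrary
// address means walking objects linearly from bottom(), which is linear in the
// region's occupancy. That presumably is acceptable because these queries serve
// heap inspection and verification rather than allocation fast paths.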

HeapWord* ShenandoahHeapRegion::block_start(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += cast_to_oop(cur)->size();
    }
    shenandoah_assert_correct(NULL, cast_to_oop(last));
    return last;
  }
}

size_t ShenandoahHeapRegion::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p < top()) {
    return cast_to_oop(p)->size();
  } else {
    assert(p == top(), "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

size_t ShenandoahHeapRegion::setup_sizes(size_t max_heap_size) {
  // Absolute minimums we should not ever break.
  static const size_t MIN_REGION_SIZE = 256*K;

  if (FLAG_IS_DEFAULT(ShenandoahMinRegionSize)) {
    FLAG_SET_DEFAULT(ShenandoahMinRegionSize, MIN_REGION_SIZE);
  }

  // Generational Shenandoah needs this alignment for card tables.
  if (strcmp(ShenandoahGCMode, "generational") == 0) {
    max_heap_size = align_up(max_heap_size, CardTableRS::ct_max_alignment_constraint());
  }

  size_t region_size;
  if (FLAG_IS_DEFAULT(ShenandoahRegionSize)) {
    if (ShenandoahMinRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
                      MIN_NUM_REGIONS,
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than minimum region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(MIN_REGION_SIZE),         proper_unit_for_byte_size(MIN_REGION_SIZE));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MinTLABSize) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than TLAB size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(MinTLABSize),             proper_unit_for_byte_size(MinTLABSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than min region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize),
                      byte_size_in_proper_unit(MIN_REGION_SIZE),         proper_unit_for_byte_size(MIN_REGION_SIZE));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message);
    }
    if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Minimum (" SIZE_FORMAT "%s) should not be larger than maximum (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message);
    }

    // We rapidly expand to max_heap_size in most scenarios, so that is the measure
    // for usual heap sizes. Do not depend on initial_heap_size here.
    region_size = max_heap_size / ShenandoahTargetNumRegions;

    // Now make sure that we don't go over or under our limits.
    region_size = MAX2(ShenandoahMinRegionSize, region_size);
    region_size = MIN2(ShenandoahMaxRegionSize, region_size);

  } else {
    if (ShenandoahRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
                      MIN_NUM_REGIONS,
                      byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    if (ShenandoahRegionSize < ShenandoahMinRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "%s) should be larger than min region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahRegionSize),    byte_size_in_proper_unit(ShenandoahRegionSize) ? proper_unit_for_byte_size(ShenandoahRegionSize) : proper_unit_for_byte_size(ShenandoahRegionSize),
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    if (ShenandoahRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "%s) should be lower than max region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahRegionSize),    proper_unit_for_byte_size(ShenandoahRegionSize),
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    region_size = ShenandoahRegionSize;
  }

  // Make sure region size and heap size are page aligned.
  // If large pages are used, we ensure that region size is aligned to large page size if
  // heap size is large enough to accommodate minimal number of regions. Otherwise, we align
  // region size to regular page size.

  // Figure out the page size to use, and align the heap up to that page size.
  int page_size = os::vm_page_size();
  if (UseLargePages) {
    size_t large_page_size = os::large_page_size();
    max_heap_size = align_up(max_heap_size, large_page_size);
    if ((max_heap_size / align_up(region_size, large_page_size)) >= MIN_NUM_REGIONS) {
      page_size = (int)large_page_size;
    } else {
      // Should have been checked during argument initialization
      assert(!ShenandoahUncommit, "Uncommit requires region size to be aligned to large page size");
    }
  } else {
    max_heap_size = align_up(max_heap_size, page_size);
  }

  // Align region size to page size
  region_size = align_up(region_size, page_size);

  int region_size_log = log2i(region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = size_t(1) << region_size_log;
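  // For example (illustrative numbers only): a 40 GB heap divided by a target
  // of 2048 regions yields 20 MB, which the shift above rounds down to the
  // largest power of two that fits, i.e. a 16 MB region size.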

  // Now, set up the globals.
  guarantee(RegionSizeBytesShift == 0, "we should only set it once");
  RegionSizeBytesShift = (size_t)region_size_log;

  guarantee(RegionSizeWordsShift == 0, "we should only set it once");
  RegionSizeWordsShift = RegionSizeBytesShift - LogHeapWordSize;

  guarantee(RegionSizeBytes == 0, "we should only set it once");
  RegionSizeBytes = region_size;
  RegionSizeWords = RegionSizeBytes >> LogHeapWordSize;
  assert (RegionSizeWords*HeapWordSize == RegionSizeBytes, "sanity");

  guarantee(RegionSizeWordsMask == 0, "we should only set it once");
  RegionSizeWordsMask = RegionSizeWords - 1;

  guarantee(RegionSizeBytesMask == 0, "we should only set it once");
  RegionSizeBytesMask = RegionSizeBytes - 1;

  guarantee(RegionCount == 0, "we should only set it once");
  RegionCount = align_up(max_heap_size, RegionSizeBytes) / RegionSizeBytes;
  guarantee(RegionCount >= MIN_NUM_REGIONS, "Should have at least minimum regions");

  guarantee(HumongousThresholdWords == 0, "we should only set it once");
  HumongousThresholdWords = RegionSizeWords * ShenandoahHumongousThreshold / 100;
  HumongousThresholdWords = align_down(HumongousThresholdWords, MinObjAlignment);
  assert (HumongousThresholdWords <= RegionSizeWords, "sanity");

  guarantee(HumongousThresholdBytes == 0, "we should only set it once");
  HumongousThresholdBytes = HumongousThresholdWords * HeapWordSize;
  assert (HumongousThresholdBytes <= RegionSizeBytes, "sanity");

  // The rationale for trimming the TLAB sizes has to do with the raciness in
  // the TLAB allocation machinery. It may happen that the TLAB sizing policy polls
  // Shenandoah about the next free size, gets the answer for region #N, goes away
  // for a while, then tries to allocate in region #N, and fails because some other
  // thread has claimed part of region #N. The freeset allocation code then has to
  // retire region #N before moving the allocation to region #N+1.
  //
  // The worst case is realized when the "answer" is "region size", which means it
  // could prematurely retire an entire region. Having smaller TLABs does not fix
  // that completely, but reduces the probability of too wasteful region retirement.
  // With the current divisor, we will waste no more than 1/8 of the region size in
  // the worst case. This also has a secondary effect on collection set selection:
  // even under the race, the regions would be at least 7/8 used, which allows
  // relying on "used" - "live" for cset selection. Otherwise, we can get a
  // fragmented region below the garbage threshold that would never be considered
  // for collection.
  //
  // The whole thing is mitigated if Elastic TLABs are enabled.
  //
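  // As a worked example (illustrative numbers only): with a 16 MB region and
  // the divisor of 8 used below, TLABs are capped at 2 MB, so a racy retirement
  // strands at most 2 MB and the retired region is still at least 14/16 used.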
  guarantee(MaxTLABSizeWords == 0, "we should only set it once");
  MaxTLABSizeWords = MIN2(ShenandoahElasticTLAB ? RegionSizeWords : (RegionSizeWords / 8), HumongousThresholdWords);
  MaxTLABSizeWords = align_down(MaxTLABSizeWords, MinObjAlignment);

  guarantee(MaxTLABSizeBytes == 0, "we should only set it once");
  MaxTLABSizeBytes = MaxTLABSizeWords * HeapWordSize;
  assert (MaxTLABSizeBytes > MinTLABSize, "should be larger");

  return max_heap_size;
}

void ShenandoahHeapRegion::do_commit() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->is_heap_region_special() && !os::commit_memory((char *) bottom(), RegionSizeBytes, false)) {
    report_java_out_of_memory("Unable to commit region");
  }
  if (!heap->commit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to commit bitmaps for region");
  }
  if (AlwaysPreTouch) {
    os::pretouch_memory(bottom(), end(), heap->pretouch_heap_page_size());
  }
  heap->increase_committed(ShenandoahHeapRegion::region_size_bytes());
}

void ShenandoahHeapRegion::do_uncommit() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->is_heap_region_special() && !os::uncommit_memory((char *) bottom(), RegionSizeBytes)) {
    report_java_out_of_memory("Unable to uncommit region");
  }
  if (!heap->uncommit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to uncommit bitmaps for region");
  }
  heap->decrease_committed(ShenandoahHeapRegion::region_size_bytes());
}

void ShenandoahHeapRegion::set_state(RegionState to) {
  EventShenandoahHeapRegionStateChange evt;
  if (evt.should_commit()) {
    evt.set_index((unsigned) index());
    evt.set_start((uintptr_t)bottom());
    evt.set_used(used());
    evt.set_from(_state);
    evt.set_to(to);
    evt.commit();
  }
  _state = to;
}

void ShenandoahHeapRegion::record_pin() {
  Atomic::add(&_critical_pins, (size_t)1);
}

void ShenandoahHeapRegion::record_unpin() {
  assert(pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", index());
  Atomic::sub(&_critical_pins, (size_t)1);
}

size_t ShenandoahHeapRegion::pin_count() const {
  return Atomic::load(&_critical_pins);
}
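// Pin accounting is intentionally lock-free: record_pin()/record_unpin() adjust
// _critical_pins atomically so they can run without the heap lock (e.g. around
// JNI critical sections), while the make_pinned()/make_unpinned() transitions
// that consult pin_count() still require the heap lock.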

void ShenandoahHeapRegion::set_affiliation(ShenandoahRegionAffiliation new_affiliation) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahRegionAffiliation region_affiliation = heap->region_affiliation(this);
  {
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    log_debug(gc)("Setting affiliation of Region " SIZE_FORMAT " from %s to %s, top: " PTR_FORMAT ", TAMS: " PTR_FORMAT
                  ", watermark: " PTR_FORMAT ", top_bitmap: " PTR_FORMAT,
                  index(), affiliation_name(region_affiliation), affiliation_name(new_affiliation),
                  p2i(top()), p2i(ctx->top_at_mark_start(this)), p2i(_update_watermark), p2i(ctx->top_bitmap(this)));
  }

#ifdef ASSERT
  {
    // During full GC, heap->complete_marking_context() may not be valid (it can be nullptr).
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    size_t idx = this->index();
    HeapWord* top_bitmap = ctx->top_bitmap(this);

    assert(ctx->is_bitmap_clear_range(top_bitmap, _end),
           "Region " SIZE_FORMAT ", bitmap should be clear between top_bitmap: " PTR_FORMAT " and end: " PTR_FORMAT, idx,
           p2i(top_bitmap), p2i(_end));
  }
#endif

  if (region_affiliation == new_affiliation) {
    return;
  }

  if (!heap->mode()->is_generational()) {
    heap->set_affiliation(this, new_affiliation);
    return;
  }

  log_trace(gc)("Changing affiliation of region " SIZE_FORMAT " from %s to %s",
                index(), affiliation_name(region_affiliation), affiliation_name(new_affiliation));

  if (region_affiliation == ShenandoahRegionAffiliation::YOUNG_GENERATION) {
    heap->young_generation()->decrement_affiliated_region_count();
  } else if (region_affiliation == ShenandoahRegionAffiliation::OLD_GENERATION) {
    heap->old_generation()->decrement_affiliated_region_count();
  }

  size_t regions;
  switch (new_affiliation) {
    case FREE:
      assert(!has_live(), "Free region should not have live data");
      break;
    case YOUNG_GENERATION:
      reset_age();
      regions = heap->young_generation()->increment_affiliated_region_count();
      // During Full GC, we allow temporary violation of this requirement.  We enforce that this condition is
      // restored upon completion of Full GC.
      assert(heap->is_full_gc_in_progress() ||
             (regions * ShenandoahHeapRegion::region_size_bytes() <= heap->young_generation()->adjusted_capacity()),
             "Number of young regions cannot exceed adjusted capacity");
      break;
    case OLD_GENERATION:
      regions = heap->old_generation()->increment_affiliated_region_count();
      // During Full GC, we allow temporary violation of this requirement.  We enforce that this condition is
      // restored upon completion of Full GC.
      assert(heap->is_full_gc_in_progress() ||
             (regions * ShenandoahHeapRegion::region_size_bytes() <= heap->old_generation()->adjusted_capacity()),
             "Number of old regions cannot exceed adjusted capacity");
      break;
    default:
      ShouldNotReachHere();
      return;
  }
  heap->set_affiliation(this, new_affiliation);
}

// Returns number of regions promoted, or zero if we choose not to promote.
size_t ShenandoahHeapRegion::promote_humongous() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahMarkingContext* marking_context = heap->marking_context();
  assert(heap->active_generation()->is_mark_complete(), "sanity");
  assert(is_young(), "Only young regions can be promoted");
  assert(is_humongous_start(), "Should not promote humongous continuation in isolation");
  assert(age() >= InitialTenuringThreshold, "Only promote regions that are sufficiently aged");

  ShenandoahGeneration* old_generation = heap->old_generation();
  ShenandoahGeneration* young_generation = heap->young_generation();

  oop obj = cast_to_oop(bottom());
  assert(marking_context->is_marked(obj), "promoted humongous object should be alive");

  // TODO: Consider not promoting humongous objects that represent primitive arrays.  Leaving a primitive array
  // (obj->is_typeArray()) in young-gen is harmless because these objects are never relocated and they are not
  // scanned.  Leaving primitive arrays in young-gen memory allows their memory to be reclaimed more quickly when
  // it becomes garbage.  Better to not make this change until sizes of young-gen and old-gen are completely
  // adaptive, as leaving primitive arrays in young-gen might be perceived as an "astonishing result" by someone
  // who has carefully analyzed the required sizes of an application's young-gen and old-gen.

  size_t spanned_regions = ShenandoahHeapRegion::required_regions(obj->size() * HeapWordSize);
  size_t index_limit = index() + spanned_regions;
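  // For example (illustrative numbers only): with 2 MB regions, a 5 MB humongous
  // object starting at region #10 gives spanned_regions == 3 and index_limit == 13,
  // i.e. regions #10 through #12 are re-affiliated to old-gen as a unit.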

  {
    // We need to grab the heap lock in order to avoid a race when changing the affiliations of spanned_regions from
    // young to old.
    ShenandoahHeapLocker locker(heap->lock());
    size_t available_old_regions = old_generation->adjusted_unaffiliated_regions();
    if (spanned_regions <= available_old_regions) {
      log_debug(gc)("promoting humongous region " SIZE_FORMAT ", spanning " SIZE_FORMAT, index(), spanned_regions);

      // For this region and each humongous continuation region spanned by this humongous object, change
      // affiliation to OLD_GENERATION and adjust the generation-use tallies.  The remnant of memory
      // in the last humongous region that is not spanned by obj is currently not used.
      for (size_t i = index(); i < index_limit; i++) {
        ShenandoahHeapRegion* r = heap->get_region(i);
        log_debug(gc)("promoting humongous region " SIZE_FORMAT ", from " PTR_FORMAT " to " PTR_FORMAT,
                      r->index(), p2i(r->bottom()), p2i(r->top()));
        // We mark the entire humongous object's range as dirty after this loop terminates, so no need to dirty it here.
        r->set_affiliation(OLD_GENERATION);
        old_generation->increase_used(r->used());
        young_generation->decrease_used(r->used());
      }
      // Then fall through to finish the promotion after releasing the heap lock.
    } else {
      // There are not enough available old regions to promote this humongous region at this time, so defer promotion.
      // TODO: Consider allowing the promotion now, with the expectation that we can resize and/or collect OLD
      // momentarily to address the transient violation of budgets.  Some problems that need to be addressed in order
      // to allow transient violation of capacity budgets are:
      //  1. Various size_t subtractions assume usage is less than capacity, and thus assume there will be no
      //     arithmetic underflow when we subtract usage from capacity.  The results of such size_t subtractions
      //     would need to be guarded and special handling provided.
      //  2. ShenandoahVerifier enforces that usage is less than capacity.  If we are going to relax this constraint,
      //     we need to think about what conditions allow the constraint to be violated and document and implement the
      //     changes.
      return 0;
    }
  }

  // Since this region may have served previously as OLD, it may hold obsolete object range info.
  heap->card_scan()->reset_object_range(bottom(), bottom() + spanned_regions * ShenandoahHeapRegion::region_size_words());
  // Since the humongous region holds only one object, no lock is necessary for this register_object() invocation.
  heap->card_scan()->register_object_wo_lock(bottom());

  if (obj->is_typeArray()) {
    // Primitive arrays don't need to be scanned.
    log_debug(gc)("Clean cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT,
                  index(), p2i(bottom()), p2i(bottom() + obj->size()));
    heap->card_scan()->mark_range_as_clean(bottom(), obj->size());
  } else {
    log_debug(gc)("Dirty cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT,
                  index(), p2i(bottom()), p2i(bottom() + obj->size()));
    heap->card_scan()->mark_range_as_dirty(bottom(), obj->size());
  }
  return index_limit - index();
}