src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp

   1 /*
   2  * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "gc/shared/space.inline.hpp"
  28 #include "gc/shared/tlab_globals.hpp"
  29 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
  30 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  31 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  32 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  33 #include "jfr/jfrEvents.hpp"
  34 #include "memory/allocation.hpp"
  35 #include "memory/iterator.inline.hpp"
  36 #include "memory/resourceArea.hpp"
  37 #include "memory/universe.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "runtime/atomic.hpp"
  40 #include "runtime/globals_extension.hpp"
  41 #include "runtime/java.hpp"
  42 #include "runtime/mutexLocker.hpp"
  43 #include "runtime/os.hpp"
  44 #include "runtime/safepoint.hpp"
  45 #include "utilities/powerOfTwo.hpp"
  46 
  47 size_t ShenandoahHeapRegion::RegionCount = 0;
  48 size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
  49 size_t ShenandoahHeapRegion::RegionSizeWords = 0;
  50 size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;
  51 size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;
  52 size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;
  53 size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;
  54 size_t ShenandoahHeapRegion::HumongousThresholdBytes = 0;
  55 size_t ShenandoahHeapRegion::HumongousThresholdWords = 0;
  56 size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;
  57 size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0;
  58 
  59 ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed) :
  60   _index(index),
  61   _bottom(start),
  62   _end(start + RegionSizeWords),
  63   _new_top(nullptr),
  64   _empty_time(os::elapsedTime()),
  65   _state(committed ? _empty_committed : _empty_uncommitted),
  66   _top(start),
  67   _tlab_allocs(0),
  68   _gclab_allocs(0),
  69   _live_data(0),
  70   _critical_pins(0),
  71   _update_watermark(start) {
  72 
  73   assert(Universe::on_page_boundary(_bottom) && Universe::on_page_boundary(_end),
  74          "invalid space boundaries");
  75   if (ZapUnusedHeapArea && committed) {
  76     SpaceMangler::mangle_region(MemRegion(_bottom, _end));
  77   }
  78 }
  79 
  80 void ShenandoahHeapRegion::report_illegal_transition(const char *method) {
  81   stringStream ss;
  82   ss.print("Illegal region state transition from \"%s\", at %s\n  ", region_state_to_string(_state), method);
  83   print_on(&ss);
  84   fatal("%s", ss.freeze());
  85 }
  86 
  87 void ShenandoahHeapRegion::make_regular_allocation() {
  88   shenandoah_assert_heaplocked();
  89 
  90   switch (_state) {
  91     case _empty_uncommitted:
  92       do_commit();
  93     case _empty_committed:
  94       set_state(_regular);
  95     case _regular:
  96     case _pinned:
  97       return;
  98     default:
  99       report_illegal_transition("regular allocation");
 100   }
 101 }
 102 
 103 void ShenandoahHeapRegion::make_regular_bypass() {
 104   shenandoah_assert_heaplocked();
 105   assert (ShenandoahHeap::heap()->is_full_gc_in_progress() || ShenandoahHeap::heap()->is_degenerated_gc_in_progress(),
 106           "only for full or degen GC");
 107 
 108   switch (_state) {
 109     case _empty_uncommitted:
 110       do_commit();
 111     case _empty_committed:
 112     case _cset:
 113     case _humongous_start:
 114     case _humongous_cont:
 115       set_state(_regular);
 116       return;
 117     case _pinned_cset:
 118       set_state(_pinned);
 119       return;
 120     case _regular:
 121     case _pinned:
 122       return;
 123     default:
 124       report_illegal_transition("regular bypass");
 125   }
 126 }
 127 
 128 void ShenandoahHeapRegion::make_humongous_start() {
 129   shenandoah_assert_heaplocked();
 130   switch (_state) {
 131     case _empty_uncommitted:
 132       do_commit();
 133     case _empty_committed:
 134       set_state(_humongous_start);
 135       return;
 136     default:
 137       report_illegal_transition("humongous start allocation");
 138   }
 139 }
 140 
 141 void ShenandoahHeapRegion::make_humongous_start_bypass() {
 142   shenandoah_assert_heaplocked();
 143   assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
 144 
 145   switch (_state) {
 146     case _empty_committed:
 147     case _regular:
 148     case _humongous_start:
 149     case _humongous_cont:
 150       set_state(_humongous_start);
 151       return;
 152     default:
 153       report_illegal_transition("humongous start bypass");
 154   }
 155 }
 156 
 157 void ShenandoahHeapRegion::make_humongous_cont() {
 158   shenandoah_assert_heaplocked();
 159   switch (_state) {
 160     case _empty_uncommitted:
 161       do_commit();
 162     case _empty_committed:
 163       set_state(_humongous_cont);
 164       return;
 165     default:
 166       report_illegal_transition("humongous continuation allocation");
 167   }
 168 }
 169 
 170 void ShenandoahHeapRegion::make_humongous_cont_bypass() {
 171   shenandoah_assert_heaplocked();
 172   assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
 173 
 174   switch (_state) {
 175     case _empty_committed:
 176     case _regular:
 177     case _humongous_start:
 178     case _humongous_cont:
 179       set_state(_humongous_cont);
 180       return;
 181     default:
 182       report_illegal_transition("humongous continuation bypass");
 183   }
 184 }
 185 
 186 void ShenandoahHeapRegion::make_pinned() {
 187   shenandoah_assert_heaplocked();
 188   assert(pin_count() > 0, "Should have pins: " SIZE_FORMAT, pin_count());
 189 
 190   switch (_state) {
 191     case _regular:
 192       set_state(_pinned);
 193     case _pinned_cset:
 194     case _pinned:
 195       return;
 196     case _humongous_start:
 197       set_state(_pinned_humongous_start);
 198     case _pinned_humongous_start:
 199       return;
 200     case _cset:
 201       _state = _pinned_cset;
 202       return;
 203     default:
 204       report_illegal_transition("pinning");
 205   }
 206 }
 207 
 208 void ShenandoahHeapRegion::make_unpinned() {
 209   shenandoah_assert_heaplocked();
 210   assert(pin_count() == 0, "Should not have pins: " SIZE_FORMAT, pin_count());
 211 
 212   switch (_state) {
 213     case _pinned:
 214       set_state(_regular);
 215       return;
 216     case _regular:
 217     case _humongous_start:
 218       return;
 219     case _pinned_cset:
 220       set_state(_cset);
 221       return;
 222     case _pinned_humongous_start:
 223       set_state(_humongous_start);
 224       return;
 225     default:
 226       report_illegal_transition("unpinning");
 227   }
 228 }
 229 
 230 void ShenandoahHeapRegion::make_cset() {
 231   shenandoah_assert_heaplocked();
 232   switch (_state) {
 233     case _regular:
 234       set_state(_cset);
 235     case _cset:
 236       return;
 237     default:
 238       report_illegal_transition("cset");
 239   }
 240 }
 241 
 242 void ShenandoahHeapRegion::make_trash() {
 243   shenandoah_assert_heaplocked();
 244   switch (_state) {
 245     case _cset:
 246       // Reclaiming cset regions
 247     case _humongous_start:
 248     case _humongous_cont:
 249       // Reclaiming humongous regions
 250     case _regular:
 251       // Immediate region reclaim
 252       set_state(_trash);
 253       return;
 254     default:
 255       report_illegal_transition("trashing");
 256   }
 257 }
 258 
 259 void ShenandoahHeapRegion::make_trash_immediate() {
 260   make_trash();
 261 
 262   // On this path, we know there are no marked objects in the region,
 263   // tell marking context about it to bypass bitmap resets.
 264   ShenandoahHeap::heap()->complete_marking_context()->reset_top_bitmap(this);
 265 }
 266 
 267 void ShenandoahHeapRegion::make_empty() {
 268   shenandoah_assert_heaplocked();
 269   switch (_state) {
 270     case _trash:
 271       set_state(_empty_committed);
 272       _empty_time = os::elapsedTime();
 273       return;
 274     default:
 275       report_illegal_transition("emptying");
 276   }
 277 }
 278 
 279 void ShenandoahHeapRegion::make_uncommitted() {
 280   shenandoah_assert_heaplocked();
 281   switch (_state) {
 282     case _empty_committed:
 283       do_uncommit();
 284       set_state(_empty_uncommitted);
 285       return;
 286     default:
 287       report_illegal_transition("uncommiting");
 288   }
 289 }
 290 
 291 void ShenandoahHeapRegion::make_committed_bypass() {
 292   shenandoah_assert_heaplocked();
 293   assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
 294 
 295   switch (_state) {
 296     case _empty_uncommitted:
 297       do_commit();
 298       set_state(_empty_committed);
 299       return;
 300     default:
 301       report_illegal_transition("commit bypass");
 302   }
 303 }
 304 
 305 void ShenandoahHeapRegion::reset_alloc_metadata() {
 306   _tlab_allocs = 0;
 307   _gclab_allocs = 0;
 308 }
 309 
 310 size_t ShenandoahHeapRegion::get_shared_allocs() const {
 311   return used() - (_tlab_allocs + _gclab_allocs) * HeapWordSize;
 312 }
 313 
 314 size_t ShenandoahHeapRegion::get_tlab_allocs() const {
 315   return _tlab_allocs * HeapWordSize;
 316 }
 317 
 318 size_t ShenandoahHeapRegion::get_gclab_allocs() const {
 319   return _gclab_allocs * HeapWordSize;
 320 }
 321 
 322 void ShenandoahHeapRegion::set_live_data(size_t s) {
 323   assert(Thread::current()->is_VM_thread(), "by VM thread");
 324   _live_data = (s >> LogHeapWordSize);
 325 }
 326 
 327 void ShenandoahHeapRegion::print_on(outputStream* st) const {
 328   st->print("|");
 329   st->print(SIZE_FORMAT_W(5), this->_index);
 330 
 331   switch (_state) {
 332     case _empty_uncommitted:
 333       st->print("|EU ");
 334       break;
 335     case _empty_committed:
 336       st->print("|EC ");
 337       break;
 338     case _regular:
 339       st->print("|R  ");
 340       break;
 341     case _humongous_start:
 342       st->print("|H  ");
 343       break;
 344     case _pinned_humongous_start:
 345       st->print("|HP ");
 346       break;
 347     case _humongous_cont:
 348       st->print("|HC ");
 349       break;
 350     case _cset:
 351       st->print("|CS ");
 352       break;
 353     case _trash:
 354       st->print("|TR ");
 355       break;
 356     case _pinned:
 357       st->print("|P  ");
 358       break;
 359     case _pinned_cset:
 360       st->print("|CSP");
 361       break;
 362     default:
 363       ShouldNotReachHere();
 364   }
 365 
 366 #define SHR_PTR_FORMAT "%12" PRIxPTR
 367 
 368   st->print("|BTE " SHR_PTR_FORMAT  ", " SHR_PTR_FORMAT ", " SHR_PTR_FORMAT,
 369             p2i(bottom()), p2i(top()), p2i(end()));
 370   st->print("|TAMS " SHR_PTR_FORMAT,
 371             p2i(ShenandoahHeap::heap()->marking_context()->top_at_mark_start(const_cast<ShenandoahHeapRegion*>(this))));
 372   st->print("|UWM " SHR_PTR_FORMAT,
 373             p2i(_update_watermark));
 374   st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()),                proper_unit_for_byte_size(used()));
 375   st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()),     proper_unit_for_byte_size(get_tlab_allocs()));
 376   st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()),    proper_unit_for_byte_size(get_gclab_allocs()));
 377   st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()),   proper_unit_for_byte_size(get_shared_allocs()));
 378   st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes()));
 379   st->print("|CP " SIZE_FORMAT_W(3), pin_count());
 380   st->cr();
 381 
 382 #undef SHR_PTR_FORMAT
 383 }
 384 
 385 void ShenandoahHeapRegion::oop_iterate(OopIterateClosure* blk) {
 386   if (!is_active()) return;
 387   if (is_humongous()) {
 388     oop_iterate_humongous(blk);
 389   } else {
 390     oop_iterate_objects(blk);
 391   }
 392 }
 393 
 394 void ShenandoahHeapRegion::oop_iterate_objects(OopIterateClosure* blk) {
 395   assert(! is_humongous(), "no humongous region here");
 396   HeapWord* obj_addr = bottom();
 397   HeapWord* t = top();
 398   // Could call objects iterate, but this is easier.
 399   while (obj_addr < t) {
 400     oop obj = cast_to_oop(obj_addr);
 401     obj_addr += obj->oop_iterate_size(blk);
 402   }
 403 }
 404 
 405 void ShenandoahHeapRegion::oop_iterate_humongous(OopIterateClosure* blk) {
 406   assert(is_humongous(), "only humongous region here");
 407   // Find head.
 408   ShenandoahHeapRegion* r = humongous_start_region();
 409   assert(r->is_humongous_start(), "need humongous head here");
 410   oop obj = cast_to_oop(r->bottom());
 411   obj->oop_iterate(blk, MemRegion(bottom(), top()));
 412 }
 413 
 414 ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
 415   ShenandoahHeap* heap = ShenandoahHeap::heap();
 416   assert(is_humongous(), "Must be a part of the humongous region");
 417   size_t i = index();
 418   ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this);
 419   while (!r->is_humongous_start()) {
 420     assert(i > 0, "Sanity");
 421     i--;
 422     r = heap->get_region(i);
 423     assert(r->is_humongous(), "Must be a part of the humongous region");
 424   }
 425   assert(r->is_humongous_start(), "Must be");
 426   return r;
 427 }
 428 
 429 void ShenandoahHeapRegion::recycle() {
 430   set_top(bottom());
 431   clear_live_data();
 432 
 433   reset_alloc_metadata();
 434 
 435   ShenandoahHeap::heap()->marking_context()->reset_top_at_mark_start(this);
 436   set_update_watermark(bottom());
 437 
 438   make_empty();
 439 
 440   if (ZapUnusedHeapArea) {
 441     SpaceMangler::mangle_region(MemRegion(bottom(), end()));
 442   }
 443 }
 444 
 445 HeapWord* ShenandoahHeapRegion::block_start(const void* p) const {
 446   assert(MemRegion(bottom(), end()).contains(p),
 447          "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
 448          p2i(p), p2i(bottom()), p2i(end()));
 449   if (p >= top()) {
 450     return top();
 451   } else {
 452     HeapWord* last = bottom();
 453     HeapWord* cur = last;
 454     while (cur <= p) {
 455       last = cur;
 456       cur += cast_to_oop(cur)->size();
 457     }
 458     shenandoah_assert_correct(nullptr, cast_to_oop(last));
 459     return last;
 460   }
 461 }
 462 
 463 size_t ShenandoahHeapRegion::block_size(const HeapWord* p) const {
 464   assert(MemRegion(bottom(), end()).contains(p),
 465          "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
 466          p2i(p), p2i(bottom()), p2i(end()));
 467   if (p < top()) {
 468     return cast_to_oop(p)->size();
 469   } else {
 470     assert(p == top(), "just checking");
 471     return pointer_delta(end(), (HeapWord*) p);
 472   }
 473 }
 474 
 475 size_t ShenandoahHeapRegion::setup_sizes(size_t max_heap_size) {
 476   // Absolute minimums we should not ever break.
 477   static const size_t MIN_REGION_SIZE = 256*K;
 478 
 479   if (FLAG_IS_DEFAULT(ShenandoahMinRegionSize)) {
 480     FLAG_SET_DEFAULT(ShenandoahMinRegionSize, MIN_REGION_SIZE);
 481   }
 482 
 483   size_t region_size;
 484   if (FLAG_IS_DEFAULT(ShenandoahRegionSize)) {
 485     if (ShenandoahMinRegionSize > max_heap_size / MIN_NUM_REGIONS) {
 486       err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
 487                       "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "%s).",
 488                       byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
 489                       MIN_NUM_REGIONS,
 490                       byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize));
 491       vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
 492     }
 493     if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
 494       err_msg message("" SIZE_FORMAT "%s should not be lower than minimum region size (" SIZE_FORMAT "%s).",
 495                       byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
 496                       byte_size_in_proper_unit(MIN_REGION_SIZE),         proper_unit_for_byte_size(MIN_REGION_SIZE));
 497       vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
 498     }
 499     if (ShenandoahMinRegionSize < MinTLABSize) {
 500       err_msg message("" SIZE_FORMAT "%s should not be lower than TLAB size size (" SIZE_FORMAT "%s).",
 501                       byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
 502                       byte_size_in_proper_unit(MinTLABSize),             proper_unit_for_byte_size(MinTLABSize));

 669     evt.set_used(used());
 670     evt.set_from(_state);
 671     evt.set_to(to);
 672     evt.commit();
 673   }
 674   _state = to;
 675 }
 676 
 677 void ShenandoahHeapRegion::record_pin() {
 678   Atomic::add(&_critical_pins, (size_t)1);
 679 }
 680 
 681 void ShenandoahHeapRegion::record_unpin() {
 682   assert(pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", index());
 683   Atomic::sub(&_critical_pins, (size_t)1);
 684 }
 685 
 686 size_t ShenandoahHeapRegion::pin_count() const {
 687   return Atomic::load(&_critical_pins);
 688 }
   1 /*
   2  * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
   4  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6  *
   7  * This code is free software; you can redistribute it and/or modify it
   8  * under the terms of the GNU General Public License version 2 only, as
   9  * published by the Free Software Foundation.
  10  *
  11  * This code is distributed in the hope that it will be useful, but WITHOUT
  12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  14  * version 2 for more details (a copy is included in the LICENSE file that
  15  * accompanied this code).
  16  *
  17  * You should have received a copy of the GNU General Public License version
  18  * 2 along with this work; if not, write to the Free Software Foundation,
  19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  20  *
  21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  22  * or visit www.oracle.com if you need additional information or have any
  23  * questions.
  24  *
  25  */
  26 
  27 #include "precompiled.hpp"
  28 #include "gc/shared/cardTable.hpp"
  29 #include "gc/shared/space.inline.hpp"
  30 #include "gc/shared/tlab_globals.hpp"
  31 #include "gc/shenandoah/shenandoahCardTable.hpp"
  32 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  33 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
  34 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  35 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  36 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  37 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  38 #include "gc/shenandoah/shenandoahGeneration.hpp"
  39 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  40 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
  41 #include "jfr/jfrEvents.hpp"
  42 #include "memory/allocation.hpp"
  43 #include "memory/iterator.inline.hpp"
  44 #include "memory/resourceArea.hpp"
  45 #include "memory/universe.hpp"
  46 #include "oops/oop.inline.hpp"
  47 #include "runtime/atomic.hpp"
  48 #include "runtime/globals_extension.hpp"
  49 #include "runtime/java.hpp"
  50 #include "runtime/mutexLocker.hpp"
  51 #include "runtime/os.hpp"
  52 #include "runtime/safepoint.hpp"
  53 #include "utilities/powerOfTwo.hpp"
  54 
  55 
  56 size_t ShenandoahHeapRegion::RegionCount = 0;
  57 size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
  58 size_t ShenandoahHeapRegion::RegionSizeWords = 0;
  59 size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;
  60 size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;
  61 size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;
  62 size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;
  63 size_t ShenandoahHeapRegion::HumongousThresholdBytes = 0;
  64 size_t ShenandoahHeapRegion::HumongousThresholdWords = 0;
  65 size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;
  66 size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0;
  67 
  68 ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed) :
  69   _index(index),
  70   _bottom(start),
  71   _end(start + RegionSizeWords),
  72   _new_top(nullptr),
  73   _empty_time(os::elapsedTime()),
  74   _top_before_promoted(nullptr),
  75   _state(committed ? _empty_committed : _empty_uncommitted),
  76   _top(start),
  77   _tlab_allocs(0),
  78   _gclab_allocs(0),
  79   _plab_allocs(0),
  80   _has_young_lab(false),
  81   _live_data(0),
  82   _critical_pins(0),
  83   _update_watermark(start),
  84   _age(0)
  85 #ifdef SHENANDOAH_CENSUS_NOISE
  86   , _youth(0)
  87 #endif // SHENANDOAH_CENSUS_NOISE
  88   {
  89 
  90   assert(Universe::on_page_boundary(_bottom) && Universe::on_page_boundary(_end),
  91          "invalid space boundaries");
  92   if (ZapUnusedHeapArea && committed) {
  93     SpaceMangler::mangle_region(MemRegion(_bottom, _end));
  94   }
  95 }
  96 
  97 void ShenandoahHeapRegion::report_illegal_transition(const char *method) {
  98   stringStream ss;
  99   ss.print("Illegal region state transition from \"%s\", at %s\n  ", region_state_to_string(_state), method);
 100   print_on(&ss);
 101   fatal("%s", ss.freeze());
 102 }
 103 
 104 void ShenandoahHeapRegion::make_regular_allocation(ShenandoahAffiliation affiliation) {
 105   shenandoah_assert_heaplocked();
 106   reset_age();
 107   switch (_state) {
 108     case _empty_uncommitted:
 109       do_commit();
 110     case _empty_committed:
 111       assert(this->affiliation() == affiliation, "Region affiliation should already be established");
 112       set_state(_regular);
 113     case _regular:
 114     case _pinned:
 115       return;
 116     default:
 117       report_illegal_transition("regular allocation");
 118   }
 119 }
 120 
 121 // Change affiliation to YOUNG_GENERATION if _state is not _pinned_cset, _regular, or _pinned.  This implements
 122 // behavior previously performed as a side effect of make_regular_bypass().
 123 void ShenandoahHeapRegion::make_young_maybe() {
 124   shenandoah_assert_heaplocked();
 125   switch (_state) {
 126    case _empty_uncommitted:
 127    case _empty_committed:
 128    case _cset:
 129    case _humongous_start:
 130    case _humongous_cont:
 131      if (affiliation() != YOUNG_GENERATION) {
 132        if (is_old()) {
 133          ShenandoahHeap::heap()->old_generation()->decrement_affiliated_region_count();
 134        }
 135        set_affiliation(YOUNG_GENERATION);
 136        ShenandoahHeap::heap()->young_generation()->increment_affiliated_region_count();
 137      }
 138      return;
 139    case _pinned_cset:
 140    case _regular:
 141    case _pinned:
 142      return;
 143    default:
 144      assert(false, "Unexpected _state in make_young_maybe");
 145   }
 146 }
 147 
 148 void ShenandoahHeapRegion::make_regular_bypass() {
 149   shenandoah_assert_heaplocked();
 150   assert (ShenandoahHeap::heap()->is_full_gc_in_progress() || ShenandoahHeap::heap()->is_degenerated_gc_in_progress(),
 151           "only for full or degen GC");
 152   reset_age();
 153   switch (_state) {
 154     case _empty_uncommitted:
 155       do_commit();
 156     case _empty_committed:
 157     case _cset:
 158     case _humongous_start:
 159     case _humongous_cont:
 160       set_state(_regular);
 161       return;
 162     case _pinned_cset:
 163       set_state(_pinned);
 164       return;
 165     case _regular:
 166     case _pinned:
 167       return;
 168     default:
 169       report_illegal_transition("regular bypass");
 170   }
 171 }
 172 
 173 void ShenandoahHeapRegion::make_humongous_start() {
 174   shenandoah_assert_heaplocked();
 175   reset_age();
 176   switch (_state) {
 177     case _empty_uncommitted:
 178       do_commit();
 179     case _empty_committed:
 180       set_state(_humongous_start);
 181       return;
 182     default:
 183       report_illegal_transition("humongous start allocation");
 184   }
 185 }
 186 
 187 void ShenandoahHeapRegion::make_humongous_start_bypass(ShenandoahAffiliation affiliation) {
 188   shenandoah_assert_heaplocked();
 189   assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
 190   // Don't bother to account for affiliated regions during Full GC.  We recompute totals at end.
 191   set_affiliation(affiliation);
 192   reset_age();
 193   switch (_state) {
 194     case _empty_committed:
 195     case _regular:
 196     case _humongous_start:
 197     case _humongous_cont:
 198       set_state(_humongous_start);
 199       return;
 200     default:
 201       report_illegal_transition("humongous start bypass");
 202   }
 203 }
 204 
 205 void ShenandoahHeapRegion::make_humongous_cont() {
 206   shenandoah_assert_heaplocked();
 207   reset_age();
 208   switch (_state) {
 209     case _empty_uncommitted:
 210       do_commit();
 211     case _empty_committed:
 212       set_state(_humongous_cont);
 213       return;
 214     default:
 215       report_illegal_transition("humongous continuation allocation");
 216   }
 217 }
 218 
 219 void ShenandoahHeapRegion::make_humongous_cont_bypass(ShenandoahAffiliation affiliation) {
 220   shenandoah_assert_heaplocked();
 221   assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
 222   set_affiliation(affiliation);
 223   // Don't bother to account for affiliated regions during Full GC.  We recompute totals at end.
 224   reset_age();
 225   switch (_state) {
 226     case _empty_committed:
 227     case _regular:
 228     case _humongous_start:
 229     case _humongous_cont:
 230       set_state(_humongous_cont);
 231       return;
 232     default:
 233       report_illegal_transition("humongous continuation bypass");
 234   }
 235 }
 236 
 237 void ShenandoahHeapRegion::make_pinned() {
 238   shenandoah_assert_heaplocked();
 239   assert(pin_count() > 0, "Should have pins: " SIZE_FORMAT, pin_count());
 240 
 241   switch (_state) {
 242     case _regular:
 243       set_state(_pinned);
 244     case _pinned_cset:
 245     case _pinned:
 246       return;
 247     case _humongous_start:
 248       set_state(_pinned_humongous_start);
 249     case _pinned_humongous_start:
 250       return;
 251     case _cset:
 252       _state = _pinned_cset;
 253       return;
 254     default:
 255       report_illegal_transition("pinning");
 256   }
 257 }
 258 
 259 void ShenandoahHeapRegion::make_unpinned() {
 260   shenandoah_assert_heaplocked();
 261   assert(pin_count() == 0, "Should not have pins: " SIZE_FORMAT, pin_count());
 262 
 263   switch (_state) {
 264     case _pinned:
 265       assert(is_affiliated(), "Pinned region should be affiliated");
 266       set_state(_regular);
 267       return;
 268     case _regular:
 269     case _humongous_start:
 270       return;
 271     case _pinned_cset:
 272       set_state(_cset);
 273       return;
 274     case _pinned_humongous_start:
 275       set_state(_humongous_start);
 276       return;
 277     default:
 278       report_illegal_transition("unpinning");
 279   }
 280 }
 281 
 282 void ShenandoahHeapRegion::make_cset() {
 283   shenandoah_assert_heaplocked();
 284   // Leave age untouched.  We need to consult the age when we are deciding whether to promote evacuated objects.
 285   switch (_state) {
 286     case _regular:
 287       set_state(_cset);
 288     case _cset:
 289       return;
 290     default:
 291       report_illegal_transition("cset");
 292   }
 293 }
 294 
 295 void ShenandoahHeapRegion::make_trash() {
 296   shenandoah_assert_heaplocked();
 297   reset_age();
 298   switch (_state) {
 299     case _humongous_start:
 300     case _humongous_cont:
 301     {
 302       // Reclaiming humongous regions also reclaims humongous waste.  When this region is eventually recycled, we'll reclaim
 303       // its used memory.  At recycle time, we no longer recognize this as a humongous region.
 304       decrement_humongous_waste();
 305     }
 306     case _cset:
 307       // Reclaiming cset regions
 308     case _regular:
 309       // Immediate region reclaim
 310       set_state(_trash);
 311       return;
 312     default:
 313       report_illegal_transition("trashing");
 314   }
 315 }
 316 
 317 void ShenandoahHeapRegion::make_trash_immediate() {
 318   make_trash();
 319 
 320   // On this path, we know there are no marked objects in the region,
 321   // tell marking context about it to bypass bitmap resets.
 322   assert(ShenandoahHeap::heap()->active_generation()->is_mark_complete(), "Marking should be complete here.");
 323   ShenandoahHeap::heap()->marking_context()->reset_top_bitmap(this);
 324 }
 325 
 326 void ShenandoahHeapRegion::make_empty() {
 327   shenandoah_assert_heaplocked();
 328   reset_age();
 329   CENSUS_NOISE(clear_youth();)
 330   switch (_state) {
 331     case _trash:
 332       set_state(_empty_committed);
 333       _empty_time = os::elapsedTime();
 334       return;
 335     default:
 336       report_illegal_transition("emptying");
 337   }
 338 }
 339 
 340 void ShenandoahHeapRegion::make_uncommitted() {
 341   shenandoah_assert_heaplocked();
 342   switch (_state) {
 343     case _empty_committed:
 344       do_uncommit();
 345       set_state(_empty_uncommitted);
 346       return;
 347     default:
 348       report_illegal_transition("uncommiting");
 349   }
 350 }
 351 
 352 void ShenandoahHeapRegion::make_committed_bypass() {
 353   shenandoah_assert_heaplocked();
 354   assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
 355 
 356   switch (_state) {
 357     case _empty_uncommitted:
 358       do_commit();
 359       set_state(_empty_committed);
 360       return;
 361     default:
 362       report_illegal_transition("commit bypass");
 363   }
 364 }
 365 
 366 void ShenandoahHeapRegion::reset_alloc_metadata() {
 367   _tlab_allocs = 0;
 368   _gclab_allocs = 0;
 369   _plab_allocs = 0;
 370 }
 371 
 372 size_t ShenandoahHeapRegion::get_shared_allocs() const {
 373   return used() - (_tlab_allocs + _gclab_allocs + _plab_allocs) * HeapWordSize;
 374 }
 375 
 376 size_t ShenandoahHeapRegion::get_tlab_allocs() const {
 377   return _tlab_allocs * HeapWordSize;
 378 }
 379 
 380 size_t ShenandoahHeapRegion::get_gclab_allocs() const {
 381   return _gclab_allocs * HeapWordSize;
 382 }
 383 
 384 size_t ShenandoahHeapRegion::get_plab_allocs() const {
 385   return _plab_allocs * HeapWordSize;
 386 }
 387 
 388 void ShenandoahHeapRegion::set_live_data(size_t s) {
 389   assert(Thread::current()->is_VM_thread(), "by VM thread");
 390   _live_data = (s >> LogHeapWordSize);
 391 }
 392 
 393 void ShenandoahHeapRegion::print_on(outputStream* st) const {
 394   st->print("|");
 395   st->print(SIZE_FORMAT_W(5), this->_index);
 396 
 397   switch (_state) {
 398     case _empty_uncommitted:
 399       st->print("|EU ");
 400       break;
 401     case _empty_committed:
 402       st->print("|EC ");
 403       break;
 404     case _regular:
 405       st->print("|R  ");
 406       break;
 407     case _humongous_start:
 408       st->print("|H  ");
 409       break;
 410     case _pinned_humongous_start:
 411       st->print("|HP ");
 412       break;
 413     case _humongous_cont:
 414       st->print("|HC ");
 415       break;
 416     case _cset:
 417       st->print("|CS ");
 418       break;
 419     case _trash:
 420       st->print("|TR ");
 421       break;
 422     case _pinned:
 423       st->print("|P  ");
 424       break;
 425     case _pinned_cset:
 426       st->print("|CSP");
 427       break;
 428     default:
 429       ShouldNotReachHere();
 430   }
 431 
 432   st->print("|%s", shenandoah_affiliation_code(affiliation()));
 433 
 434 #define SHR_PTR_FORMAT "%12" PRIxPTR
 435 
 436   st->print("|BTE " SHR_PTR_FORMAT  ", " SHR_PTR_FORMAT ", " SHR_PTR_FORMAT,
 437             p2i(bottom()), p2i(top()), p2i(end()));
 438   st->print("|TAMS " SHR_PTR_FORMAT,
 439             p2i(ShenandoahHeap::heap()->marking_context()->top_at_mark_start(const_cast<ShenandoahHeapRegion*>(this))));
 440   st->print("|UWM " SHR_PTR_FORMAT,
 441             p2i(_update_watermark));
 442   st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()),                proper_unit_for_byte_size(used()));
 443   st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()),     proper_unit_for_byte_size(get_tlab_allocs()));
 444   st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()),    proper_unit_for_byte_size(get_gclab_allocs()));
 445   if (ShenandoahHeap::heap()->mode()->is_generational()) {
 446     st->print("|P " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_plab_allocs()),   proper_unit_for_byte_size(get_plab_allocs()));
 447   }
 448   st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()),   proper_unit_for_byte_size(get_shared_allocs()));
 449   st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes()));
 450   st->print("|CP " SIZE_FORMAT_W(3), pin_count());
 451   st->cr();
 452 
 453 #undef SHR_PTR_FORMAT
 454 }
 455 
 456 // oop_iterate without closure and without cancellation.  Always returns true.
 457 bool ShenandoahHeapRegion::oop_fill_and_coalesce_without_cancel() {
 458   HeapWord* obj_addr = resume_coalesce_and_fill();
 459 
 460   assert(!is_humongous(), "No need to fill or coalesce humongous regions");
 461   if (!is_active()) {
 462     end_preemptible_coalesce_and_fill();
 463     return true;
 464   }
 465 
 466   ShenandoahHeap* heap = ShenandoahHeap::heap();
 467   ShenandoahMarkingContext* marking_context = heap->marking_context();
 468   // All objects above TAMS are considered live even though their mark bits will not be set.  Note that young-
 469   // gen evacuations that interrupt a long-running old-gen concurrent mark may promote objects into old-gen
 470   // while the old-gen concurrent marking is ongoing.  These newly promoted objects will reside above TAMS
 471   // and will be treated as live during the current old-gen marking pass, even though they will not be
 472   // explicitly marked.
 473   HeapWord* t = marking_context->top_at_mark_start(this);
 474 
 475   // Expect marking to be completed before these threads invoke this service.
 476   assert(heap->active_generation()->is_mark_complete(), "sanity");
 477   while (obj_addr < t) {
 478     oop obj = cast_to_oop(obj_addr);
 479     if (marking_context->is_marked(obj)) {
 480       assert(obj->klass() != nullptr, "klass should not be nullptr");
 481       obj_addr += obj->size();
 482     } else {
 483       // Object is not marked.  Coalesce and fill dead object with dead neighbors.
 484       HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
 485       assert(next_marked_obj <= t, "next marked object cannot exceed top");
 486       size_t fill_size = next_marked_obj - obj_addr;
 487       assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated objects known to be larger than min_size");
 488       ShenandoahHeap::fill_with_object(obj_addr, fill_size);
 489       heap->card_scan()->coalesce_objects(obj_addr, fill_size);
 490       obj_addr = next_marked_obj;
 491     }
 492   }
 493   // Mark that this region has been coalesced and filled
 494   end_preemptible_coalesce_and_fill();
 495   return true;
 496 }
 497 
 498 // oop_iterate without closure; returns true if completed without cancellation
 499 bool ShenandoahHeapRegion::oop_fill_and_coalesce() {
 500   HeapWord* obj_addr = resume_coalesce_and_fill();
 501   // Consider yielding to cancel/preemption request after this many coalesce operations (skip marked, or coalesce free).
 502   const size_t preemption_stride = 128;
 503 
 504   assert(!is_humongous(), "No need to fill or coalesce humongous regions");
 505   if (!is_active()) {
 506     end_preemptible_coalesce_and_fill();
 507     return true;
 508   }
 509 
 510   ShenandoahHeap* heap = ShenandoahHeap::heap();
 511   ShenandoahMarkingContext* marking_context = heap->marking_context();
 512   // All objects above TAMS are considered live even though their mark bits will not be set.  Note that young-
 513   // gen evacuations that interrupt a long-running old-gen concurrent mark may promote objects into old-gen
 514   // while the old-gen concurrent marking is ongoing.  These newly promoted objects will reside above TAMS
 515   // and will be treated as live during the current old-gen marking pass, even though they will not be
 516   // explicitly marked.
 517   HeapWord* t = marking_context->top_at_mark_start(this);
 518 
 519   // Expect marking to be completed before these threads invoke this service.
 520   assert(heap->active_generation()->is_mark_complete(), "sanity");
 521 
 522   size_t ops_before_preempt_check = preemption_stride;
 523   while (obj_addr < t) {
 524     oop obj = cast_to_oop(obj_addr);
 525     if (marking_context->is_marked(obj)) {
 526       assert(obj->klass() != nullptr, "klass should not be nullptr");
 527       obj_addr += obj->size();
 528     } else {
 529       // Object is not marked.  Coalesce and fill dead object with dead neighbors.
 530       HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
 531       assert(next_marked_obj <= t, "next marked object cannot exceed top");
 532       size_t fill_size = next_marked_obj - obj_addr;
 533       assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
 534       ShenandoahHeap::fill_with_object(obj_addr, fill_size);
 535       heap->card_scan()->coalesce_objects(obj_addr, fill_size);
 536       obj_addr = next_marked_obj;
 537     }
 538     if (ops_before_preempt_check-- == 0) {
 539       if (heap->cancelled_gc()) {
 540         suspend_coalesce_and_fill(obj_addr);
 541         return false;
 542       }
 543       ops_before_preempt_check = preemption_stride;
 544     }
 545   }
 546   // Mark that this region has been coalesced and filled
 547   end_preemptible_coalesce_and_fill();
 548   return true;
 549 }
 550 
 551 void ShenandoahHeapRegion::global_oop_iterate_and_fill_dead(OopIterateClosure* blk) {
 552   if (!is_active()) return;
 553   if (is_humongous()) {
 554     // No need to fill dead within humongous regions.  Either the entire region is dead, or the entire region is
 555     // unchanged.  A humongous region holds no more than one humongous object.
 556     oop_iterate_humongous(blk);
 557   } else {
 558     global_oop_iterate_objects_and_fill_dead(blk);
 559   }
 560 }
 561 
 562 void ShenandoahHeapRegion::global_oop_iterate_objects_and_fill_dead(OopIterateClosure* blk) {
 563   assert(!is_humongous(), "no humongous region here");
 564   HeapWord* obj_addr = bottom();
 565 
 566   ShenandoahHeap* heap = ShenandoahHeap::heap();
 567   ShenandoahMarkingContext* marking_context = heap->marking_context();
 568   RememberedScanner* rem_set_scanner = heap->card_scan();
 569   // Objects allocated above TAMS are not marked, but are considered live for purposes of current GC efforts.
 570   HeapWord* t = marking_context->top_at_mark_start(this);
 571 
 572   assert(heap->active_generation()->is_mark_complete(), "sanity");
 573 
 574   while (obj_addr < t) {
 575     oop obj = cast_to_oop(obj_addr);
 576     if (marking_context->is_marked(obj)) {
 577       assert(obj->klass() != nullptr, "klass should not be nullptr");
 578       // when promoting an entire region, we have to register the marked objects as well
 579       obj_addr += obj->oop_iterate_size(blk);
 580     } else {
 581       // Object is not marked.  Coalesce and fill dead object with dead neighbors.
 582       HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
 583       assert(next_marked_obj <= t, "next marked object cannot exceed top");
 584       size_t fill_size = next_marked_obj - obj_addr;
 585       assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated objects known to be larger than min_size");
 586       ShenandoahHeap::fill_with_object(obj_addr, fill_size);
 587       // coalesce_objects() unregisters all but first object subsumed within coalesced range.
 588       rem_set_scanner->coalesce_objects(obj_addr, fill_size);
 589       obj_addr = next_marked_obj;
 590     }
 591   }
 592 
 593   // Any object above TAMS and below top() is considered live.
 594   t = top();
 595   while (obj_addr < t) {
 596     oop obj = cast_to_oop(obj_addr);
 597     obj_addr += obj->oop_iterate_size(blk);
 598   }
 599 }
 600 
 601 // DO NOT CANCEL.  If this worker thread has accepted responsibility for scanning a particular range of addresses, it
 602 // must finish the work before it can be cancelled.
 603 void ShenandoahHeapRegion::oop_iterate_humongous_slice(OopIterateClosure* blk, bool dirty_only,
 604                                                        HeapWord* start, size_t words, bool write_table) {
 605   assert(words % CardTable::card_size_in_words() == 0, "Humongous iteration must span whole number of cards");
 606   assert(is_humongous(), "only humongous region here");
 607   ShenandoahHeap* heap = ShenandoahHeap::heap();
 608 
 609   // Find head.
 610   ShenandoahHeapRegion* r = humongous_start_region();
 611   assert(r->is_humongous_start(), "need humongous head here");
 612   assert(CardTable::card_size_in_words() * (words / CardTable::card_size_in_words()) == words,
 613          "slice must be integral number of cards");
 614 
 615   oop obj = cast_to_oop(r->bottom());
 616   RememberedScanner* scanner = ShenandoahHeap::heap()->card_scan();
 617   size_t card_index = scanner->card_index_for_addr(start);
 618   size_t num_cards = words / CardTable::card_size_in_words();
 619 
 620   if (dirty_only) {
 621     if (write_table) {
 622       while (num_cards-- > 0) {
 623         if (scanner->is_write_card_dirty(card_index++)) {
 624           obj->oop_iterate(blk, MemRegion(start, start + CardTable::card_size_in_words()));
 625         }
 626         start += CardTable::card_size_in_words();
 627       }
 628     } else {
 629       while (num_cards-- > 0) {
 630         if (scanner->is_card_dirty(card_index++)) {
 631           obj->oop_iterate(blk, MemRegion(start, start + CardTable::card_size_in_words()));
 632         }
 633         start += CardTable::card_size_in_words();
 634       }
 635     }
 636   } else {
 637     // Scan all data, regardless of whether cards are dirty
 638     obj->oop_iterate(blk, MemRegion(start, start + num_cards * CardTable::card_size_in_words()));
 639   }
 640 }
 641 
 642 void ShenandoahHeapRegion::oop_iterate_humongous(OopIterateClosure* blk, HeapWord* start, size_t words) {
 643   assert(is_humongous(), "only humongous region here");
 644   // Find head.
 645   ShenandoahHeapRegion* r = humongous_start_region();
 646   assert(r->is_humongous_start(), "need humongous head here");
 647   oop obj = cast_to_oop(r->bottom());
 648   obj->oop_iterate(blk, MemRegion(start, start + words));
 649 }
 650 
 651 void ShenandoahHeapRegion::oop_iterate_humongous(OopIterateClosure* blk) {
 652   assert(is_humongous(), "only humongous region here");
 653   // Find head.
 654   ShenandoahHeapRegion* r = humongous_start_region();
 655   assert(r->is_humongous_start(), "need humongous head here");
 656   oop obj = cast_to_oop(r->bottom());
 657   obj->oop_iterate(blk, MemRegion(bottom(), top()));
 658 }
 659 
 660 ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
 661   ShenandoahHeap* heap = ShenandoahHeap::heap();
 662   assert(is_humongous(), "Must be a part of the humongous region");
 663   size_t i = index();
 664   ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this);
 665   while (!r->is_humongous_start()) {
 666     assert(i > 0, "Sanity");
 667     i--;
 668     r = heap->get_region(i);
 669     assert(r->is_humongous(), "Must be a part of the humongous region");
 670   }
 671   assert(r->is_humongous_start(), "Must be");
 672   return r;
 673 }
 674 
 675 void ShenandoahHeapRegion::recycle() {
 676   shenandoah_assert_heaplocked();
 677   ShenandoahHeap* heap = ShenandoahHeap::heap();
 678   ShenandoahGeneration* generation = heap->generation_for(affiliation());
 679   heap->decrease_used(generation, used());
 680 
 681   set_top(bottom());
 682   clear_live_data();
 683 
 684   reset_alloc_metadata();
 685 
 686   heap->marking_context()->reset_top_at_mark_start(this);
 687   set_update_watermark(bottom());
 688 
 689   make_empty();
 690   ShenandoahHeap::heap()->generation_for(affiliation())->decrement_affiliated_region_count();
 691   set_affiliation(FREE);
 692   if (ZapUnusedHeapArea) {
 693     SpaceMangler::mangle_region(MemRegion(bottom(), end()));
 694   }
 695 }
 696 
 697 HeapWord* ShenandoahHeapRegion::block_start(const void* p) const {
 698   assert(MemRegion(bottom(), end()).contains(p),
 699          "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
 700          p2i(p), p2i(bottom()), p2i(end()));
 701   if (p >= top()) {
 702     return top();
 703   } else {
 704     HeapWord* last = bottom();
 705     HeapWord* cur = last;
 706     while (cur <= p) {
 707       last = cur;
 708       cur += cast_to_oop(cur)->size();
 709     }
 710     shenandoah_assert_correct(nullptr, cast_to_oop(last));
 711     return last;
 712   }
 713 }
 714 
 715 size_t ShenandoahHeapRegion::block_size(const HeapWord* p) const {
 716   assert(MemRegion(bottom(), end()).contains(p),
 717          "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
 718          p2i(p), p2i(bottom()), p2i(end()));
 719   if (p < top()) {
 720     return cast_to_oop(p)->size();
 721   } else {
 722     assert(p == top(), "just checking");
 723     return pointer_delta(end(), (HeapWord*) p);
 724   }
 725 }
 726 
 727 size_t ShenandoahHeapRegion::setup_sizes(size_t max_heap_size) {
 728   // Absolute minimums we should not ever break.
 729   static const size_t MIN_REGION_SIZE = 256*K;
 730 
 731   if (FLAG_IS_DEFAULT(ShenandoahMinRegionSize)) {
 732     FLAG_SET_DEFAULT(ShenandoahMinRegionSize, MIN_REGION_SIZE);
 733   }
 734 
 735   // Generational Shenandoah needs this alignment for card tables.
 736   if (strcmp(ShenandoahGCMode, "generational") == 0) {
 737     max_heap_size = align_up(max_heap_size, CardTable::ct_max_alignment_constraint());
 738   }
 739 
 740   size_t region_size;
 741   if (FLAG_IS_DEFAULT(ShenandoahRegionSize)) {
 742     if (ShenandoahMinRegionSize > max_heap_size / MIN_NUM_REGIONS) {
 743       err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
 744                       "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "%s).",
 745                       byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
 746                       MIN_NUM_REGIONS,
 747                       byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize));
 748       vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
 749     }
 750     if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
 751       err_msg message("" SIZE_FORMAT "%s should not be lower than minimum region size (" SIZE_FORMAT "%s).",
 752                       byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
 753                       byte_size_in_proper_unit(MIN_REGION_SIZE),         proper_unit_for_byte_size(MIN_REGION_SIZE));
 754       vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
 755     }
 756     if (ShenandoahMinRegionSize < MinTLABSize) {
 757       err_msg message("" SIZE_FORMAT "%s should not be lower than TLAB size size (" SIZE_FORMAT "%s).",
 758                       byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
 759                       byte_size_in_proper_unit(MinTLABSize),             proper_unit_for_byte_size(MinTLABSize));

 926     evt.set_used(used());
 927     evt.set_from(_state);
 928     evt.set_to(to);
 929     evt.commit();
 930   }
 931   _state = to;
 932 }
 933 
 934 void ShenandoahHeapRegion::record_pin() {
 935   Atomic::add(&_critical_pins, (size_t)1);
 936 }
 937 
 938 void ShenandoahHeapRegion::record_unpin() {
 939   assert(pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", index());
 940   Atomic::sub(&_critical_pins, (size_t)1);
 941 }
 942 
 943 size_t ShenandoahHeapRegion::pin_count() const {
 944   return Atomic::load(&_critical_pins);
 945 }
 946 
 947 void ShenandoahHeapRegion::set_affiliation(ShenandoahAffiliation new_affiliation) {
 948   ShenandoahHeap* heap = ShenandoahHeap::heap();
 949 
 950   ShenandoahAffiliation region_affiliation = heap->region_affiliation(this);
 951   {
 952     ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
 953     log_debug(gc)("Setting affiliation of Region " SIZE_FORMAT " from %s to %s, top: " PTR_FORMAT ", TAMS: " PTR_FORMAT
 954                   ", watermark: " PTR_FORMAT ", top_bitmap: " PTR_FORMAT,
 955                   index(), shenandoah_affiliation_name(region_affiliation), shenandoah_affiliation_name(new_affiliation),
 956                   p2i(top()), p2i(ctx->top_at_mark_start(this)), p2i(_update_watermark), p2i(ctx->top_bitmap(this)));
 957   }
 958 
 959 #ifdef ASSERT
 960   {
 961     // During full gc, heap->complete_marking_context() is not valid, may equal nullptr.
 962     ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
 963     size_t idx = this->index();
 964     HeapWord* top_bitmap = ctx->top_bitmap(this);
 965 
 966     assert(ctx->is_bitmap_clear_range(top_bitmap, _end),
 967            "Region " SIZE_FORMAT ", bitmap should be clear between top_bitmap: " PTR_FORMAT " and end: " PTR_FORMAT, idx,
 968            p2i(top_bitmap), p2i(_end));
 969   }
 970 #endif
 971 
 972   if (region_affiliation == new_affiliation) {
 973     return;
 974   }
 975 
 976   if (!heap->mode()->is_generational()) {
 977     log_trace(gc)("Changing affiliation of region %zu from %s to %s",
 978                   index(), affiliation_name(), shenandoah_affiliation_name(new_affiliation));
 979     heap->set_affiliation(this, new_affiliation);
 980     return;
 981   }
 982 
 983   switch (new_affiliation) {
 984     case FREE:
 985       assert(!has_live(), "Free region should not have live data");
 986       break;
 987     case YOUNG_GENERATION:
 988       reset_age();
 989       break;
 990     case OLD_GENERATION:
 991       // TODO: should we reset_age() for OLD as well?  Examine invocations of set_affiliation(). Some contexts redundantly
 992       //       invoke reset_age().
 993       break;
 994     default:
 995       ShouldNotReachHere();
 996       return;
 997   }
 998   heap->set_affiliation(this, new_affiliation);
 999 }
1000 
1001 // When we promote a region in place, we can continue to use the established marking context to guide subsequent remembered
1002 // set scans of this region's content.  The region will be coalesced and filled prior to the next old-gen marking effort.
1003 // We identify the entirety of the region as DIRTY to force the next remembered set scan to identify the "interesting pointers"
1004 // contained herein.
1005 void ShenandoahHeapRegion::promote_in_place() {
1006   ShenandoahHeap* heap = ShenandoahHeap::heap();
1007   ShenandoahMarkingContext* marking_context = heap->marking_context();
1008   HeapWord* tams = marking_context->top_at_mark_start(this);
1009   assert(heap->active_generation()->is_mark_complete(), "sanity");
1010   assert(is_young(), "Only young regions can be promoted");
1011   assert(is_regular(), "Use different service to promote humongous regions");
1012   assert(age() >= heap->age_census()->tenuring_threshold(), "Only promote regions that are sufficiently aged");
1013 
1014   ShenandoahOldGeneration* old_gen = heap->old_generation();
1015   ShenandoahYoungGeneration* young_gen = heap->young_generation();
1016   size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1017 
1018   assert(get_top_before_promote() == tams, "Cannot promote regions in place if top has advanced beyond TAMS");
1019 
1020   // Rebuild the remembered set information and mark the entire range as DIRTY.  We do NOT scan the content of this
1021   // range to determine which cards need to be DIRTY.  That would force us to scan the region twice, once now, and
1022   // once during the subsequent remembered set scan.  Instead, we blindly (conservatively) mark everything as DIRTY
1023   // now and then sort out the CLEAN pages during the next remembered set scan.
1024   //
1025   // Rebuilding the remembered set consists of clearing all object registrations (reset_object_range()) here,
1026   // then registering every live object and every coalesced range of free objects in the loop that follows.
1027   heap->card_scan()->reset_object_range(bottom(), end());
1028   heap->card_scan()->mark_range_as_dirty(bottom(), get_top_before_promote() - bottom());
1029 
1030   // TODO: use an existing coalesce-and-fill function rather than replicating the code here.
1031   HeapWord* obj_addr = bottom();
1032   while (obj_addr < tams) {
1033     oop obj = cast_to_oop(obj_addr);
1034     if (marking_context->is_marked(obj)) {
1035       assert(obj->klass() != nullptr, "klass should not be nullptr");
1036       // This thread is responsible for registering all objects in this region.  No need for lock.
1037       heap->card_scan()->register_object_without_lock(obj_addr);
1038       obj_addr += obj->size();
1039     } else {
1040       HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, tams);
1041       assert(next_marked_obj <= tams, "next marked object cannot exceed tams");
1042       size_t fill_size = next_marked_obj - obj_addr;
1043       assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated objects known to be larger than min_size");
1044       ShenandoahHeap::fill_with_object(obj_addr, fill_size);
1045       heap->card_scan()->register_object_without_lock(obj_addr);
1046       obj_addr = next_marked_obj;
1047     }
1048   }
1049   // We do not need to scan above TAMS because restored top equals tams
1050   assert(obj_addr == tams, "Expect loop to terminate when obj_addr equals tams");
1051 
1052   {
1053     ShenandoahHeapLocker locker(heap->lock());
1054 
1055     HeapWord* update_watermark = get_update_watermark();
1056 
1057     // Now that this region is affiliated with old, we can allow it to receive allocations, though it may not be in the
1058     // is_collector_free range.
1059     restore_top_before_promote();
1060 
1062     size_t region_used = used();
1063 
1064     // The update_watermark was likely established while we had the artificially high value of top.  Make it sane now.
1065     assert(update_watermark >= top(), "original top cannot exceed preserved update_watermark");
1066     set_update_watermark(top());
1067 
1068     // Unconditionally transfer one region from young to old to represent the newly promoted region.
1069     // This expands old and shrinks young by the size of one region.  Strictly, we do not "need" to expand old
1070     // if there are already enough unaffiliated regions in old to account for this newly promoted region.
1071     // However, if we do not transfer the capacities, we end up reducing the amount of memory that would have
1072     // otherwise been available to hold old evacuations, because old available is max_capacity - used and now
1073     // we would be trading a fully empty region for a partially used region.
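         //
         // For example (illustrative sizes only): with 2 MB regions, promoting a region with
         // 1.5 MB used and no capacity transfer leaves old max_capacity unchanged while old used
         // grows by 1.5 MB, so old available shrinks by 1.5 MB.  With the transfer, old
         // max_capacity grows by 2 MB and old available grows by 0.5 MB instead.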
1074 
1075     young_gen->decrease_used(region_used);
1076     young_gen->decrement_affiliated_region_count();
1077 
1078     // force_transfer_to_old() increases capacity of old and decreases capacity of young
1079     heap->generation_sizer()->force_transfer_to_old(1);
1080     set_affiliation(OLD_GENERATION);
1081 
1082     old_gen->increment_affiliated_region_count();
1083     old_gen->increase_used(region_used);
1084 
1085     // add_old_collector_free_region() increases promoted_reserve() if available space exceeds PLAB::min_size()
1086     heap->free_set()->add_old_collector_free_region(this);
1087   }
1088 }
1089 
1090 void ShenandoahHeapRegion::promote_humongous() {
1091   ShenandoahHeap* heap = ShenandoahHeap::heap();
1092   ShenandoahMarkingContext* marking_context = heap->marking_context();
1093   assert(heap->active_generation()->is_mark_complete(), "sanity");
1094   assert(is_young(), "Only young regions can be promoted");
1095   assert(is_humongous_start(), "Should not promote humongous continuation in isolation");
1096   assert(age() >= heap->age_census()->tenuring_threshold(), "Only promote regions that are sufficiently aged");
1097 
1098   ShenandoahGeneration* old_generation = heap->old_generation();
1099   ShenandoahGeneration* young_generation = heap->young_generation();
1100 
1101   oop obj = cast_to_oop(bottom());
1102   assert(marking_context->is_marked(obj), "promoted humongous object should be alive");
1103 
1104   // TODO: Consider not promoting humongous objects that represent primitive arrays.  Leaving a primitive array
1105   // (obj->is_typeArray()) in young-gen is harmless because these objects are never relocated and they are not
1106   // scanned.  Leaving primitive arrays in young-gen memory allows their memory to be reclaimed more quickly when
1107   // it becomes garbage.  Better to not make this change until sizes of young-gen and old-gen are completely
1108   // adaptive, as leaving primitive arrays in young-gen might be perceived as an "astonishing result" by someone
1109   // who has carefully analyzed the required sizes of an application's young-gen and old-gen.
1110   size_t used_bytes = obj->size() * HeapWordSize;
1111   size_t spanned_regions = ShenandoahHeapRegion::required_regions(used_bytes);
1112   size_t humongous_waste = spanned_regions * ShenandoahHeapRegion::region_size_bytes() - used_bytes;
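       // Illustrative arithmetic (assumed sizes, not from this code): with 4 MB regions, a
       // 9 MB humongous object needs required_regions(9 MB) = 3 regions and strands
       // 3 * 4 MB - 9 MB = 3 MB of humongous waste in the tail region.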
1113   size_t index_limit = index() + spanned_regions;
1114   {
1115     // We need to grab the heap lock in order to avoid a race when changing the affiliations of spanned_regions from
1116     // young to old.
1117     ShenandoahHeapLocker locker(heap->lock());
1118 
1119     // We promote humongous objects unconditionally, without checking for availability.  We adjust
1120     // usage totals, including humongous waste, after evacuation is done.
1121     log_debug(gc)("promoting humongous region " SIZE_FORMAT ", spanning " SIZE_FORMAT, index(), spanned_regions);
1122 
1123     young_generation->decrease_used(used_bytes);
1124     young_generation->decrease_humongous_waste(humongous_waste);
1125     young_generation->decrease_affiliated_region_count(spanned_regions);
1126 
1127     // force_transfer_to_old() increases capacity of old and decreases capacity of young
1128     heap->generation_sizer()->force_transfer_to_old(spanned_regions);
1129 
1130     // For this region and each humongous continuation region spanned by this humongous object, change
1131     // affiliation to OLD_GENERATION and adjust the generation-use tallies.  The remnant of memory
1132     // in the last humongous region that is not spanned by obj is currently not used.
1133     for (size_t i = index(); i < index_limit; i++) {
1134       ShenandoahHeapRegion* r = heap->get_region(i);
1135       log_debug(gc)("promoting humongous region " SIZE_FORMAT ", from " PTR_FORMAT " to " PTR_FORMAT,
1136                     r->index(), p2i(r->bottom()), p2i(r->top()));
1137       // Card state for the whole humongous range is set after the loop terminates (clean for primitive arrays, dirty otherwise), so no need to touch cards here
1138       r->set_affiliation(OLD_GENERATION);
1139     }
1140 
1141     old_generation->increase_affiliated_region_count(spanned_regions);
1142     old_generation->increase_used(used_bytes);
1143     old_generation->increase_humongous_waste(humongous_waste);
1144   }
1145 
1146   // Since this region may have served previously as OLD, it may hold obsolete object range info.
1147   heap->card_scan()->reset_object_range(bottom(), bottom() + spanned_regions * ShenandoahHeapRegion::region_size_words());
1148   // Since the humongous region holds only one object, no lock is necessary for this register_object() invocation.
1149   heap->card_scan()->register_object_without_lock(bottom());
1150 
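       // Rationale for the branch below: primitive (type) arrays hold no oops, so their cards
       // can never contain interesting pointers and the next remembered set scan can skip them
       // entirely; reference-bearing objects get the conservative all-dirty treatment described
       // in promote_in_place().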
1151   if (obj->is_typeArray()) {
1152     // Primitive arrays don't need to be scanned.
1153     log_debug(gc)("Clean cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT,
1154                   index(), p2i(bottom()), p2i(bottom() + obj->size()));
1155     heap->card_scan()->mark_range_as_clean(bottom(), obj->size());
1156   } else {
1157     log_debug(gc)("Dirty cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT,
1158                   index(), p2i(bottom()), p2i(bottom() + obj->size()));
1159     heap->card_scan()->mark_range_as_dirty(bottom(), obj->size());
1160   }
1161 }
1162 
1163 void ShenandoahHeapRegion::decrement_humongous_waste() const {
1164   assert(is_humongous(), "Should only use this for humongous regions");
1165   size_t waste_bytes = free();
1166   if (waste_bytes > 0) {
1167     ShenandoahHeap* heap = ShenandoahHeap::heap();
1168     ShenandoahGeneration* generation = heap->generation_for(affiliation());
1169     heap->decrease_humongous_waste(generation, waste_bytes);
1170   }
1171 }