/*
 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "memory/allocation.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp"
#include "gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/space.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/align.hpp"

size_t ShenandoahHeapRegion::RegionCount = 0;
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
size_t ShenandoahHeapRegion::RegionSizeWords = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;
size_t ShenandoahHeapRegion::HumongousThresholdBytes = 0;
size_t ShenandoahHeapRegion::HumongousThresholdWords = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0;

ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed) :
  _index(index),
  _bottom(start),
  _end(start + RegionSizeWords),
  _new_top(NULL),
  _empty_time(os::elapsedTime()),
  _state(committed ? _empty_committed : _empty_uncommitted),
  _top(start),
  _tlab_allocs(0),
  _gclab_allocs(0),
  _live_data(0),
  _critical_pins(0),
  _update_watermark(start) {

  assert(Universe::on_page_boundary(_bottom) && Universe::on_page_boundary(_end),
         "invalid space boundaries");
  if (ZapUnusedHeapArea && committed) {
    SpaceMangler::mangle_region(MemRegion(_bottom, _end));
  }
}

void ShenandoahHeapRegion::report_illegal_transition(const char *method) {
  ResourceMark rm;
  stringStream ss;
  ss.print("Illegal region state transition from \"%s\", at %s\n  ", region_state_to_string(_state), method);
  print_on(&ss);
  fatal(ss.as_string());
}

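// Note on the state-transition methods below: several of the switch statements
// fall through on purpose. For example, an _empty_uncommitted region is committed
// first and then follows the same transition as an _empty_committed one.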
void ShenandoahHeapRegion::make_regular_allocation() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
      set_state(_regular);
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular allocation");
  }
}

void ShenandoahHeapRegion::make_regular_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress() || ShenandoahHeap::heap()->is_degenerated_gc_in_progress(),
          "only for full or degen GC");

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
    case _cset:
    case _humongous_start:
    case _humongous_cont:
      set_state(_regular);
      return;
    case _pinned_cset:
      set_state(_pinned);
      return;
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular bypass");
  }
}

void ShenandoahHeapRegion::make_humongous_start() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start allocation");
  }
}

void ShenandoahHeapRegion::make_humongous_start_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start bypass");
  }
}

void ShenandoahHeapRegion::make_humongous_cont() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation allocation");
  }
}

void ShenandoahHeapRegion::make_humongous_cont_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation bypass");
  }
}

void ShenandoahHeapRegion::make_pinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() > 0, err_msg("Should have pins: " SIZE_FORMAT, pin_count()));

  switch (_state) {
    case _regular:
      set_state(_pinned);
    case _pinned_cset:
    case _pinned:
      return;
    case _humongous_start:
      set_state(_pinned_humongous_start);
    case _pinned_humongous_start:
      return;
    case _cset:
      set_state(_pinned_cset);
      return;
    default:
      report_illegal_transition("pinning");
  }
}

void ShenandoahHeapRegion::make_unpinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() == 0, err_msg("Should not have pins: " SIZE_FORMAT, pin_count()));

  switch (_state) {
    case _pinned:
      set_state(_regular);
      return;
    case _regular:
    case _humongous_start:
      return;
    case _pinned_cset:
      set_state(_cset);
      return;
    case _pinned_humongous_start:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("unpinning");
  }
}

void ShenandoahHeapRegion::make_cset() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _regular:
      set_state(_cset);
    case _cset:
      return;
    default:
      report_illegal_transition("cset");
  }
}

void ShenandoahHeapRegion::make_trash() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _cset:
      // Reclaiming cset regions
    case _humongous_start:
    case _humongous_cont:
      // Reclaiming humongous regions
    case _regular:
      // Immediate region reclaim
      set_state(_trash);
      return;
    default:
      report_illegal_transition("trashing");
  }
}

void ShenandoahHeapRegion::make_trash_immediate() {
  make_trash();

  // On this path, we know there are no marked objects in the region,
  // so tell the marking context about it to bypass bitmap resets.
  ShenandoahHeap::heap()->complete_marking_context()->reset_top_bitmap(this);
}

void ShenandoahHeapRegion::make_empty() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _trash:
      set_state(_empty_committed);
      _empty_time = os::elapsedTime();
      return;
    default:
      report_illegal_transition("emptying");
  }
}

void ShenandoahHeapRegion::make_uncommitted() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _empty_committed:
      do_uncommit();
      set_state(_empty_uncommitted);
      return;
    default:
      report_illegal_transition("uncommitting");
  }
}

void ShenandoahHeapRegion::make_committed_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      set_state(_empty_committed);
      return;
    default:
      report_illegal_transition("commit bypass");
  }
}

void ShenandoahHeapRegion::reset_alloc_metadata() {
  _tlab_allocs = 0;
  _gclab_allocs = 0;
}

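// Shared (non-TLAB/GCLAB) allocations are not tracked directly: they are derived
// as everything used in the region minus what was handed out through TLABs and
// GCLABs, both of which are recorded in words.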
size_t ShenandoahHeapRegion::get_shared_allocs() const {
  return used() - (_tlab_allocs + _gclab_allocs) * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_tlab_allocs() const {
  return _tlab_allocs * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_gclab_allocs() const {
  return _gclab_allocs * HeapWordSize;
}

void ShenandoahHeapRegion::set_live_data(size_t s) {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  size_t v = s >> LogHeapWordSize;
  assert(v < (size_t)max_jint, "sanity");
  _live_data = (jint)v;
}

void ShenandoahHeapRegion::print_on(outputStream* st) const {
  st->print("|");
  st->print(SIZE_FORMAT_W(5), this->_index);

  switch (_state) {
    case _empty_uncommitted:
      st->print("|EU ");
      break;
    case _empty_committed:
      st->print("|EC ");
      break;
    case _regular:
      st->print("|R  ");
      break;
    case _humongous_start:
      st->print("|H  ");
      break;
    case _pinned_humongous_start:
      st->print("|HP ");
      break;
    case _humongous_cont:
      st->print("|HC ");
      break;
    case _cset:
      st->print("|CS ");
      break;
    case _trash:
      st->print("|T  ");
      break;
    case _pinned:
      st->print("|P  ");
      break;
    case _pinned_cset:
      st->print("|CSP");
      break;
    default:
      ShouldNotReachHere();
  }
  st->print("|BTE " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12),
            p2i(bottom()), p2i(top()), p2i(end()));
  st->print("|TAMS " INTPTR_FORMAT_W(12),
            p2i(ShenandoahHeap::heap()->marking_context()->top_at_mark_start(const_cast<ShenandoahHeapRegion*>(this))));
  st->print("|UWM " INTPTR_FORMAT_W(12),
            p2i(_update_watermark));
  st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()),                proper_unit_for_byte_size(used()));
  st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()),     proper_unit_for_byte_size(get_tlab_allocs()));
  st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()),    proper_unit_for_byte_size(get_gclab_allocs()));
  st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()),   proper_unit_for_byte_size(get_shared_allocs()));
  st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes()));
  st->print("|CP " SIZE_FORMAT_W(3), pin_count());
  st->cr();
}

ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(is_humongous(), "Must be a part of the humongous region");
  size_t i = index();
  ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this);
  while (!r->is_humongous_start()) {
    assert(i > 0, "Sanity");
    i--;
    r = heap->get_region(i);
    assert(r->is_humongous(), "Must be a part of the humongous region");
  }
  assert(r->is_humongous_start(), "Must be");
  return r;
}

void ShenandoahHeapRegion::recycle() {
  set_top(bottom());
  clear_live_data();
  reset_alloc_metadata();

  ShenandoahHeap::heap()->marking_context()->reset_top_at_mark_start(this);
  set_update_watermark(bottom());

  make_empty();

  if (ZapUnusedHeapArea) {
    SpaceMangler::mangle_region(MemRegion(bottom(), end()));
  }
}

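// There is no block-offset table for Shenandoah regions: block_start() walks the
// region linearly from bottom(), stepping object by object until it passes p, and
// returns the start of the object covering p (or top() for addresses at or above it).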
HeapWord* ShenandoahHeapRegion::block_start(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
                 p2i(p), p2i(bottom()), p2i(end())));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size();
    }
    shenandoah_assert_correct(NULL, oop(last));
    return last;
  }
}

size_t ShenandoahHeapRegion::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
                 p2i(p), p2i(bottom()), p2i(end())));
  if (p < top()) {
    return oop(p)->size();
  } else {
    assert(p == top(), "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

size_t ShenandoahHeapRegion::setup_sizes(size_t max_heap_size) {
  // Absolute minimums we should not ever break:
  static const size_t MIN_REGION_SIZE = 256*K;

  size_t region_size;
  if (FLAG_IS_DEFAULT(ShenandoahRegionSize)) {
    if (ShenandoahMinRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
                      MIN_NUM_REGIONS,
                      byte_size_in_proper_unit<size_t>(ShenandoahMinRegionSize),
                      proper_unit_for_byte_size(ShenandoahMinRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than minimum region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit<size_t>(ShenandoahMinRegionSize),
                      proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(MIN_REGION_SIZE), proper_unit_for_byte_size(MIN_REGION_SIZE));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MinTLABSize) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than TLAB size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit<size_t>(ShenandoahMinRegionSize),
                      proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit<size_t>(MinTLABSize), proper_unit_for_byte_size(MinTLABSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than min region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit<size_t>(ShenandoahMaxRegionSize),
                      proper_unit_for_byte_size(ShenandoahMaxRegionSize),
                      byte_size_in_proper_unit(MIN_REGION_SIZE), proper_unit_for_byte_size(MIN_REGION_SIZE));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message);
    }
    if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Minimum (" SIZE_FORMAT "%s) should not be larger than maximum (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit<size_t>(ShenandoahMinRegionSize),
                      proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit<size_t>(ShenandoahMaxRegionSize),
                      proper_unit_for_byte_size(ShenandoahMaxRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message);
    }

    // We rapidly expand to max_heap_size in most scenarios, so that is the measure
    // for usual heap sizes. Do not depend on initial_heap_size here.
    region_size = max_heap_size / ShenandoahTargetNumRegions;

    // Now make sure that we don't go over or under our limits.
    region_size = MAX2<size_t>(ShenandoahMinRegionSize, region_size);
    region_size = MIN2<size_t>(ShenandoahMaxRegionSize, region_size);
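    // For illustration (assuming ShenandoahTargetNumRegions keeps its default of 2048):
    // a 16 GB max heap yields 16 GB / 2048 = 8 MB regions, which the clamping above
    // then keeps within [ShenandoahMinRegionSize, ShenandoahMaxRegionSize].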

  } else {
    if (ShenandoahRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
                              "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
                      MIN_NUM_REGIONS,
                      byte_size_in_proper_unit<size_t>(ShenandoahRegionSize),
                      proper_unit_for_byte_size(ShenandoahRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    if (ShenandoahRegionSize < ShenandoahMinRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "%s) should be larger than min region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit<size_t>(ShenandoahRegionSize),
                      proper_unit_for_byte_size(ShenandoahRegionSize),
                      byte_size_in_proper_unit<size_t>(ShenandoahMinRegionSize),
                      proper_unit_for_byte_size(ShenandoahMinRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    if (ShenandoahRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "%s) should be lower than max region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit<size_t>(ShenandoahRegionSize),
                      proper_unit_for_byte_size(ShenandoahRegionSize),
                      byte_size_in_proper_unit<size_t>(ShenandoahMaxRegionSize),
                      proper_unit_for_byte_size(ShenandoahMaxRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    region_size = ShenandoahRegionSize;
  }

  if (1 > ShenandoahHumongousThreshold || ShenandoahHumongousThreshold > 100) {
    vm_exit_during_initialization("Invalid -XX:ShenandoahHumongousThreshold option, should be within [1..100]");
  }

  // Make sure region size and heap size are page aligned.
  // If large pages are used, the region size is aligned to the large page size, provided
  // the heap is large enough to accommodate the minimal number of regions. Otherwise, the
  // region size is aligned to the regular page size.

  // Figure out the page size to use, and align the heap up to it
  int page_size = os::vm_page_size();
  if (UseLargePages) {
    size_t large_page_size = os::large_page_size();
    max_heap_size = align_up(max_heap_size, large_page_size);
    if ((max_heap_size / align_up(region_size, large_page_size)) >= MIN_NUM_REGIONS) {
      page_size = (int)large_page_size;
    } else {
      // Should have been checked during argument initialization
      assert(!ShenandoahUncommit, "Uncommit requires region size aligns to large page size");
    }
  } else {
    max_heap_size = align_up(max_heap_size, page_size);
  }
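
  // For illustration (assumed values): with 2 MB large pages and an 8 MB region size,
  // the check above asks whether the large-page-aligned heap still fits MIN_NUM_REGIONS
  // regions of the large-page-aligned region size; only if it does not (tiny heaps)
  // do we fall back to the regular page size.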

  // Align region size to page size
  region_size = align_up(region_size, page_size);
  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = size_t(1) << region_size_log;
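  // Example of the rounding above (illustrative numbers): a computed region size of
  // 3 MB has floor(log2) == 21, so it is rounded down to 2 MB (1 << 21).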

  // Now, set up the globals.
  guarantee(RegionSizeBytesShift == 0, "we should only set it once");
  RegionSizeBytesShift = (size_t)region_size_log;

  guarantee(RegionSizeWordsShift == 0, "we should only set it once");
  RegionSizeWordsShift = RegionSizeBytesShift - LogHeapWordSize;

  guarantee(RegionSizeBytes == 0, "we should only set it once");
  RegionSizeBytes = region_size;
  RegionSizeWords = RegionSizeBytes >> LogHeapWordSize;
  assert (RegionSizeWords*HeapWordSize == RegionSizeBytes, "sanity");

  guarantee(RegionSizeWordsMask == 0, "we should only set it once");
  RegionSizeWordsMask = RegionSizeWords - 1;

  guarantee(RegionSizeBytesMask == 0, "we should only set it once");
  RegionSizeBytesMask = RegionSizeBytes - 1;

  guarantee(RegionCount == 0, "we should only set it once");
  RegionCount = align_up(max_heap_size, RegionSizeBytes) / RegionSizeBytes;

  guarantee(HumongousThresholdWords == 0, "we should only set it once");
  HumongousThresholdWords = RegionSizeWords * ShenandoahHumongousThreshold / 100;
  HumongousThresholdWords = (size_t)align_size_down(HumongousThresholdWords, MinObjAlignment);
  assert (HumongousThresholdWords <= RegionSizeWords, "sanity");

  guarantee(HumongousThresholdBytes == 0, "we should only set it once");
  HumongousThresholdBytes = HumongousThresholdWords * HeapWordSize;
  assert (HumongousThresholdBytes <= RegionSizeBytes, "sanity");
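
  // Illustration (assumed values): with 2 MB regions, i.e. 262144 words with 8-byte
  // heap words, and ShenandoahHumongousThreshold at its default of 100, the threshold
  // equals the full region size; at 50 it would be half a region, aligned down to
  // MinObjAlignment.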

  // The rationale for trimming the TLAB sizes has to do with the raciness in
  // TLAB allocation machinery. It may happen that TLAB sizing policy polls Shenandoah
  // about the next free size, gets the answer for region #N, goes away for a while, then
  // tries to allocate in region #N, and fails because some other thread has claimed part
  // of region #N, and then the freeset allocation code has to retire region #N,
  // before moving the allocation to region #N+1.
  //
  // The worst case occurs when the "answer" is "region size", which means an entire
  // region could be retired prematurely. Having smaller TLABs does not fix that
  // completely, but reduces the probability of too wasteful region retirement.
  // With the current divisor, we will waste no more than 1/8 of the region size in the
  // worst case. This also has a secondary effect on collection set selection: even under
  // the race, the regions would be at least 7/8 used, which allows relying on
  // "used" - "live" for cset selection. Otherwise, we can get a fragmented region
  // below the garbage threshold that would never be considered for collection.
  //
  // The whole thing would be mitigated if Elastic TLABs were enabled, but there
  // is no support for them in this JDK.
  //
  guarantee(MaxTLABSizeWords == 0, "we should only set it once");
  MaxTLABSizeWords = MIN2(RegionSizeWords / 8, HumongousThresholdWords);
  MaxTLABSizeWords = (size_t)align_size_down(MaxTLABSizeWords, MinObjAlignment);

  guarantee(MaxTLABSizeBytes == 0, "we should only set it once");
  MaxTLABSizeBytes = MaxTLABSizeWords * HeapWordSize;
  assert (MaxTLABSizeBytes > MinTLABSize, "should be larger");
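
  // Worked example of the cap above (illustrative numbers): with 2 MB regions,
  // RegionSizeWords / 8 limits TLABs to 256 KB, so a racy retirement as described
  // above wastes at most 1/8 of a region.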

  log_info(gc, init)("Regions: " SIZE_FORMAT " x " SIZE_FORMAT "%s",
                     RegionCount, byte_size_in_proper_unit(RegionSizeBytes), proper_unit_for_byte_size(RegionSizeBytes));
  log_info(gc, init)("Humongous object threshold: " SIZE_FORMAT "%s",
                     byte_size_in_proper_unit(HumongousThresholdBytes), proper_unit_for_byte_size(HumongousThresholdBytes));
  log_info(gc, init)("Max TLAB size: " SIZE_FORMAT "%s",
                     byte_size_in_proper_unit(MaxTLABSizeBytes), proper_unit_for_byte_size(MaxTLABSizeBytes));

  return max_heap_size;
}

void ShenandoahHeapRegion::do_commit() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->is_heap_region_special() && !os::commit_memory((char *) bottom(), RegionSizeBytes, false)) {
    report_java_out_of_memory("Unable to commit region");
  }
  if (!heap->commit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to commit bitmaps for region");
  }
  if (AlwaysPreTouch) {
    os::pretouch_memory((char*)bottom(), (char*)end());
  }
  heap->increase_committed(ShenandoahHeapRegion::region_size_bytes());
}

void ShenandoahHeapRegion::do_uncommit() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->is_heap_region_special() && !os::uncommit_memory((char *) bottom(), RegionSizeBytes)) {
    report_java_out_of_memory("Unable to uncommit region");
  }
  if (!heap->uncommit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to uncommit bitmaps for region");
  }
  heap->decrease_committed(ShenandoahHeapRegion::region_size_bytes());
}

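// Pin bookkeeping: record_pin()/record_unpin() may be called concurrently (e.g. from
// JNI critical sections), hence the atomics below; the pinned/unpinned region state
// transitions above happen under the heap lock.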
void ShenandoahHeapRegion::record_pin() {
  Atomic::add(1, &_critical_pins);
}

void ShenandoahHeapRegion::record_unpin() {
  assert(pin_count() > 0, err_msg("Region " SIZE_FORMAT " should have non-zero pins", index()));
  Atomic::add(-1, &_critical_pins);
}

size_t ShenandoahHeapRegion::pin_count() const {
  jint v = OrderAccess::load_acquire((volatile jint*)&_critical_pins);
  assert(v >= 0, "sanity");
  return (size_t)v;
}

void ShenandoahHeapRegion::set_state(RegionState to) {
  EventShenandoahHeapRegionStateChange evt;
  if (evt.should_commit()) {
    evt.set_index((unsigned)index());
    evt.set_start((uintptr_t)bottom());
    evt.set_used(used());
    evt.set_from(_state);
    evt.set_to(to);
    evt.commit();
  }
  _state = to;
}