/*
 * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/shenandoahCardTable.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/powerOfTwo.hpp"

size_t ShenandoahHeapRegion::RegionCount = 0;
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
size_t ShenandoahHeapRegion::RegionSizeWords = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;
size_t ShenandoahHeapRegion::HumongousThresholdBytes = 0;
size_t ShenandoahHeapRegion::HumongousThresholdWords = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0;

ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed) :
  _index(index),
  _bottom(start),
  _end(start + RegionSizeWords),
  _new_top(nullptr),
  _empty_time(os::elapsedTime()),
  _top_before_promoted(nullptr),
  _state(committed ? _empty_committed : _empty_uncommitted),
  _top(start),
  _tlab_allocs(0),
  _gclab_allocs(0),
  _plab_allocs(0),
  _live_data(0),
  _critical_pins(0),
  _update_watermark(start),
  _age(0)
#ifdef SHENANDOAH_CENSUS_NOISE
  , _youth(0)
#endif // SHENANDOAH_CENSUS_NOISE
{
  assert(Universe::on_page_boundary(_bottom) && Universe::on_page_boundary(_end),
         "invalid space boundaries");
  if (ZapUnusedHeapArea && committed) {
    SpaceMangler::mangle_region(MemRegion(_bottom, _end));
  }
}

void ShenandoahHeapRegion::report_illegal_transition(const char* method) {
  stringStream ss;
  ss.print("Illegal region state transition from \"%s\", at %s\n  ", region_state_to_string(_state), method);
  print_on(&ss);
  fatal("%s", ss.freeze());
}

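// Region state transitions (an illustrative sketch, not an exhaustive spec):
// the make_* methods below implement a small state machine over RegionState.
// A typical mutator allocation path walks
//   _empty_uncommitted --do_commit()--> _empty_committed --> _regular
// and the switch statements deliberately fall through (note the absence of
// break) so that an uncommitted region is committed first and then handled
// exactly like a committed one. Any transition not listed in a switch is
// reported via report_illegal_transition().
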
void ShenandoahHeapRegion::make_regular_allocation(ShenandoahAffiliation affiliation) {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
      assert(this->affiliation() == affiliation, "Region affiliation should already be established");
      set_state(_regular);
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular allocation");
  }
}

// Change affiliation to YOUNG_GENERATION if _state is not _pinned_cset, _regular, or _pinned.
// This implements behavior previously performed as a side effect of make_regular_bypass(),
// and is used by Full GC.
void ShenandoahHeapRegion::make_young_maybe() {
  shenandoah_assert_heaplocked();
  assert(!ShenandoahHeap::heap()->mode()->is_generational(), "Only call if non-generational");
  switch (_state) {
    case _empty_uncommitted:
    case _empty_committed:
    case _cset:
    case _humongous_start:
    case _humongous_cont:
      if (affiliation() != YOUNG_GENERATION) {
        set_affiliation(YOUNG_GENERATION);
      }
      return;
    case _pinned_cset:
    case _regular:
    case _pinned:
      return;
    default:
      assert(false, "Unexpected _state in make_young_maybe");
  }
}

void ShenandoahHeapRegion::make_regular_bypass() {
  shenandoah_assert_heaplocked();
  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || ShenandoahHeap::heap()->is_degenerated_gc_in_progress(),
         "only for full or degen GC");
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
    case _cset:
    case _humongous_start:
    case _humongous_cont:
      set_state(_regular);
      return;
    case _pinned_cset:
      set_state(_pinned);
      return;
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular bypass");
  }
}

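// A humongous object spans one _humongous_start region followed by zero or
// more _humongous_cont regions. Illustrative example: with 2 MB regions, a
// 5 MB allocation occupies three regions (one start plus two continuations),
// and the 1 MB tail of the last region that the object does not cover is
// accounted as humongous waste (see decrement_humongous_waste() below).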
void ShenandoahHeapRegion::make_humongous_start() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start allocation");
  }
}

void ShenandoahHeapRegion::make_humongous_start_bypass(ShenandoahAffiliation affiliation) {
  shenandoah_assert_heaplocked();
  assert(ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
  // Don't bother to account for affiliated regions during Full GC. We recompute totals at the end.
  set_affiliation(affiliation);
  reset_age();
  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start bypass");
  }
}

void ShenandoahHeapRegion::make_humongous_cont() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation allocation");
  }
}

void ShenandoahHeapRegion::make_humongous_cont_bypass(ShenandoahAffiliation affiliation) {
  shenandoah_assert_heaplocked();
  assert(ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
  set_affiliation(affiliation);
  // Don't bother to account for affiliated regions during Full GC. We recompute totals at the end.
  reset_age();
  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation bypass");
  }
}

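// Pinning protocol (sketch): record_pin()/record_unpin() adjust the atomic pin
// count when, for example, a JNI critical section is entered or exited. The GC
// later reconciles that count with the region state under the heap lock,
// calling make_pinned() on regions whose count has become non-zero and
// make_unpinned() on regions whose count has dropped back to zero; the asserts
// below encode that ordering.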
void ShenandoahHeapRegion::make_pinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() > 0, "Should have pins: " SIZE_FORMAT, pin_count());

  switch (_state) {
    case _regular:
      set_state(_pinned);
    case _pinned_cset:
    case _pinned:
      return;
    case _humongous_start:
      set_state(_pinned_humongous_start);
    case _pinned_humongous_start:
      return;
    case _cset:
      // Use set_state() rather than a bare assignment so the JFR state-change event covers this transition too.
      set_state(_pinned_cset);
      return;
    default:
      report_illegal_transition("pinning");
  }
}

void ShenandoahHeapRegion::make_unpinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() == 0, "Should not have pins: " SIZE_FORMAT, pin_count());

  switch (_state) {
    case _pinned:
      assert(is_affiliated(), "Pinned region should be affiliated");
      set_state(_regular);
      return;
    case _regular:
    case _humongous_start:
      return;
    case _pinned_cset:
      set_state(_cset);
      return;
    case _pinned_humongous_start:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("unpinning");
  }
}

void ShenandoahHeapRegion::make_cset() {
  shenandoah_assert_heaplocked();
  // Leave age untouched.  We need to consult the age when we are deciding whether to promote evacuated objects.
  switch (_state) {
    case _regular:
      set_state(_cset);
    case _cset:
      return;
    default:
      report_illegal_transition("cset");
  }
}

void ShenandoahHeapRegion::make_trash() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _humongous_start:
    case _humongous_cont:
    {
      // Reclaiming a humongous region also reclaims its humongous waste. When this region is
      // eventually recycled, we reclaim its used memory. At recycle time, we no longer
      // recognize this as a humongous region.
      decrement_humongous_waste();
    }
    case _cset:
      // Reclaiming cset regions
    case _regular:
      // Immediate region reclaim
      set_state(_trash);
      return;
    default:
      report_illegal_transition("trashing");
  }
}

void ShenandoahHeapRegion::make_trash_immediate() {
  make_trash();

  // On this path, we know there are no marked objects in the region,
  // so tell the marking context about it to bypass bitmap resets.
  assert(ShenandoahHeap::heap()->gc_generation()->is_mark_complete(), "Marking should be complete here.");
  shenandoah_assert_generations_reconciled();
  ShenandoahHeap::heap()->marking_context()->reset_top_bitmap(this);
}

void ShenandoahHeapRegion::make_empty() {
  shenandoah_assert_heaplocked();
  reset_age();
  CENSUS_NOISE(clear_youth();)
  switch (_state) {
    case _trash:
      set_state(_empty_committed);
      _empty_time = os::elapsedTime();
      return;
    default:
      report_illegal_transition("emptying");
  }
}

void ShenandoahHeapRegion::make_uncommitted() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _empty_committed:
      do_uncommit();
      set_state(_empty_uncommitted);
      return;
    default:
      report_illegal_transition("uncommitting");
  }
}

void ShenandoahHeapRegion::make_committed_bypass() {
  shenandoah_assert_heaplocked();
  assert(ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      set_state(_empty_committed);
      return;
    default:
      report_illegal_transition("commit bypass");
  }
}

void ShenandoahHeapRegion::reset_alloc_metadata() {
  _tlab_allocs = 0;
  _gclab_allocs = 0;
  _plab_allocs = 0;
}

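// Allocation accounting (illustrative): the per-region word counters track
// TLAB, GCLAB and PLAB allocations separately, so shared (non-LAB) allocations
// are derived rather than counted:
//   shared_bytes = used() - (tlab + gclab + plab) * HeapWordSize
// For example, a region with 1024 KB used and 768 KB of combined LAB
// allocations reports 256 KB of shared allocations.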
size_t ShenandoahHeapRegion::get_shared_allocs() const {
  return used() - (_tlab_allocs + _gclab_allocs + _plab_allocs) * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_tlab_allocs() const {
  return _tlab_allocs * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_gclab_allocs() const {
  return _gclab_allocs * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_plab_allocs() const {
  return _plab_allocs * HeapWordSize;
}

void ShenandoahHeapRegion::set_live_data(size_t s) {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = (s >> LogHeapWordSize);
}

void ShenandoahHeapRegion::print_on(outputStream* st) const {
  st->print("|");
  st->print(SIZE_FORMAT_W(5), this->_index);

  switch (_state) {
    case _empty_uncommitted:
      st->print("|EU ");
      break;
    case _empty_committed:
      st->print("|EC ");
      break;
    case _regular:
      st->print("|R  ");
      break;
    case _humongous_start:
      st->print("|H  ");
      break;
    case _pinned_humongous_start:
      st->print("|HP ");
      break;
    case _humongous_cont:
      st->print("|HC ");
      break;
    case _cset:
      st->print("|CS ");
      break;
    case _trash:
      st->print("|TR ");
      break;
    case _pinned:
      st->print("|P  ");
      break;
    case _pinned_cset:
      st->print("|CSP");
      break;
    default:
      ShouldNotReachHere();
  }

  st->print("|%s", shenandoah_affiliation_code(affiliation()));

#define SHR_PTR_FORMAT "%12" PRIxPTR

  st->print("|BTE " SHR_PTR_FORMAT ", " SHR_PTR_FORMAT ", " SHR_PTR_FORMAT,
            p2i(bottom()), p2i(top()), p2i(end()));
  st->print("|TAMS " SHR_PTR_FORMAT,
            p2i(ShenandoahHeap::heap()->marking_context()->top_at_mark_start(const_cast<ShenandoahHeapRegion*>(this))));
  st->print("|UWM " SHR_PTR_FORMAT,
            p2i(_update_watermark));
  st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()),                proper_unit_for_byte_size(used()));
  st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()),     proper_unit_for_byte_size(get_tlab_allocs()));
  st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()),    proper_unit_for_byte_size(get_gclab_allocs()));
  if (ShenandoahHeap::heap()->mode()->is_generational()) {
    st->print("|P " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_plab_allocs()),   proper_unit_for_byte_size(get_plab_allocs()));
  }
  st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()),   proper_unit_for_byte_size(get_shared_allocs()));
  st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes()));
  st->print("|CP " SIZE_FORMAT_W(3), pin_count());
  st->cr();

#undef SHR_PTR_FORMAT
}
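
// Illustrative (made-up values) sample of a row emitted by print_on() for a
// regular young region:
//   |   17|R  |Y|BTE  7f30c0000000,  7f30c0123450,  7f30c0800000|TAMS  7f30c0000000|UWM  7f30c0000000|U  1536K|T  1024K|G     0B|S   512K|L  1200K|CP   0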

// Like oop_iterate() but without a closure; returns true if completed without cancellation.
bool ShenandoahHeapRegion::oop_coalesce_and_fill(bool cancellable) {
  // Consider yielding to cancel/preemption request after this many coalesce operations (skip marked, or coalesce free).
  const size_t preemption_stride = 128;

  assert(!is_humongous(), "No need to fill or coalesce humongous regions");
  if (!is_active()) {
    end_preemptible_coalesce_and_fill();
    return true;
  }

  ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
  ShenandoahMarkingContext* marking_context = heap->marking_context();

  // Expect marking to be completed before these threads invoke this service.
  assert(heap->gc_generation()->is_mark_complete(), "sanity");
  shenandoah_assert_generations_reconciled();

  // All objects above TAMS are considered live even though their mark bits will not be set.  Note that young-
  // gen evacuations that interrupt a long-running old-gen concurrent mark may promote objects into old-gen
  // while the old-gen concurrent marking is ongoing.  These newly promoted objects will reside above TAMS
  // and will be treated as live during the current old-gen marking pass, even though they will not be
  // explicitly marked.
  HeapWord* t = marking_context->top_at_mark_start(this);

  // Resume coalesce and fill from this address
  HeapWord* obj_addr = resume_coalesce_and_fill();

  size_t ops_before_preempt_check = preemption_stride;
  while (obj_addr < t) {
    oop obj = cast_to_oop(obj_addr);
    if (marking_context->is_marked(obj)) {
      assert(obj->klass() != nullptr, "klass should not be nullptr");
      obj_addr += obj->size();
    } else {
      // Object is not marked.  Coalesce and fill dead object with dead neighbors.
      HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
      assert(next_marked_obj <= t, "next marked object cannot exceed top");
      size_t fill_size = next_marked_obj - obj_addr;
      assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
      ShenandoahHeap::fill_with_object(obj_addr, fill_size);
      heap->old_generation()->card_scan()->coalesce_objects(obj_addr, fill_size);
      obj_addr = next_marked_obj;
    }
    if (cancellable && ops_before_preempt_check-- == 0) {
      if (heap->cancelled_gc()) {
        suspend_coalesce_and_fill(obj_addr);
        return false;
      }
      ops_before_preempt_check = preemption_stride;
    }
  }
  // Mark that this region has been coalesced and filled
  end_preemptible_coalesce_and_fill();
  return true;
}
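
// Illustrative example of the coalesce loop above: if the next marked object
// lies 8 words beyond a dead object's start, the whole 8-word gap (the dead
// object plus any dead neighbors) is overwritten by a single filler object and
// registered with the card scanner as one coalesced range.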

// DO NOT CANCEL.  If this worker thread has accepted responsibility for scanning a particular range of addresses, it
// must finish the work before it can be cancelled.
void ShenandoahHeapRegion::oop_iterate_humongous_slice(OopIterateClosure* blk, bool dirty_only,
                                                       HeapWord* start, size_t words, bool write_table) {
  assert(words % CardTable::card_size_in_words() == 0, "Humongous iteration must span whole number of cards");
  assert(is_humongous(), "only humongous region here");
  ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();

  // Find head.
  ShenandoahHeapRegion* r = humongous_start_region();
  assert(r->is_humongous_start(), "need humongous head here");
  assert(CardTable::card_size_in_words() * (words / CardTable::card_size_in_words()) == words,
         "slice must be integral number of cards");

  oop obj = cast_to_oop(r->bottom());
  RememberedScanner* scanner = heap->old_generation()->card_scan();
  size_t card_index = scanner->card_index_for_addr(start);
  size_t num_cards = words / CardTable::card_size_in_words();

  if (dirty_only) {
    if (write_table) {
      while (num_cards-- > 0) {
        if (scanner->is_write_card_dirty(card_index++)) {
          obj->oop_iterate(blk, MemRegion(start, start + CardTable::card_size_in_words()));
        }
        start += CardTable::card_size_in_words();
      }
    } else {
      while (num_cards-- > 0) {
        if (scanner->is_card_dirty(card_index++)) {
          obj->oop_iterate(blk, MemRegion(start, start + CardTable::card_size_in_words()));
        }
        start += CardTable::card_size_in_words();
      }
    }
  } else {
    // Scan all data, regardless of whether cards are dirty
    obj->oop_iterate(blk, MemRegion(start, start + num_cards * CardTable::card_size_in_words()));
  }
}

ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(is_humongous(), "Must be a part of the humongous region");
  size_t i = index();
  ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this);
  while (!r->is_humongous_start()) {
    assert(i > 0, "Sanity");
    i--;
    r = heap->get_region(i);
    assert(r->is_humongous(), "Must be a part of the humongous region");
  }
  assert(r->is_humongous_start(), "Must be");
  return r;
}

void ShenandoahHeapRegion::recycle() {
  shenandoah_assert_heaplocked();
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahGeneration* generation = heap->generation_for(affiliation());

  heap->decrease_used(generation, used());
  generation->decrement_affiliated_region_count();

  set_top(bottom());
  clear_live_data();

  reset_alloc_metadata();

  heap->marking_context()->reset_top_at_mark_start(this);
  set_update_watermark(bottom());

  make_empty();

  set_affiliation(FREE);
  if (ZapUnusedHeapArea) {
    SpaceMangler::mangle_region(MemRegion(bottom(), end()));
  }
}

HeapWord* ShenandoahHeapRegion::block_start(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += cast_to_oop(cur)->size();
    }
    shenandoah_assert_correct(nullptr, cast_to_oop(last));
    return last;
  }
}
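
// Note: block_start() walks object by object from bottom(), so its cost is
// linear in the number of objects preceding p. It is suited to infrequent
// callers such as heap walking and verification rather than hot paths.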

size_t ShenandoahHeapRegion::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p < top()) {
    return cast_to_oop(p)->size();
  } else {
    assert(p == top(), "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

size_t ShenandoahHeapRegion::setup_sizes(size_t max_heap_size) {
  // Absolute minimums we should not ever break.
  static const size_t MIN_REGION_SIZE = 256*K;

  if (FLAG_IS_DEFAULT(ShenandoahMinRegionSize)) {
    FLAG_SET_DEFAULT(ShenandoahMinRegionSize, MIN_REGION_SIZE);
  }

  // Generational Shenandoah needs this alignment for card tables.
  if (strcmp(ShenandoahGCMode, "generational") == 0) {
    max_heap_size = align_up(max_heap_size, CardTable::ct_max_alignment_constraint());
  }

  size_t region_size;
  if (FLAG_IS_DEFAULT(ShenandoahRegionSize)) {
    if (ShenandoahMinRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
                      MIN_NUM_REGIONS,
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than minimum region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(MIN_REGION_SIZE),         proper_unit_for_byte_size(MIN_REGION_SIZE));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MinTLABSize) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than TLAB size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(MinTLABSize),             proper_unit_for_byte_size(MinTLABSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than min region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize),
                      byte_size_in_proper_unit(MIN_REGION_SIZE),         proper_unit_for_byte_size(MIN_REGION_SIZE));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message);
    }
    if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Minimum (" SIZE_FORMAT "%s) should not be larger than maximum (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message);
    }

    // We rapidly expand to max_heap_size in most scenarios, so that is the measure
    // for usual heap sizes. Do not depend on initial_heap_size here.
    region_size = max_heap_size / ShenandoahTargetNumRegions;
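    // Illustrative example: a 32 GB max heap with the default target of 2048
    // regions yields a 16 MB starting estimate, which the clamps below leave
    // unchanged.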

    // Now make sure that we don't go over or under our limits.
    region_size = MAX2(ShenandoahMinRegionSize, region_size);
    region_size = MIN2(ShenandoahMaxRegionSize, region_size);

  } else {
    if (ShenandoahRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
                      MIN_NUM_REGIONS,
                      byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    if (ShenandoahRegionSize < ShenandoahMinRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "%s) should not be smaller than min region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahRegionSize),    proper_unit_for_byte_size(ShenandoahRegionSize),
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    if (ShenandoahRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "%s) should not be larger than max region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahRegionSize),    proper_unit_for_byte_size(ShenandoahRegionSize),
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    region_size = ShenandoahRegionSize;
  }

  // Make sure region size and heap size are page aligned.
  // If large pages are used, align the region size to the large page size when
  // the heap is large enough to accommodate the minimal number of regions;
  // otherwise, align the region size to the regular page size.

  // Figure out the page size to use, and align the heap up to it.
  size_t page_size = os::vm_page_size();
  if (UseLargePages) {
    size_t large_page_size = os::large_page_size();
    max_heap_size = align_up(max_heap_size, large_page_size);
    if ((max_heap_size / align_up(region_size, large_page_size)) >= MIN_NUM_REGIONS) {
      page_size = large_page_size;
    } else {
      // Should have been checked during argument initialization
      assert(!ShenandoahUncommit, "Uncommit requires region size aligns to large page size");
    }
  } else {
    max_heap_size = align_up(max_heap_size, page_size);
  }

  // Align region size to page size
  region_size = align_up(region_size, page_size);

  int region_size_log = log2i(region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = size_t(1) << region_size_log;
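  // E.g., an estimate of 6 MB has log2i == 22 and therefore rounds down to a
  // 4 MB region size.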

  // Now, set up the globals.
  guarantee(RegionSizeBytesShift == 0, "we should only set it once");
  RegionSizeBytesShift = (size_t)region_size_log;

  guarantee(RegionSizeWordsShift == 0, "we should only set it once");
  RegionSizeWordsShift = RegionSizeBytesShift - LogHeapWordSize;

  guarantee(RegionSizeBytes == 0, "we should only set it once");
  RegionSizeBytes = region_size;
  RegionSizeWords = RegionSizeBytes >> LogHeapWordSize;
  assert(RegionSizeWords * HeapWordSize == RegionSizeBytes, "sanity");

  guarantee(RegionSizeWordsMask == 0, "we should only set it once");
  RegionSizeWordsMask = RegionSizeWords - 1;

  guarantee(RegionSizeBytesMask == 0, "we should only set it once");
  RegionSizeBytesMask = RegionSizeBytes - 1;

  guarantee(RegionCount == 0, "we should only set it once");
  RegionCount = align_up(max_heap_size, RegionSizeBytes) / RegionSizeBytes;
  guarantee(RegionCount >= MIN_NUM_REGIONS, "Should have at least minimum regions");

  guarantee(HumongousThresholdWords == 0, "we should only set it once");
  HumongousThresholdWords = RegionSizeWords * ShenandoahHumongousThreshold / 100;
  HumongousThresholdWords = align_down(HumongousThresholdWords, MinObjAlignment);
  assert(HumongousThresholdWords <= RegionSizeWords, "sanity");
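  // Illustrative: with the default ShenandoahHumongousThreshold of 100, the
  // threshold equals the region size, so only allocations that do not fit in a
  // single region are humongous; a threshold of 50 on 16 MB regions would
  // classify allocations above 8 MB as humongous.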

  guarantee(HumongousThresholdBytes == 0, "we should only set it once");
  HumongousThresholdBytes = HumongousThresholdWords * HeapWordSize;
  assert(HumongousThresholdBytes <= RegionSizeBytes, "sanity");

  guarantee(MaxTLABSizeWords == 0, "we should only set it once");
  MaxTLABSizeWords = MIN2(RegionSizeWords, HumongousThresholdWords);
  MaxTLABSizeWords = align_down(MaxTLABSizeWords, MinObjAlignment);

  guarantee(MaxTLABSizeBytes == 0, "we should only set it once");
  MaxTLABSizeBytes = MaxTLABSizeWords * HeapWordSize;
  assert(MaxTLABSizeBytes > MinTLABSize, "should be larger");

  return max_heap_size;
}

void ShenandoahHeapRegion::do_commit() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->is_heap_region_special() && !os::commit_memory((char*) bottom(), RegionSizeBytes, false)) {
    report_java_out_of_memory("Unable to commit region");
  }
  if (!heap->commit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to commit bitmaps for region");
  }
  if (AlwaysPreTouch) {
    os::pretouch_memory(bottom(), end(), heap->pretouch_heap_page_size());
  }
  heap->increase_committed(ShenandoahHeapRegion::region_size_bytes());
}

void ShenandoahHeapRegion::do_uncommit() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->is_heap_region_special() && !os::uncommit_memory((char*) bottom(), RegionSizeBytes)) {
    report_java_out_of_memory("Unable to uncommit region");
  }
  if (!heap->uncommit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to uncommit bitmaps for region");
  }
  heap->decrease_committed(ShenandoahHeapRegion::region_size_bytes());
}

void ShenandoahHeapRegion::set_state(RegionState to) {
  EventShenandoahHeapRegionStateChange evt;
  if (evt.should_commit()) {
    evt.set_index((unsigned) index());
    evt.set_start((uintptr_t) bottom());
    evt.set_used(used());
    evt.set_from(_state);
    evt.set_to(to);
    evt.commit();
  }
  _state = to;
}

void ShenandoahHeapRegion::record_pin() {
  Atomic::add(&_critical_pins, (size_t)1);
}

void ShenandoahHeapRegion::record_unpin() {
  assert(pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", index());
  Atomic::sub(&_critical_pins, (size_t)1);
}

size_t ShenandoahHeapRegion::pin_count() const {
  return Atomic::load(&_critical_pins);
}

void ShenandoahHeapRegion::set_affiliation(ShenandoahAffiliation new_affiliation) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahAffiliation region_affiliation = heap->region_affiliation(this);
  {
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    log_debug(gc)("Setting affiliation of Region " SIZE_FORMAT " from %s to %s, top: " PTR_FORMAT ", TAMS: " PTR_FORMAT
                  ", watermark: " PTR_FORMAT ", top_bitmap: " PTR_FORMAT,
                  index(), shenandoah_affiliation_name(region_affiliation), shenandoah_affiliation_name(new_affiliation),
                  p2i(top()), p2i(ctx->top_at_mark_start(this)), p2i(_update_watermark), p2i(ctx->top_bitmap(this)));
  }

#ifdef ASSERT
  {
    // During Full GC, heap->complete_marking_context() may not be valid (it may be nullptr).
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    size_t idx = this->index();
    HeapWord* top_bitmap = ctx->top_bitmap(this);

    assert(ctx->is_bitmap_clear_range(top_bitmap, _end),
           "Region " SIZE_FORMAT ", bitmap should be clear between top_bitmap: " PTR_FORMAT " and end: " PTR_FORMAT, idx,
           p2i(top_bitmap), p2i(_end));
  }
#endif

  if (region_affiliation == new_affiliation) {
    return;
  }

  if (!heap->mode()->is_generational()) {
    log_trace(gc)("Changing affiliation of region %zu from %s to %s",
                  index(), affiliation_name(), shenandoah_affiliation_name(new_affiliation));
    heap->set_affiliation(this, new_affiliation);
    return;
  }

  switch (new_affiliation) {
    case FREE:
      assert(!has_live(), "Free region should not have live data");
      break;
    case YOUNG_GENERATION:
      reset_age();
      break;
    case OLD_GENERATION:
      // TODO: should we reset_age() for OLD as well?  Examine invocations of set_affiliation(). Some contexts redundantly
      //       invoke reset_age().
      break;
    default:
      ShouldNotReachHere();
      return;
  }
  heap->set_affiliation(this, new_affiliation);
}

void ShenandoahHeapRegion::decrement_humongous_waste() const {
  assert(is_humongous(), "Should only use this for humongous regions");
  size_t waste_bytes = free();
  if (waste_bytes > 0) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahGeneration* generation = heap->generation_for(affiliation());
    heap->decrease_humongous_waste(generation, waste_bytes);
  }
}
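
// Note: for a humongous object, every region except possibly the last one is
// filled to capacity, so free(), and therefore the waste adjustment above, is
// expected to be non-zero only for the final region of the series.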