/*
 * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/shenandoahCardTable.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/powerOfTwo.hpp"

size_t ShenandoahHeapRegion::RegionCount = 0;
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
size_t ShenandoahHeapRegion::RegionSizeWords = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0;

ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed) :
  _index(index),
  _bottom(start),
  _end(start + RegionSizeWords),
  _new_top(nullptr),
  _empty_time(os::elapsedTime()),
  _top_before_promoted(nullptr),
  _state(committed ? _empty_committed : _empty_uncommitted),
  _top(start),
  _tlab_allocs(0),
  _gclab_allocs(0),
  _plab_allocs(0),
  _live_data(0),
  _critical_pins(0),
  _update_watermark(start),
  _age(0)
#ifdef SHENANDOAH_CENSUS_NOISE
  , _youth(0)
#endif // SHENANDOAH_CENSUS_NOISE
  {
  assert(Universe::on_page_boundary(_bottom) && Universe::on_page_boundary(_end),
         "invalid space boundaries");
  if (ZapUnusedHeapArea && committed) {
    SpaceMangler::mangle_region(MemRegion(_bottom, _end));
  }
}

void ShenandoahHeapRegion::report_illegal_transition(const char *method) {
  stringStream ss;
  ss.print("Illegal region state transition from \"%s\", at %s\n  ", region_state_to_string(_state), method);
  print_on(&ss);
  fatal("%s", ss.freeze());
}

void ShenandoahHeapRegion::make_regular_allocation(ShenandoahAffiliation affiliation) {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
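      // fall through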
    case _empty_committed:
      assert(this->affiliation() == affiliation, "Region affiliation should already be established");
      set_state(_regular);
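      // fall through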
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular allocation");
  }
}

// Change affiliation to YOUNG_GENERATION if _state is not _pinned_cset, _regular, or _pinned.  This implements
// behavior previously performed as a side effect of make_regular_bypass().  This is used by Full GC in non-generational
// modes to transition regions from FREE. Note that all non-free regions in single-generational modes are young.
void ShenandoahHeapRegion::make_affiliated_maybe() {
  shenandoah_assert_heaplocked();
  assert(!ShenandoahHeap::heap()->mode()->is_generational(), "Only call if non-generational");
  switch (_state) {
    case _empty_uncommitted:
    case _empty_committed:
    case _cset:
    case _humongous_start:
    case _humongous_cont:
      if (affiliation() != YOUNG_GENERATION) {
        set_affiliation(YOUNG_GENERATION);
      }
      return;
    case _pinned_cset:
    case _regular:
    case _pinned:
      return;
    default:
      assert(false, "Unexpected _state in make_affiliated_maybe");
  }
}

void ShenandoahHeapRegion::make_regular_bypass() {
  shenandoah_assert_heaplocked();
  assert (!Universe::is_fully_initialized() ||
          ShenandoahHeap::heap()->is_full_gc_in_progress() ||
          ShenandoahHeap::heap()->is_degenerated_gc_in_progress(),
          "Only for STW GC or when Universe is initializing (CDS)");
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
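      // fall through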
    case _empty_committed:
    case _cset:
    case _humongous_start:
    case _humongous_cont:
      if (_state == _humongous_start || _state == _humongous_cont) {
        // CDS allocates chunks of the heap to fill with regular objects. The allocator
        // will dutifully track any waste in the unused portion of the last region. Once
        // CDS has finished initializing the objects, it will convert these regions to
        // regular regions. The 'waste' in the last region is no longer wasted at this point,
        // so we must stop treating it as such.
        decrement_humongous_waste();
      }
      set_state(_regular);
      return;
    case _pinned_cset:
      set_state(_pinned);
      return;
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular bypass");
  }
}

void ShenandoahHeapRegion::make_humongous_start() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
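      // fall through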
    case _empty_committed:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start allocation");
  }
}

void ShenandoahHeapRegion::make_humongous_start_bypass(ShenandoahAffiliation affiliation) {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
  // Don't bother to account for affiliated regions during Full GC.  We recompute totals at end.
  set_affiliation(affiliation);
  reset_age();
  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start bypass");
  }
}

void ShenandoahHeapRegion::make_humongous_cont() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
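      // fall through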
    case _empty_committed:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation allocation");
  }
}

void ShenandoahHeapRegion::make_humongous_cont_bypass(ShenandoahAffiliation affiliation) {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
  set_affiliation(affiliation);
  // Don't bother to account for affiliated regions during Full GC.  We recompute totals at end.
  reset_age();
  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation bypass");
  }
}

void ShenandoahHeapRegion::make_pinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() > 0, "Should have pins: " SIZE_FORMAT, pin_count());

  switch (_state) {
    case _regular:
      set_state(_pinned);
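      // fall through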
    case _pinned_cset:
    case _pinned:
      return;
    case _humongous_start:
      set_state(_pinned_humongous_start);
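      // fall through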
    case _pinned_humongous_start:
      return;
    case _cset:
      set_state(_pinned_cset);
      return;
    default:
      report_illegal_transition("pinning");
  }
}

void ShenandoahHeapRegion::make_unpinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() == 0, "Should not have pins: " SIZE_FORMAT, pin_count());

  switch (_state) {
    case _pinned:
      assert(is_affiliated(), "Pinned region should be affiliated");
      set_state(_regular);
      return;
    case _regular:
    case _humongous_start:
      return;
    case _pinned_cset:
      set_state(_cset);
      return;
    case _pinned_humongous_start:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("unpinning");
  }
}

void ShenandoahHeapRegion::make_cset() {
  shenandoah_assert_heaplocked();
  // Leave age untouched.  We need to consult the age when we are deciding whether to promote evacuated objects.
  switch (_state) {
    case _regular:
      set_state(_cset);
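      // fall through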
    case _cset:
      return;
    default:
      report_illegal_transition("cset");
  }
}

void ShenandoahHeapRegion::make_trash() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _humongous_start:
    case _humongous_cont:
    {
      // Reclaiming humongous regions also reclaims their humongous waste.  When this region is
      // eventually recycled, we'll reclaim its used memory.  At recycle time, we no longer
      // recognize this as a humongous region.
      decrement_humongous_waste();
    }
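    // fall through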
    case _cset:
      // Reclaiming cset regions
    case _regular:
      // Immediate region reclaim
      set_state(_trash);
      return;
    default:
      report_illegal_transition("trashing");
  }
}

void ShenandoahHeapRegion::make_trash_immediate() {
  make_trash();

  // On this path, we know there are no marked objects in the region;
  // tell the marking context about it to bypass bitmap resets.
  assert(ShenandoahHeap::heap()->gc_generation()->is_mark_complete(), "Marking should be complete here.");
  shenandoah_assert_generations_reconciled();
  ShenandoahHeap::heap()->marking_context()->reset_top_bitmap(this);
}

void ShenandoahHeapRegion::make_empty() {
  shenandoah_assert_heaplocked();
  reset_age();
  CENSUS_NOISE(clear_youth();)
  switch (_state) {
    case _trash:
      set_state(_empty_committed);
      _empty_time = os::elapsedTime();
      return;
    default:
      report_illegal_transition("emptying");
  }
}

void ShenandoahHeapRegion::make_uncommitted() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _empty_committed:
      do_uncommit();
      set_state(_empty_uncommitted);
      return;
    default:
      report_illegal_transition("uncommitting");
  }
}

void ShenandoahHeapRegion::make_committed_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      set_state(_empty_committed);
      return;
    default:
      report_illegal_transition("commit bypass");
  }
}

void ShenandoahHeapRegion::reset_alloc_metadata() {
  _tlab_allocs = 0;
  _gclab_allocs = 0;
  _plab_allocs = 0;
}

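// Shared allocations are those made directly in the region, outside of any lab:
// bytes used less the bytes handed out through TLABs, GCLABs and PLABs.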
size_t ShenandoahHeapRegion::get_shared_allocs() const {
  return used() - (_tlab_allocs + _gclab_allocs + _plab_allocs) * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_tlab_allocs() const {
  return _tlab_allocs * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_gclab_allocs() const {
  return _gclab_allocs * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_plab_allocs() const {
  return _plab_allocs * HeapWordSize;
}

void ShenandoahHeapRegion::set_live_data(size_t s) {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = (s >> LogHeapWordSize);
}

void ShenandoahHeapRegion::print_on(outputStream* st) const {
  st->print("|");
  st->print(SIZE_FORMAT_W(5), this->_index);

  switch (_state) {
    case _empty_uncommitted:
      st->print("|EU ");
      break;
    case _empty_committed:
      st->print("|EC ");
      break;
    case _regular:
      st->print("|R  ");
      break;
    case _humongous_start:
      st->print("|H  ");
      break;
    case _pinned_humongous_start:
      st->print("|HP ");
      break;
    case _humongous_cont:
      st->print("|HC ");
      break;
    case _cset:
      st->print("|CS ");
      break;
    case _trash:
      st->print("|TR ");
      break;
    case _pinned:
      st->print("|P  ");
      break;
    case _pinned_cset:
      st->print("|CSP");
      break;
    default:
      ShouldNotReachHere();
  }

  st->print("|%s", shenandoah_affiliation_code(affiliation()));

#define SHR_PTR_FORMAT "%12" PRIxPTR

  st->print("|BTE " SHR_PTR_FORMAT  ", " SHR_PTR_FORMAT ", " SHR_PTR_FORMAT,
            p2i(bottom()), p2i(top()), p2i(end()));
  st->print("|TAMS " SHR_PTR_FORMAT,
            p2i(ShenandoahHeap::heap()->marking_context()->top_at_mark_start(const_cast<ShenandoahHeapRegion*>(this))));
  st->print("|UWM " SHR_PTR_FORMAT,
            p2i(_update_watermark));
  st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()),                proper_unit_for_byte_size(used()));
  st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()),     proper_unit_for_byte_size(get_tlab_allocs()));
  st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()),    proper_unit_for_byte_size(get_gclab_allocs()));
  if (ShenandoahHeap::heap()->mode()->is_generational()) {
    st->print("|P " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_plab_allocs()),   proper_unit_for_byte_size(get_plab_allocs()));
  }
  st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()),   proper_unit_for_byte_size(get_shared_allocs()));
  st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes()));
  st->print("|CP " SIZE_FORMAT_W(3), pin_count());
  st->cr();

#undef SHR_PTR_FORMAT
}

// Coalesce-and-fill pass over the region: an object iteration without a closure.
// Returns true if the pass completed without cancellation.
bool ShenandoahHeapRegion::oop_coalesce_and_fill(bool cancellable) {
  assert(!is_humongous(), "No need to fill or coalesce humongous regions");
  if (!is_active()) {
    end_preemptible_coalesce_and_fill();
    return true;
  }

  ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
  ShenandoahMarkingContext* marking_context = heap->marking_context();

  // Expect marking to be completed before these threads invoke this service.
  assert(heap->gc_generation()->is_mark_complete(), "sanity");
  shenandoah_assert_generations_reconciled();

  // All objects above TAMS are considered live even though their mark bits will not be set.  Note that young-
  // gen evacuations that interrupt a long-running old-gen concurrent mark may promote objects into old-gen
  // while the old-gen concurrent marking is ongoing.  These newly promoted objects will reside above TAMS
  // and will be treated as live during the current old-gen marking pass, even though they will not be
  // explicitly marked.
  HeapWord* t = marking_context->top_at_mark_start(this);

  // Resume coalesce and fill from this address
  HeapWord* obj_addr = resume_coalesce_and_fill();

  while (obj_addr < t) {
    oop obj = cast_to_oop(obj_addr);
    if (marking_context->is_marked(obj)) {
      assert(obj->klass() != nullptr, "klass should not be nullptr");
      obj_addr += obj->size();
    } else {
      // Object is not marked.  Coalesce and fill dead object with dead neighbors.
      HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
      assert(next_marked_obj <= t, "next marked object cannot exceed top");
      size_t fill_size = next_marked_obj - obj_addr;
      assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
      ShenandoahHeap::fill_with_object(obj_addr, fill_size);
      heap->old_generation()->card_scan()->coalesce_objects(obj_addr, fill_size);
      obj_addr = next_marked_obj;
    }
    if (cancellable && heap->cancelled_gc()) {
      suspend_coalesce_and_fill(obj_addr);
      return false;
    }
  }
  // Mark that this region has been coalesced and filled
  end_preemptible_coalesce_and_fill();
  return true;
}

size_t get_card_count(size_t words) {
  assert(words % CardTable::card_size_in_words() == 0, "Humongous iteration must span whole number of cards");
  assert(CardTable::card_size_in_words() * (words / CardTable::card_size_in_words()) == words,
         "slice must be integral number of cards");
  return words / CardTable::card_size_in_words();
}

void ShenandoahHeapRegion::oop_iterate_humongous_slice_dirty(OopIterateClosure* blk,
                                                             HeapWord* start, size_t words, bool write_table) const {
  assert(is_humongous(), "only humongous region here");

  ShenandoahHeapRegion* r = humongous_start_region();
  oop obj = cast_to_oop(r->bottom());
  size_t num_cards = get_card_count(words);

  ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
  ShenandoahScanRemembered* scanner = heap->old_generation()->card_scan();
  size_t card_index = scanner->card_index_for_addr(start);
  if (write_table) {
    while (num_cards-- > 0) {
      if (scanner->is_write_card_dirty(card_index++)) {
        obj->oop_iterate(blk, MemRegion(start, start + CardTable::card_size_in_words()));
      }
      start += CardTable::card_size_in_words();
    }
  } else {
    while (num_cards-- > 0) {
      if (scanner->is_card_dirty(card_index++)) {
        obj->oop_iterate(blk, MemRegion(start, start + CardTable::card_size_in_words()));
      }
      start += CardTable::card_size_in_words();
    }
  }
}

void ShenandoahHeapRegion::oop_iterate_humongous_slice_all(OopIterateClosure* cl, HeapWord* start, size_t words) const {
  assert(is_humongous(), "only humongous region here");

  ShenandoahHeapRegion* r = humongous_start_region();
  oop obj = cast_to_oop(r->bottom());

  // Scan all data, regardless of whether cards are dirty
  obj->oop_iterate(cl, MemRegion(start, start + words));
}

ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(is_humongous(), "Must be a part of the humongous region");
  size_t i = index();
  ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this);
  while (!r->is_humongous_start()) {
    assert(i > 0, "Sanity");
    i--;
    r = heap->get_region(i);
    assert(r->is_humongous(), "Must be a part of the humongous region");
  }
  assert(r->is_humongous_start(), "Must be");
  return r;
}

void ShenandoahHeapRegion::recycle() {
  shenandoah_assert_heaplocked();
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahGeneration* generation = heap->generation_for(affiliation());

  heap->decrease_used(generation, used());
  generation->decrement_affiliated_region_count();

  set_top(bottom());
  clear_live_data();
  reset_alloc_metadata();

  heap->marking_context()->reset_top_at_mark_start(this);

  set_update_watermark(bottom());

  make_empty();

  set_affiliation(FREE);
  if (ZapUnusedHeapArea) {
    SpaceMangler::mangle_region(MemRegion(bottom(), end()));
  }
}

HeapWord* ShenandoahHeapRegion::block_start(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += cast_to_oop(cur)->size();
    }
    shenandoah_assert_correct(nullptr, cast_to_oop(last));
    return last;
  }
}

size_t ShenandoahHeapRegion::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p < top()) {
    return cast_to_oop(p)->size();
  } else {
    assert(p == top(), "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

size_t ShenandoahHeapRegion::setup_sizes(size_t max_heap_size) {
  // Absolute minimums we should not ever break.
  static const size_t MIN_REGION_SIZE = 256*K;

  if (FLAG_IS_DEFAULT(ShenandoahMinRegionSize)) {
    FLAG_SET_DEFAULT(ShenandoahMinRegionSize, MIN_REGION_SIZE);
  }

  // Generational Shenandoah needs this alignment for card tables.
  if (strcmp(ShenandoahGCMode, "generational") == 0) {
    max_heap_size = align_up(max_heap_size, CardTable::ct_max_alignment_constraint());
  }

  size_t region_size;
  if (FLAG_IS_DEFAULT(ShenandoahRegionSize)) {
    if (ShenandoahMinRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
                      MIN_NUM_REGIONS,
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than minimum region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(MIN_REGION_SIZE),         proper_unit_for_byte_size(MIN_REGION_SIZE));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MinTLABSize) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than TLAB size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(MinTLABSize),             proper_unit_for_byte_size(MinTLABSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than min region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize),
                      byte_size_in_proper_unit(MIN_REGION_SIZE),         proper_unit_for_byte_size(MIN_REGION_SIZE));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message);
    }
    if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Minimum (" SIZE_FORMAT "%s) should not be larger than maximum (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message);
    }

    // We rapidly expand to max_heap_size in most scenarios, so that is the measure
    // for usual heap sizes. Do not depend on initial_heap_size here.
    region_size = max_heap_size / ShenandoahTargetNumRegions;
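    // Illustrative arithmetic, assuming the default ShenandoahTargetNumRegions of 2048:
    // a 16G max heap yields 16G / 2048 = 8M regions; the MAX2/MIN2 clamps below then
    // bound the result by the configured min/max region sizes.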

    // Now make sure that we don't go over or under our limits.
    region_size = MAX2(ShenandoahMinRegionSize, region_size);
    region_size = MIN2(ShenandoahMaxRegionSize, region_size);

  } else {
    if (ShenandoahRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
                              "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
                      MIN_NUM_REGIONS,
                      byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    if (ShenandoahRegionSize < ShenandoahMinRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "%s) should be larger than min region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize),
                      byte_size_in_proper_unit(ShenandoahMinRegionSize),  proper_unit_for_byte_size(ShenandoahMinRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    if (ShenandoahRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "%s) should be lower than max region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize),
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize),  proper_unit_for_byte_size(ShenandoahMaxRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    region_size = ShenandoahRegionSize;
  }

  // Make sure region size and heap size are page aligned.
  // If large pages are used, we ensure that region size is aligned to large page size if
  // heap size is large enough to accommodate minimal number of regions. Otherwise, we align
  // region size to regular page size.

  // Figure out the page size to use, and align the heap up to it.
  size_t page_size = os::vm_page_size();
  if (UseLargePages) {
    size_t large_page_size = os::large_page_size();
    max_heap_size = align_up(max_heap_size, large_page_size);
    if ((max_heap_size / align_up(region_size, large_page_size)) >= MIN_NUM_REGIONS) {
      page_size = large_page_size;
    } else {
      // Should have been checked during argument initialization
      assert(!ShenandoahUncommit, "Uncommit requires region size aligns to large page size");
    }
  } else {
    max_heap_size = align_up(max_heap_size, page_size);
  }

  // Align region size to page size
  region_size = align_up(region_size, page_size);

  int region_size_log = log2i(region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = size_t(1) << region_size_log;
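  // For example, a computed region_size of 6M has log2i(6M) == 22, so it rounds down to 4M here.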

  // Now, set up the globals.
  guarantee(RegionSizeBytesShift == 0, "we should only set it once");
  RegionSizeBytesShift = (size_t)region_size_log;

  guarantee(RegionSizeWordsShift == 0, "we should only set it once");
  RegionSizeWordsShift = RegionSizeBytesShift - LogHeapWordSize;

  guarantee(RegionSizeBytes == 0, "we should only set it once");
  RegionSizeBytes = region_size;
  RegionSizeWords = RegionSizeBytes >> LogHeapWordSize;
  assert (RegionSizeWords*HeapWordSize == RegionSizeBytes, "sanity");

  guarantee(RegionSizeWordsMask == 0, "we should only set it once");
  RegionSizeWordsMask = RegionSizeWords - 1;

  guarantee(RegionSizeBytesMask == 0, "we should only set it once");
  RegionSizeBytesMask = RegionSizeBytes - 1;

  guarantee(RegionCount == 0, "we should only set it once");
  RegionCount = align_up(max_heap_size, RegionSizeBytes) / RegionSizeBytes;
  guarantee(RegionCount >= MIN_NUM_REGIONS, "Should have at least minimum regions");

  guarantee(MaxTLABSizeWords == 0, "we should only set it once");
  MaxTLABSizeWords = align_down(RegionSizeWords, MinObjAlignment);

  guarantee(MaxTLABSizeBytes == 0, "we should only set it once");
  MaxTLABSizeBytes = MaxTLABSizeWords * HeapWordSize;
  assert (MaxTLABSizeBytes > MinTLABSize, "should be larger");

  return max_heap_size;
}

void ShenandoahHeapRegion::do_commit() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->is_heap_region_special() && !os::commit_memory((char *) bottom(), RegionSizeBytes, false)) {
    report_java_out_of_memory("Unable to commit region");
  }
  if (!heap->commit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to commit bitmaps for region");
  }
  if (AlwaysPreTouch) {
    os::pretouch_memory(bottom(), end(), heap->pretouch_heap_page_size());
  }
  heap->increase_committed(ShenandoahHeapRegion::region_size_bytes());
}

void ShenandoahHeapRegion::do_uncommit() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->is_heap_region_special() && !os::uncommit_memory((char *) bottom(), RegionSizeBytes)) {
    report_java_out_of_memory("Unable to uncommit region");
  }
  if (!heap->uncommit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to uncommit bitmaps for region");
  }
  heap->decrease_committed(ShenandoahHeapRegion::region_size_bytes());
}

void ShenandoahHeapRegion::set_state(RegionState to) {
  EventShenandoahHeapRegionStateChange evt;
  if (evt.should_commit()) {
    evt.set_index((unsigned) index());
    evt.set_start((uintptr_t)bottom());
    evt.set_used(used());
    evt.set_from(_state);
    evt.set_to(to);
    evt.commit();
  }
  _state = to;
}

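// Unlike the state transitions above, pin counts are updated without holding the heap
// lock (note the absence of the heap-lock assert below), hence the atomic operations.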
void ShenandoahHeapRegion::record_pin() {
  Atomic::add(&_critical_pins, (size_t)1);
}

void ShenandoahHeapRegion::record_unpin() {
  assert(pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", index());
  Atomic::sub(&_critical_pins, (size_t)1);
}

size_t ShenandoahHeapRegion::pin_count() const {
  return Atomic::load(&_critical_pins);
}

void ShenandoahHeapRegion::set_affiliation(ShenandoahAffiliation new_affiliation) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahAffiliation region_affiliation = heap->region_affiliation(this);
  {
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    log_debug(gc)("Setting affiliation of Region " SIZE_FORMAT " from %s to %s, top: " PTR_FORMAT ", TAMS: " PTR_FORMAT
                  ", watermark: " PTR_FORMAT ", top_bitmap: " PTR_FORMAT,
                  index(), shenandoah_affiliation_name(region_affiliation), shenandoah_affiliation_name(new_affiliation),
                  p2i(top()), p2i(ctx->top_at_mark_start(this)), p2i(_update_watermark), p2i(ctx->top_bitmap(this)));
  }

#ifdef ASSERT
  {
    // During full gc, heap->complete_marking_context() is not valid and may equal nullptr.
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    size_t idx = this->index();
    HeapWord* top_bitmap = ctx->top_bitmap(this);

    assert(ctx->is_bitmap_range_within_region_clear(top_bitmap, _end),
           "Region " SIZE_FORMAT ", bitmap should be clear between top_bitmap: " PTR_FORMAT " and end: " PTR_FORMAT, idx,
           p2i(top_bitmap), p2i(_end));
  }
#endif

  if (region_affiliation == new_affiliation) {
    return;
  }

  if (!heap->mode()->is_generational()) {
    log_trace(gc)("Changing affiliation of region %zu from %s to %s",
                  index(), affiliation_name(), shenandoah_affiliation_name(new_affiliation));
    heap->set_affiliation(this, new_affiliation);
    return;
  }

  switch (new_affiliation) {
    case FREE:
      assert(!has_live(), "Free region should not have live data");
      break;
    case YOUNG_GENERATION:
      reset_age();
      break;
    case OLD_GENERATION:
      break;
    default:
      ShouldNotReachHere();
      return;
  }
  heap->set_affiliation(this, new_affiliation);
}

void ShenandoahHeapRegion::decrement_humongous_waste() const {
  assert(is_humongous(), "Should only use this for humongous regions");
  size_t waste_bytes = free();
  if (waste_bytes > 0) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahGeneration* generation = heap->generation_for(affiliation());
    heap->decrease_humongous_waste(generation, waste_bytes);
  }
}