/*
 * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/shenandoahCardTable.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/powerOfTwo.hpp"

size_t ShenandoahHeapRegion::RegionCount = 0;
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
size_t ShenandoahHeapRegion::RegionSizeWords = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0;

ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed) :
  _index(index),
  _bottom(start),
  _end(start + RegionSizeWords),
  _new_top(nullptr),
  _empty_time(os::elapsedTime()),
  _top_before_promoted(nullptr),
  _state(committed ? _empty_committed : _empty_uncommitted),
  _top(start),
  _tlab_allocs(0),
  _gclab_allocs(0),
  _plab_allocs(0),
  _live_data(0),
  _critical_pins(0),
  _update_watermark(start),
  _age(0),
#ifdef SHENANDOAH_CENSUS_NOISE
  _youth(0),
#endif // SHENANDOAH_CENSUS_NOISE
  _needs_bitmap_reset(false)
  {
  assert(Universe::on_page_boundary(_bottom) && Universe::on_page_boundary(_end),
         "invalid space boundaries");
  if (ZapUnusedHeapArea && committed) {
    SpaceMangler::mangle_region(MemRegion(_bottom, _end));
  }
  _recycling.unset();
}

void ShenandoahHeapRegion::report_illegal_transition(const char *method) {
  stringStream ss;
  ss.print("Illegal region state transition from \"%s\", at %s\n  ", region_state_to_string(state()), method);
  print_on(&ss);
  fatal("%s", ss.freeze());
}

void ShenandoahHeapRegion::make_regular_allocation(ShenandoahAffiliation affiliation) {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (state()) {
    case _empty_uncommitted:
      do_commit();
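      // Fallthrough: the region is now committed and empty.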
    case _empty_committed:
      assert(this->affiliation() == affiliation, "Region affiliation should already be established");
      set_state(_regular);
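      // Fallthrough to return.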
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular allocation");
  }
}

// Change affiliation to YOUNG_GENERATION if _state is not _pinned_cset, _regular, or _pinned.  This implements
// behavior previously performed as a side effect of make_regular_bypass().  This is used by Full GC in non-generational
// modes to transition regions from FREE. Note that all non-free regions in single-generational modes are young.
void ShenandoahHeapRegion::make_affiliated_maybe() {
  shenandoah_assert_heaplocked();
  assert(!ShenandoahHeap::heap()->mode()->is_generational(), "Only call if non-generational");
  switch (state()) {
    case _empty_uncommitted:
    case _empty_committed:
    case _cset:
    case _humongous_start:
    case _humongous_cont:
      if (affiliation() != YOUNG_GENERATION) {
        set_affiliation(YOUNG_GENERATION);
      }
      return;
    case _pinned_cset:
    case _regular:
    case _pinned:
      return;
    default:
      assert(false, "Unexpected _state in make_affiliated_maybe");
  }
}

void ShenandoahHeapRegion::make_regular_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress() || ShenandoahHeap::heap()->is_degenerated_gc_in_progress(),
          "only for full or degen GC");
  reset_age();
  auto cur_state = state();
  switch (cur_state) {
    case _empty_uncommitted:
      do_commit();
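      // Fallthrough: a freshly committed region is handled like an empty committed one.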
    case _empty_committed:
    case _cset:
    case _humongous_start:
    case _humongous_cont:
      if (cur_state == _humongous_start || cur_state == _humongous_cont) {
        // CDS allocates chunks of the heap to fill with regular objects. The allocator
        // will dutifully track any waste in the unused portion of the last region. Once
        // CDS has finished initializing the objects, it will convert these regions to
        // regular regions. The 'waste' in the last region is no longer wasted at this point,
        // so we must stop treating it as such.
        decrement_humongous_waste();
      }
      set_state(_regular);
      return;
    case _pinned_cset:
      set_state(_pinned);
      return;
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular bypass");
  }
}

void ShenandoahHeapRegion::make_humongous_start() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (state()) {
    case _empty_uncommitted:
      do_commit();
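      // Fallthrough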
    case _empty_committed:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start allocation");
  }
}

void ShenandoahHeapRegion::make_humongous_start_bypass(ShenandoahAffiliation affiliation) {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
  // Don't bother to account for affiliated regions during Full GC.  We recompute totals at end.
  set_affiliation(affiliation);
  reset_age();
  switch (state()) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start bypass");
  }
}

void ShenandoahHeapRegion::make_humongous_cont() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (state()) {
    case _empty_uncommitted:
      do_commit();
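      // Fallthrough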
    case _empty_committed:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation allocation");
  }
}

void ShenandoahHeapRegion::make_humongous_cont_bypass(ShenandoahAffiliation affiliation) {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
  set_affiliation(affiliation);
  // Don't bother to account for affiliated regions during Full GC.  We recompute totals at end.
  reset_age();
  switch (state()) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation bypass");
  }
}

void ShenandoahHeapRegion::make_pinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() > 0, "Should have pins: " SIZE_FORMAT, pin_count());

  switch (state()) {
    case _regular:
      set_state(_pinned);
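      // Fallthrough to return.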
    case _pinned_cset:
    case _pinned:
      return;
    case _humongous_start:
      set_state(_pinned_humongous_start);
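      // Fallthrough to return.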
    case _pinned_humongous_start:
      return;
    case _cset:
      set_state(_pinned_cset);
      return;
    default:
      report_illegal_transition("pinning");
  }
}

void ShenandoahHeapRegion::make_unpinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() == 0, "Should not have pins: " SIZE_FORMAT, pin_count());

  switch (state()) {
    case _pinned:
      assert(is_affiliated(), "Pinned region should be affiliated");
      set_state(_regular);
      return;
    case _regular:
    case _humongous_start:
      return;
    case _pinned_cset:
      set_state(_cset);
      return;
    case _pinned_humongous_start:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("unpinning");
  }
}

void ShenandoahHeapRegion::make_cset() {
  shenandoah_assert_heaplocked();
  // Leave age untouched.  We need to consult the age when we are deciding whether to promote evacuated objects.
  switch (state()) {
    case _regular:
      set_state(_cset);
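      // Fallthrough to return.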
    case _cset:
      return;
    default:
      report_illegal_transition("cset");
  }
}

void ShenandoahHeapRegion::make_trash() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (state()) {
    case _humongous_start:
    case _humongous_cont:
    {
      // Reclaim the humongous waste here; the used memory itself is reclaimed when this region is
      // eventually recycled.  At recycle time, we no longer recognize this as a humongous region.
      decrement_humongous_waste();
    }
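    // Fallthrough: once waste is uncounted, trash the humongous region like any other.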
    case _cset:
      // Reclaiming cset regions
    case _regular:
      // Immediate region reclaim
      set_state(_trash);
      return;
    default:
      report_illegal_transition("trashing");
  }
}

void ShenandoahHeapRegion::make_trash_immediate() {
  make_trash();

  // On this path, we know there are no marked objects in the region, so tell
  // the marking context about it to bypass bitmap resets.
  assert(ShenandoahHeap::heap()->gc_generation()->is_mark_complete(), "Marking should be complete here.");
  shenandoah_assert_generations_reconciled();
  ShenandoahHeap::heap()->marking_context()->reset_top_bitmap(this);
}

void ShenandoahHeapRegion::make_empty() {
  reset_age();
  CENSUS_NOISE(clear_youth();)
  switch (state()) {
    case _trash:
      set_state(_empty_committed);
      _empty_time = os::elapsedTime();
      return;
    default:
      report_illegal_transition("emptying");
  }
}

void ShenandoahHeapRegion::make_uncommitted() {
  shenandoah_assert_heaplocked();
  switch (state()) {
    case _empty_committed:
      do_uncommit();
      set_state(_empty_uncommitted);
      return;
    default:
      report_illegal_transition("uncommitting");
  }
}

void ShenandoahHeapRegion::make_committed_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");

  switch (state()) {
    case _empty_uncommitted:
      do_commit();
      set_state(_empty_committed);
      return;
    default:
      report_illegal_transition("commit bypass");
  }
}

void ShenandoahHeapRegion::reset_alloc_metadata() {
  _tlab_allocs = 0;
  _gclab_allocs = 0;
  _plab_allocs = 0;
}

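// Note: the per-LAB counters below are in heap words, while used() is in bytes.
// Shared allocations are whatever portion of used() was not satisfied from a
// TLAB, GCLAB, or PLAB.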
size_t ShenandoahHeapRegion::get_shared_allocs() const {
  return used() - (_tlab_allocs + _gclab_allocs + _plab_allocs) * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_tlab_allocs() const {
  return _tlab_allocs * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_gclab_allocs() const {
  return _gclab_allocs * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_plab_allocs() const {
  return _plab_allocs * HeapWordSize;
}

void ShenandoahHeapRegion::set_live_data(size_t s) {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = (s >> LogHeapWordSize);
}

void ShenandoahHeapRegion::print_on(outputStream* st) const {
  st->print("|");
  st->print(SIZE_FORMAT_W(5), this->_index);

  switch (state()) {
    case _empty_uncommitted:
      st->print("|EU ");
      break;
    case _empty_committed:
      st->print("|EC ");
      break;
    case _regular:
      st->print("|R  ");
      break;
    case _humongous_start:
      st->print("|H  ");
      break;
    case _pinned_humongous_start:
      st->print("|HP ");
      break;
    case _humongous_cont:
      st->print("|HC ");
      break;
    case _cset:
      st->print("|CS ");
      break;
    case _trash:
      st->print("|TR ");
      break;
    case _pinned:
      st->print("|P  ");
      break;
    case _pinned_cset:
      st->print("|CSP");
      break;
    default:
      ShouldNotReachHere();
  }

  st->print("|%s", shenandoah_affiliation_code(affiliation()));

#define SHR_PTR_FORMAT "%12" PRIxPTR

  st->print("|BTE " SHR_PTR_FORMAT ", " SHR_PTR_FORMAT ", " SHR_PTR_FORMAT,
            p2i(bottom()), p2i(top()), p2i(end()));
  st->print("|TAMS " SHR_PTR_FORMAT,
            p2i(ShenandoahHeap::heap()->marking_context()->top_at_mark_start(const_cast<ShenandoahHeapRegion*>(this))));
  st->print("|UWM " SHR_PTR_FORMAT,
            p2i(_update_watermark));
  st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()),                proper_unit_for_byte_size(used()));
  st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()),     proper_unit_for_byte_size(get_tlab_allocs()));
  st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()),    proper_unit_for_byte_size(get_gclab_allocs()));
  if (ShenandoahHeap::heap()->mode()->is_generational()) {
    st->print("|P " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_plab_allocs()),   proper_unit_for_byte_size(get_plab_allocs()));
  }
  st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()),   proper_unit_for_byte_size(get_shared_allocs()));
  st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes()));
  st->print("|CP " SIZE_FORMAT_W(3), pin_count());
  st->cr();

#undef SHR_PTR_FORMAT
}

// Similar to oop_iterate, but coalesces and fills dead objects rather than applying
// a closure to each live one; returns true if completed without cancellation.
bool ShenandoahHeapRegion::oop_coalesce_and_fill(bool cancellable) {
  assert(!is_humongous(), "No need to fill or coalesce humongous regions");
  if (!is_active()) {
    end_preemptible_coalesce_and_fill();
    return true;
  }

  ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
  ShenandoahMarkingContext* marking_context = heap->marking_context();

  // Expect marking to be completed before these threads invoke this service.
  assert(heap->gc_generation()->is_mark_complete(), "sanity");
  shenandoah_assert_generations_reconciled();

  // All objects above TAMS are considered live even though their mark bits will not be set.  Note that young-
  // gen evacuations that interrupt a long-running old-gen concurrent mark may promote objects into old-gen
  // while the old-gen concurrent marking is ongoing.  These newly promoted objects will reside above TAMS
  // and will be treated as live during the current old-gen marking pass, even though they will not be
  // explicitly marked.
  HeapWord* t = marking_context->top_at_mark_start(this);

  // Resume coalesce and fill from this address
  HeapWord* obj_addr = resume_coalesce_and_fill();

  while (obj_addr < t) {
    oop obj = cast_to_oop(obj_addr);
    if (marking_context->is_marked(obj)) {
      assert(obj->klass() != nullptr, "klass should not be nullptr");
      obj_addr += obj->size();
    } else {
      // Object is not marked.  Coalesce and fill dead object with dead neighbors.
      HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
      assert(next_marked_obj <= t, "next marked object cannot exceed top");
      size_t fill_size = next_marked_obj - obj_addr;
      assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
      ShenandoahHeap::fill_with_object(obj_addr, fill_size);
      heap->old_generation()->card_scan()->coalesce_objects(obj_addr, fill_size);
      obj_addr = next_marked_obj;
    }
    if (cancellable && heap->cancelled_gc()) {
      suspend_coalesce_and_fill(obj_addr);
      return false;
    }
  }
  // Mark that this region has been coalesced and filled
  end_preemptible_coalesce_and_fill();
  return true;
}

size_t get_card_count(size_t words) {
  assert(words % CardTable::card_size_in_words() == 0, "Humongous iteration must span whole number of cards");
  assert(CardTable::card_size_in_words() * (words / CardTable::card_size_in_words()) == words,
         "slice must be integral number of cards");
  return words / CardTable::card_size_in_words();
}

void ShenandoahHeapRegion::oop_iterate_humongous_slice_dirty(OopIterateClosure* blk,
                                                             HeapWord* start, size_t words, bool write_table) const {
  assert(is_humongous(), "only humongous region here");

  ShenandoahHeapRegion* r = humongous_start_region();
  oop obj = cast_to_oop(r->bottom());
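  // The single humongous object spans the entire slice; passing a bounding MemRegion
  // to oop_iterate limits the scan to references within each dirty card's window.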
  size_t num_cards = get_card_count(words);

  ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
  ShenandoahScanRemembered* scanner = heap->old_generation()->card_scan();
  size_t card_index = scanner->card_index_for_addr(start);
  if (write_table) {
    while (num_cards-- > 0) {
      if (scanner->is_write_card_dirty(card_index++)) {
        obj->oop_iterate(blk, MemRegion(start, start + CardTable::card_size_in_words()));
      }
      start += CardTable::card_size_in_words();
    }
  } else {
    while (num_cards-- > 0) {
      if (scanner->is_card_dirty(card_index++)) {
        obj->oop_iterate(blk, MemRegion(start, start + CardTable::card_size_in_words()));
      }
      start += CardTable::card_size_in_words();
    }
  }
}

void ShenandoahHeapRegion::oop_iterate_humongous_slice_all(OopIterateClosure* cl, HeapWord* start, size_t words) const {
  assert(is_humongous(), "only humongous region here");

  ShenandoahHeapRegion* r = humongous_start_region();
  oop obj = cast_to_oop(r->bottom());

  // Scan all data, regardless of whether cards are dirty
  obj->oop_iterate(cl, MemRegion(start, start + words));
}

ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(is_humongous(), "Must be a part of the humongous region");
  size_t i = index();
  ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this);
  while (!r->is_humongous_start()) {
    assert(i > 0, "Sanity");
    i--;
    r = heap->get_region(i);
    assert(r->is_humongous(), "Must be a part of the humongous region");
  }
  assert(r->is_humongous_start(), "Must be");
  return r;
}

void ShenandoahHeapRegion::recycle_internal() {
  assert(_recycling.is_set() && is_trash(), "Wrong state");
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  set_top(bottom());
  clear_live_data();
  reset_alloc_metadata();
  heap->marking_context()->reset_top_at_mark_start(this);
  set_update_watermark(bottom());
  if (ZapUnusedHeapArea) {
    SpaceMangler::mangle_region(MemRegion(bottom(), end()));
  }

  make_empty();
  set_affiliation(FREE);
}

void ShenandoahHeapRegion::try_recycle_under_lock() {
  shenandoah_assert_heaplocked();
  if (is_trash() && _recycling.try_set()) {
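    // Double-check the state: a concurrent, lock-free try_recycle() may have recycled
    // this region between our is_trash() check and winning the _recycling flag.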
    if (is_trash()) {
      ShenandoahHeap* heap = ShenandoahHeap::heap();
      ShenandoahGeneration* generation = heap->generation_for(affiliation());

      heap->decrease_used(generation, used());
      generation->decrement_affiliated_region_count();

      recycle_internal();
    }
    _recycling.unset();
  } else {
    // Ensure recycling is unset before returning to the mutator to continue memory allocation.
    while (_recycling.is_set()) {
      if (os::is_MP()) {
        SpinPause();
      } else {
        os::naked_yield();
      }
    }
  }
}

void ShenandoahHeapRegion::try_recycle() {
  shenandoah_assert_not_heaplocked();
  if (is_trash() && _recycling.try_set()) {
    // Double-check the region state after winning the race to set the recycling flag
    if (is_trash()) {
      ShenandoahHeap* heap = ShenandoahHeap::heap();
      ShenandoahGeneration* generation = heap->generation_for(affiliation());
      heap->decrease_used(generation, used());
      generation->decrement_affiliated_region_count_without_lock();

      recycle_internal();
    }
    _recycling.unset();
  }
}

HeapWord* ShenandoahHeapRegion::block_start(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p >= top()) {
    return top();
  } else {
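    // No per-region block-offset data is kept, so find the enclosing block by
    // walking objects linearly from the bottom of the region.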
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += cast_to_oop(cur)->size();
    }
    shenandoah_assert_correct(nullptr, cast_to_oop(last));
    return last;
  }
}

size_t ShenandoahHeapRegion::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p < top()) {
    return cast_to_oop(p)->size();
  } else {
    assert(p == top(), "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

size_t ShenandoahHeapRegion::setup_sizes(size_t max_heap_size) {
  // Absolute minimums we should not ever break.
  static const size_t MIN_REGION_SIZE = 256*K;

  if (FLAG_IS_DEFAULT(ShenandoahMinRegionSize)) {
    FLAG_SET_DEFAULT(ShenandoahMinRegionSize, MIN_REGION_SIZE);
  }

  // Generational Shenandoah needs this alignment for card tables.
  if (strcmp(ShenandoahGCMode, "generational") == 0) {
    max_heap_size = align_up(max_heap_size, CardTable::ct_max_alignment_constraint());
  }

  size_t region_size;
  if (FLAG_IS_DEFAULT(ShenandoahRegionSize)) {
    if (ShenandoahMinRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
                      MIN_NUM_REGIONS,
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than minimum region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(MIN_REGION_SIZE),         proper_unit_for_byte_size(MIN_REGION_SIZE));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MinTLABSize) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than the minimum TLAB size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(MinTLABSize),             proper_unit_for_byte_size(MinTLABSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than min region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize),
                      byte_size_in_proper_unit(MIN_REGION_SIZE),         proper_unit_for_byte_size(MIN_REGION_SIZE));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message);
    }
    if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Minimum (" SIZE_FORMAT "%s) should not be larger than maximum (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message);
    }

    // We rapidly expand to max_heap_size in most scenarios, so that is the measure
    // for usual heap sizes. Do not depend on initial_heap_size here.
    region_size = max_heap_size / ShenandoahTargetNumRegions;

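    // For example, at the default target of 2048 regions, a 4G max heap yields a
    // 2M base region size, before the clamping and alignment applied below.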
    // Now make sure that we don't go over or under our limits.
    region_size = MAX2(ShenandoahMinRegionSize, region_size);
    region_size = MIN2(ShenandoahMaxRegionSize, region_size);

  } else {
    if (ShenandoahRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
                              "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
                      MIN_NUM_REGIONS,
                      byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    if (ShenandoahRegionSize < ShenandoahMinRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "%s) should be larger than min region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize),
                      byte_size_in_proper_unit(ShenandoahMinRegionSize),  proper_unit_for_byte_size(ShenandoahMinRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    if (ShenandoahRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "%s) should be lower than max region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize),
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize),  proper_unit_for_byte_size(ShenandoahMaxRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    region_size = ShenandoahRegionSize;
  }

  // Make sure region size and heap size are page aligned. If large pages are used, we
  // ensure that the region size is aligned to the large page size if the heap is large
  // enough to accommodate the minimum number of regions. Otherwise, we align the region
  // size to the regular page size.

  // Figure out the page size to use, and align the heap up to it.
  size_t page_size = os::vm_page_size();
  if (UseLargePages) {
    size_t large_page_size = os::large_page_size();
    max_heap_size = align_up(max_heap_size, large_page_size);
    if ((max_heap_size / align_up(region_size, large_page_size)) >= MIN_NUM_REGIONS) {
      page_size = large_page_size;
    } else {
      // Should have been checked during argument initialization
      assert(!ShenandoahUncommit, "Uncommit requires the region size to be aligned to the large page size");
    }
  } else {
    max_heap_size = align_up(max_heap_size, page_size);
  }

  // Align region size to page size
  region_size = align_up(region_size, page_size);

  int region_size_log = log2i(region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = size_t(1) << region_size_log;
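  // For example, a tentative region size of 3M has floor(log2) == 21, so it rounds down to 2M.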

  // Now, set up the globals.
  guarantee(RegionSizeBytesShift == 0, "we should only set it once");
  RegionSizeBytesShift = (size_t)region_size_log;

  guarantee(RegionSizeWordsShift == 0, "we should only set it once");
  RegionSizeWordsShift = RegionSizeBytesShift - LogHeapWordSize;

  guarantee(RegionSizeBytes == 0, "we should only set it once");
  RegionSizeBytes = region_size;
  RegionSizeWords = RegionSizeBytes >> LogHeapWordSize;
  assert (RegionSizeWords*HeapWordSize == RegionSizeBytes, "sanity");

  guarantee(RegionSizeWordsMask == 0, "we should only set it once");
  RegionSizeWordsMask = RegionSizeWords - 1;

  guarantee(RegionSizeBytesMask == 0, "we should only set it once");
  RegionSizeBytesMask = RegionSizeBytes - 1;

  guarantee(RegionCount == 0, "we should only set it once");
  RegionCount = align_up(max_heap_size, RegionSizeBytes) / RegionSizeBytes;
  guarantee(RegionCount >= MIN_NUM_REGIONS, "Should have at least minimum regions");

  guarantee(MaxTLABSizeWords == 0, "we should only set it once");
  MaxTLABSizeWords = align_down(RegionSizeWords, MinObjAlignment);

  guarantee(MaxTLABSizeBytes == 0, "we should only set it once");
  MaxTLABSizeBytes = MaxTLABSizeWords * HeapWordSize;
  assert (MaxTLABSizeBytes > MinTLABSize, "should be larger");

  return max_heap_size;
}

void ShenandoahHeapRegion::do_commit() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->is_heap_region_special() && !os::commit_memory((char *) bottom(), RegionSizeBytes, false)) {
    report_java_out_of_memory("Unable to commit region");
  }
  if (!heap->commit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to commit bitmaps for region");
  }
  if (AlwaysPreTouch) {
    os::pretouch_memory(bottom(), end(), heap->pretouch_heap_page_size());
  }
  heap->increase_committed(ShenandoahHeapRegion::region_size_bytes());
}

void ShenandoahHeapRegion::do_uncommit() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->is_heap_region_special() && !os::uncommit_memory((char *) bottom(), RegionSizeBytes)) {
    report_java_out_of_memory("Unable to uncommit region");
  }
  if (!heap->uncommit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to uncommit bitmaps for region");
  }
  heap->decrease_committed(ShenandoahHeapRegion::region_size_bytes());
}

void ShenandoahHeapRegion::set_state(RegionState to) {
  EventShenandoahHeapRegionStateChange evt;
  if (evt.should_commit()) {
    evt.set_index((unsigned) index());
    evt.set_start((uintptr_t)bottom());
    evt.set_used(used());
    evt.set_from(state());
    evt.set_to(to);
    evt.commit();
  }
  Atomic::store(&_state, to);
}

void ShenandoahHeapRegion::record_pin() {
  Atomic::add(&_critical_pins, (size_t)1);
}

void ShenandoahHeapRegion::record_unpin() {
  assert(pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", index());
  Atomic::sub(&_critical_pins, (size_t)1);
}

size_t ShenandoahHeapRegion::pin_count() const {
  return Atomic::load(&_critical_pins);
}

void ShenandoahHeapRegion::set_affiliation(ShenandoahAffiliation new_affiliation) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahAffiliation region_affiliation = heap->region_affiliation(this);
  {
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    log_debug(gc)("Setting affiliation of Region " SIZE_FORMAT " from %s to %s, top: " PTR_FORMAT ", TAMS: " PTR_FORMAT
                  ", watermark: " PTR_FORMAT ", top_bitmap: " PTR_FORMAT,
                  index(), shenandoah_affiliation_name(region_affiliation), shenandoah_affiliation_name(new_affiliation),
                  p2i(top()), p2i(ctx->top_at_mark_start(this)), p2i(_update_watermark), p2i(ctx->top_bitmap(this)));
  }

#ifdef ASSERT
  {
    // During full gc, heap->complete_marking_context() is not valid and may be nullptr.
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    size_t idx = this->index();
    HeapWord* top_bitmap = ctx->top_bitmap(this);

    assert(ctx->is_bitmap_range_within_region_clear(top_bitmap, _end),
           "Region " SIZE_FORMAT ", bitmap should be clear between top_bitmap: " PTR_FORMAT " and end: " PTR_FORMAT, idx,
           p2i(top_bitmap), p2i(_end));
  }
#endif

  if (region_affiliation == new_affiliation) {
    return;
  }

  if (!heap->mode()->is_generational()) {
    log_trace(gc)("Changing affiliation of region %zu from %s to %s",
                  index(), affiliation_name(), shenandoah_affiliation_name(new_affiliation));
    heap->set_affiliation(this, new_affiliation);
    return;
  }

  switch (new_affiliation) {
    case FREE:
      assert(!has_live(), "Free region should not have live data");
      break;
    case YOUNG_GENERATION:
      reset_age();
      break;
    case OLD_GENERATION:
      break;
    default:
      ShouldNotReachHere();
      return;
  }
  heap->set_affiliation(this, new_affiliation);
}

void ShenandoahHeapRegion::decrement_humongous_waste() const {
  assert(is_humongous(), "Should only use this for humongous regions");
  size_t waste_bytes = free();
  if (waste_bytes > 0) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahGeneration* generation = heap->generation_for(affiliation());
    heap->decrease_humongous_waste(generation, waste_bytes);
  }
}