/*
 * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/shenandoahCardTable.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/powerOfTwo.hpp"


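// Region geometry and TLAB sizing globals. These are computed exactly once by
// ShenandoahHeapRegion::setup_sizes() during heap initialization and treated as
// constants afterwards (see the guarantees in setup_sizes()).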
size_t ShenandoahHeapRegion::RegionCount = 0;
size_t ShenandoahHeapRegion::RegionSizeBytes = 0;
size_t ShenandoahHeapRegion::RegionSizeWords = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesShift = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsShift = 0;
size_t ShenandoahHeapRegion::RegionSizeBytesMask = 0;
size_t ShenandoahHeapRegion::RegionSizeWordsMask = 0;
size_t ShenandoahHeapRegion::HumongousThresholdBytes = 0;
size_t ShenandoahHeapRegion::HumongousThresholdWords = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeBytes = 0;
size_t ShenandoahHeapRegion::MaxTLABSizeWords = 0;

ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed) :
  _index(index),
  _bottom(start),
  _end(start + RegionSizeWords),
  _new_top(NULL),
  _empty_time(os::elapsedTime()),
  _state(committed ? _empty_committed : _empty_uncommitted),
  _top(start),
  _tlab_allocs(0),
  _gclab_allocs(0),
  _plab_allocs(0),
  _has_young_lab(false),
  _live_data(0),
  _critical_pins(0),
  _update_watermark(start),
  _affiliation(FREE),
  _age(0) {

  assert(Universe::on_page_boundary(_bottom) && Universe::on_page_boundary(_end),
         "invalid space boundaries");
  if (ZapUnusedHeapArea && committed) {
    SpaceMangler::mangle_region(MemRegion(_bottom, _end));
  }
}

void ShenandoahHeapRegion::report_illegal_transition(const char *method) {
  ResourceMark rm;
  stringStream ss;
  ss.print("Illegal region state transition from \"%s\", at %s\n  ", region_state_to_string(_state), method);
  print_on(&ss);
  fatal("%s", ss.as_string());
}

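// Region state machine. Each make_*() method below is called under the heap lock and either
// performs a legal transition (committing the region's memory first if needed) or reports an
// illegal one. Several of the switch statements rely on intentional case fall-through, e.g.
// _empty_uncommitted falls through to _empty_committed after do_commit().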
void ShenandoahHeapRegion::make_regular_allocation(ShenandoahRegionAffiliation affiliation) {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
      set_affiliation(affiliation);
      set_state(_regular);
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular allocation");
  }
}

void ShenandoahHeapRegion::make_regular_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress() || ShenandoahHeap::heap()->is_degenerated_gc_in_progress(),
          "only for full or degen GC");
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
    case _cset:
    case _humongous_start:
    case _humongous_cont:
      // TODO: Changing this region to young during compaction may not be
      // technically correct here because it completely disregards the ages
      // and origins of the objects being moved. It is, however, certainly
      // more correct than putting live objects into a region without a
      // generational affiliation.
      set_affiliation(YOUNG_GENERATION);
      set_state(_regular);
      return;
    case _pinned_cset:
      set_state(_pinned);
      return;
    case _regular:
    case _pinned:
      return;
    default:
      report_illegal_transition("regular bypass");
  }
}

void ShenandoahHeapRegion::make_humongous_start() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start allocation");
  }
}

void ShenandoahHeapRegion::make_humongous_start_bypass(ShenandoahRegionAffiliation affiliation) {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
  set_affiliation(affiliation);
  reset_age();
  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("humongous start bypass");
  }
}

void ShenandoahHeapRegion::make_humongous_cont() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _empty_uncommitted:
      do_commit();
    case _empty_committed:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation allocation");
  }
}

void ShenandoahHeapRegion::make_humongous_cont_bypass(ShenandoahRegionAffiliation affiliation) {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
  set_affiliation(affiliation);
  reset_age();
  switch (_state) {
    case _empty_committed:
    case _regular:
    case _humongous_start:
    case _humongous_cont:
      set_state(_humongous_cont);
      return;
    default:
      report_illegal_transition("humongous continuation bypass");
  }
}

void ShenandoahHeapRegion::make_pinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() > 0, "Should have pins: " SIZE_FORMAT, pin_count());

  switch (_state) {
    case _regular:
      set_state(_pinned);
    case _pinned_cset:
    case _pinned:
      return;
    case _humongous_start:
      set_state(_pinned_humongous_start);
    case _pinned_humongous_start:
      return;
    case _cset:
      _state = _pinned_cset;
      return;
    default:
      report_illegal_transition("pinning");
  }
}

void ShenandoahHeapRegion::make_unpinned() {
  shenandoah_assert_heaplocked();
  assert(pin_count() == 0, "Should not have pins: " SIZE_FORMAT, pin_count());

  switch (_state) {
    case _pinned:
      assert(affiliation() != FREE, "Pinned region should not be FREE");
      set_state(_regular);
      return;
    case _regular:
    case _humongous_start:
      return;
    case _pinned_cset:
      set_state(_cset);
      return;
    case _pinned_humongous_start:
      set_state(_humongous_start);
      return;
    default:
      report_illegal_transition("unpinning");
  }
}

void ShenandoahHeapRegion::make_cset() {
  shenandoah_assert_heaplocked();
  // Leave age untouched.  We need to consult the age when we are deciding whether to promote evacuated objects.
  switch (_state) {
    case _regular:
      set_state(_cset);
    case _cset:
      return;
    default:
      report_illegal_transition("cset");
  }
}

void ShenandoahHeapRegion::make_trash() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _cset:
      // Reclaiming cset regions
    case _humongous_start:
    case _humongous_cont:
      // Reclaiming humongous regions
    case _regular:
      // Immediate region reclaim
      set_state(_trash);
      return;
    default:
      report_illegal_transition("trashing");
  }
}

void ShenandoahHeapRegion::make_trash_immediate() {
  make_trash();

  // On this path, we know there are no marked objects in the region;
  // tell the marking context about it so it can bypass bitmap resets.
  assert(ShenandoahHeap::heap()->active_generation()->is_mark_complete(), "Marking should be complete here.");
  ShenandoahHeap::heap()->marking_context()->reset_top_bitmap(this);
}

void ShenandoahHeapRegion::make_empty() {
  shenandoah_assert_heaplocked();
  reset_age();
  switch (_state) {
    case _trash:
      set_state(_empty_committed);
      _empty_time = os::elapsedTime();
      return;
    default:
      report_illegal_transition("emptying");
  }
}

void ShenandoahHeapRegion::make_uncommitted() {
  shenandoah_assert_heaplocked();
  switch (_state) {
    case _empty_committed:
      do_uncommit();
      set_state(_empty_uncommitted);
      return;
    default:
      report_illegal_transition("uncommitting");
  }
}

void ShenandoahHeapRegion::make_committed_bypass() {
  shenandoah_assert_heaplocked();
  assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");

  switch (_state) {
    case _empty_uncommitted:
      do_commit();
      set_state(_empty_committed);
      return;
    default:
      report_illegal_transition("commit bypass");
  }
}

void ShenandoahHeapRegion::reset_alloc_metadata() {
  _tlab_allocs = 0;
  _gclab_allocs = 0;
  _plab_allocs = 0;
}

size_t ShenandoahHeapRegion::get_shared_allocs() const {
  return used() - (_tlab_allocs + _gclab_allocs + _plab_allocs) * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_tlab_allocs() const {
  return _tlab_allocs * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_gclab_allocs() const {
  return _gclab_allocs * HeapWordSize;
}

size_t ShenandoahHeapRegion::get_plab_allocs() const {
  return _plab_allocs * HeapWordSize;
}

void ShenandoahHeapRegion::set_live_data(size_t s) {
  assert(Thread::current()->is_VM_thread(), "by VM thread");
  _live_data = (s >> LogHeapWordSize);
}

void ShenandoahHeapRegion::print_on(outputStream* st) const {
  st->print("|");
  st->print(SIZE_FORMAT_W(5), this->_index);

  switch (_state) {
    case _empty_uncommitted:
      st->print("|EU ");
      break;
    case _empty_committed:
      st->print("|EC ");
      break;
    case _regular:
      st->print("|R  ");
      break;
    case _humongous_start:
      st->print("|H  ");
      break;
    case _pinned_humongous_start:
      st->print("|HP ");
      break;
    case _humongous_cont:
      st->print("|HC ");
      break;
    case _cset:
      st->print("|CS ");
      break;
    case _trash:
      st->print("|T  ");
      break;
    case _pinned:
      st->print("|P  ");
      break;
    case _pinned_cset:
      st->print("|CSP");
      break;
    default:
      ShouldNotReachHere();
  }
  switch (_affiliation) {
    case ShenandoahRegionAffiliation::FREE:
      st->print("|F");
      break;
    case ShenandoahRegionAffiliation::YOUNG_GENERATION:
      st->print("|Y");
      break;
    case ShenandoahRegionAffiliation::OLD_GENERATION:
      st->print("|O");
      break;
    default:
      ShouldNotReachHere();
  }
  st->print("|BTE " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12) ", " INTPTR_FORMAT_W(12),
            p2i(bottom()), p2i(top()), p2i(end()));
  st->print("|TAMS " INTPTR_FORMAT_W(12),
            p2i(ShenandoahHeap::heap()->marking_context()->top_at_mark_start(const_cast<ShenandoahHeapRegion*>(this))));
  st->print("|UWM " INTPTR_FORMAT_W(12),
            p2i(_update_watermark));
  st->print("|U " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(used()),                proper_unit_for_byte_size(used()));
  st->print("|T " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_tlab_allocs()),     proper_unit_for_byte_size(get_tlab_allocs()));
  st->print("|G " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_gclab_allocs()),    proper_unit_for_byte_size(get_gclab_allocs()));
  if (ShenandoahHeap::heap()->mode()->is_generational()) {
    st->print("|P " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_plab_allocs()),   proper_unit_for_byte_size(get_plab_allocs()));
  }
  st->print("|S " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_shared_allocs()),   proper_unit_for_byte_size(get_shared_allocs()));
  st->print("|L " SIZE_FORMAT_W(5) "%1s", byte_size_in_proper_unit(get_live_data_bytes()), proper_unit_for_byte_size(get_live_data_bytes()));
  st->print("|CP " SIZE_FORMAT_W(3), pin_count());
  st->cr();
}

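// "Coalesce and fill" rewrites each run of unmarked (dead) objects between marked objects as a
// single filler object and merges its remembered-set registrations, so that old regions remain
// parseable for card scanning once marking is complete.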
// oop_iterate without closure and without cancellation checks. Always returns true.
bool ShenandoahHeapRegion::oop_fill_and_coalesce_wo_cancel() {
  HeapWord* obj_addr = resume_coalesce_and_fill();

  assert(!is_humongous(), "No need to fill or coalesce humongous regions");
  if (!is_active()) {
    end_preemptible_coalesce_and_fill();
    return true;
  }

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahMarkingContext* marking_context = heap->marking_context();
  // All objects above TAMS are considered live even though their mark bits will not be set.  Note that young-
  // gen evacuations that interrupt a long-running old-gen concurrent mark may promote objects into old-gen
  // while the old-gen concurrent marking is ongoing.  These newly promoted objects will reside above TAMS
  // and will be treated as live during the current old-gen marking pass, even though they will not be
  // explicitly marked.
  HeapWord* t = marking_context->top_at_mark_start(this);

  // Expect marking to be completed before these threads invoke this service.
  assert(heap->active_generation()->is_mark_complete(), "sanity");
  while (obj_addr < t) {
    oop obj = cast_to_oop(obj_addr);
    if (marking_context->is_marked(obj)) {
      assert(obj->klass() != NULL, "klass should not be NULL");
      obj_addr += obj->size();
    } else {
      // Object is not marked.  Coalesce and fill dead object with dead neighbors.
      HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
      assert(next_marked_obj <= t, "next marked object cannot exceed top");
      size_t fill_size = next_marked_obj - obj_addr;
      ShenandoahHeap::fill_with_object(obj_addr, fill_size);
      heap->card_scan()->coalesce_objects(obj_addr, fill_size);
      obj_addr = next_marked_obj;
    }
  }
  // Mark that this region has been coalesced and filled
  end_preemptible_coalesce_and_fill();
  return true;
}

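// Preemptible variant of oop_fill_and_coalesce_wo_cancel(): identical, except that it checks for
// GC cancellation every preemption_stride operations and records the resume point before
// returning false if it has to bail out.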
// oop_iterate without closure; returns true if completed without cancellation.
bool ShenandoahHeapRegion::oop_fill_and_coalesce() {
  HeapWord* obj_addr = resume_coalesce_and_fill();
  // Consider yielding to cancel/preemption request after this many coalesce operations (skip marked, or coalesce free).
  const size_t preemption_stride = 128;

  assert(!is_humongous(), "No need to fill or coalesce humongous regions");
  if (!is_active()) {
    end_preemptible_coalesce_and_fill();
    return true;
  }

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahMarkingContext* marking_context = heap->marking_context();
  // All objects above TAMS are considered live even though their mark bits will not be set.  Note that young-
  // gen evacuations that interrupt a long-running old-gen concurrent mark may promote objects into old-gen
  // while the old-gen concurrent marking is ongoing.  These newly promoted objects will reside above TAMS
  // and will be treated as live during the current old-gen marking pass, even though they will not be
  // explicitly marked.
  HeapWord* t = marking_context->top_at_mark_start(this);

  // Expect marking to be completed before these threads invoke this service.
  assert(heap->active_generation()->is_mark_complete(), "sanity");

  size_t ops_before_preempt_check = preemption_stride;
  while (obj_addr < t) {
    oop obj = cast_to_oop(obj_addr);
    if (marking_context->is_marked(obj)) {
      assert(obj->klass() != NULL, "klass should not be NULL");
      obj_addr += obj->size();
    } else {
      // Object is not marked.  Coalesce and fill dead object with dead neighbors.
      HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
      assert(next_marked_obj <= t, "next marked object cannot exceed top");
      size_t fill_size = next_marked_obj - obj_addr;
      ShenandoahHeap::fill_with_object(obj_addr, fill_size);
      heap->card_scan()->coalesce_objects(obj_addr, fill_size);
      obj_addr = next_marked_obj;
    }
    if (ops_before_preempt_check-- == 0) {
      if (heap->cancelled_gc()) {
        suspend_coalesce_and_fill(obj_addr);
        return false;
      }
      ops_before_preempt_check = preemption_stride;
    }
  }
  // Mark that this region has been coalesced and filled
  end_preemptible_coalesce_and_fill();
  return true;
}

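// Iterate all live objects in this region with the given closure, filling and coalescing dead
// objects along the way for non-humongous regions so that the region remains parseable.
// Humongous regions hold at most one object and are handled by oop_iterate_humongous().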
void ShenandoahHeapRegion::global_oop_iterate_and_fill_dead(OopIterateClosure* blk) {
  if (!is_active()) return;
  if (is_humongous()) {
    // No need to fill dead within humongous regions.  Either the entire region is dead, or the entire region is
    // unchanged.  A humongous region holds no more than one humongous object.
    oop_iterate_humongous(blk);
  } else {
    global_oop_iterate_objects_and_fill_dead(blk);
  }
}

void ShenandoahHeapRegion::global_oop_iterate_objects_and_fill_dead(OopIterateClosure* blk) {
  assert(!is_humongous(), "no humongous region here");
  HeapWord* obj_addr = bottom();

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahMarkingContext* marking_context = heap->marking_context();
  RememberedScanner* rem_set_scanner = heap->card_scan();
  // Objects allocated above TAMS are not marked, but are considered live for purposes of current GC efforts.
  HeapWord* t = marking_context->top_at_mark_start(this);

  assert(heap->active_generation()->is_mark_complete(), "sanity");

  while (obj_addr < t) {
    oop obj = cast_to_oop(obj_addr);
    if (marking_context->is_marked(obj)) {
      assert(obj->klass() != NULL, "klass should not be NULL");
      // when promoting an entire region, we have to register the marked objects as well
      obj_addr += obj->oop_iterate_size(blk);
    } else {
      // Object is not marked.  Coalesce and fill dead object with dead neighbors.
      HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
      assert(next_marked_obj <= t, "next marked object cannot exceed top");
      size_t fill_size = next_marked_obj - obj_addr;
      ShenandoahHeap::fill_with_object(obj_addr, fill_size);

      // coalesce_objects() unregisters all but first object subsumed within coalesced range.
      rem_set_scanner->coalesce_objects(obj_addr, fill_size);
      obj_addr = next_marked_obj;
    }
  }

  // Any object above TAMS and below top() is considered live.
  t = top();
  while (obj_addr < t) {
    oop obj = cast_to_oop(obj_addr);
    obj_addr += obj->oop_iterate_size(blk);
  }
}

void ShenandoahHeapRegion::oop_iterate_humongous(OopIterateClosure* blk) {
  assert(is_humongous(), "only humongous region here");
  // Find head.
  ShenandoahHeapRegion* r = humongous_start_region();
  assert(r->is_humongous_start(), "need humongous head here");
  oop obj = cast_to_oop(r->bottom());
  obj->oop_iterate(blk, MemRegion(bottom(), top()));
}

ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(is_humongous(), "Must be a part of the humongous region");
  size_t i = index();
  ShenandoahHeapRegion* r = const_cast<ShenandoahHeapRegion*>(this);
  while (!r->is_humongous_start()) {
    assert(i > 0, "Sanity");
    i--;
    r = heap->get_region(i);
    assert(r->is_humongous(), "Must be a part of the humongous region");
  }
  assert(r->is_humongous_start(), "Must be");
  return r;
}

void ShenandoahHeapRegion::recycle() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (affiliation() == YOUNG_GENERATION) {
    heap->young_generation()->decrease_used(used());
  } else if (affiliation() == OLD_GENERATION) {
    heap->old_generation()->decrease_used(used());
  }

  set_top(bottom());
  clear_live_data();

  reset_alloc_metadata();

  heap->marking_context()->reset_top_at_mark_start(this);
  set_update_watermark(bottom());

  make_empty();
  set_affiliation(FREE);

  if (ZapUnusedHeapArea) {
    SpaceMangler::mangle_region(MemRegion(bottom(), end()));
  }
}

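// Linear-search implementations of the block_start()/block_size() queries: block_start() walks
// objects from bottom() until it steps past p, and addresses at or above top() are treated as a
// single block spanning to end(). There is no block-offset table to consult, so the cost is
// linear in the number of objects below top().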
HeapWord* ShenandoahHeapRegion::block_start(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += cast_to_oop(cur)->size();
    }
    shenandoah_assert_correct(NULL, cast_to_oop(last));
    return last;
  }
}

size_t ShenandoahHeapRegion::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(bottom()), p2i(end()));
  if (p < top()) {
    return cast_to_oop(p)->size();
  } else {
    assert(p == top(), "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

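// Validate the region sizing flags and derive the region geometry (region size, shifts, masks,
// humongous threshold, maximum TLAB size) from the maximum heap size. Returns the maximum heap
// size aligned up to the chosen page size.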
size_t ShenandoahHeapRegion::setup_sizes(size_t max_heap_size) {
  // Absolute minimums we should not ever break.
  static const size_t MIN_REGION_SIZE = 256*K;

  if (FLAG_IS_DEFAULT(ShenandoahMinRegionSize)) {
    FLAG_SET_DEFAULT(ShenandoahMinRegionSize, MIN_REGION_SIZE);
  }

  size_t region_size;
  if (FLAG_IS_DEFAULT(ShenandoahRegionSize)) {
    if (ShenandoahMinRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of minimum region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
                      MIN_NUM_REGIONS,
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than minimum region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(MIN_REGION_SIZE),         proper_unit_for_byte_size(MIN_REGION_SIZE));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMinRegionSize < MinTLABSize) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than TLAB size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(MinTLABSize),             proper_unit_for_byte_size(MinTLABSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize option", message);
    }
    if (ShenandoahMaxRegionSize < MIN_REGION_SIZE) {
      err_msg message("" SIZE_FORMAT "%s should not be lower than min region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize),
                      byte_size_in_proper_unit(MIN_REGION_SIZE),         proper_unit_for_byte_size(MIN_REGION_SIZE));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMaxRegionSize option", message);
    }
    if (ShenandoahMinRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Minimum (" SIZE_FORMAT "%s) should not be larger than maximum (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahMinRegionSize), proper_unit_for_byte_size(ShenandoahMinRegionSize),
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize), proper_unit_for_byte_size(ShenandoahMaxRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahMinRegionSize or -XX:ShenandoahMaxRegionSize", message);
    }

    // We rapidly expand to max_heap_size in most scenarios, so that is the measure
    // for usual heap sizes. Do not depend on initial_heap_size here.
    region_size = max_heap_size / ShenandoahTargetNumRegions;

    // Now make sure that we don't go over or under our limits.
    region_size = MAX2(ShenandoahMinRegionSize, region_size);
    region_size = MIN2(ShenandoahMaxRegionSize, region_size);

  } else {
    if (ShenandoahRegionSize > max_heap_size / MIN_NUM_REGIONS) {
      err_msg message("Max heap size (" SIZE_FORMAT "%s) is too low to afford the minimum number "
                      "of regions (" SIZE_FORMAT ") of requested size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(max_heap_size), proper_unit_for_byte_size(max_heap_size),
                      MIN_NUM_REGIONS,
                      byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    if (ShenandoahRegionSize < ShenandoahMinRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "%s) should be larger than min region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize),
                      byte_size_in_proper_unit(ShenandoahMinRegionSize),  proper_unit_for_byte_size(ShenandoahMinRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    if (ShenandoahRegionSize > ShenandoahMaxRegionSize) {
      err_msg message("Heap region size (" SIZE_FORMAT "%s) should be lower than max region size (" SIZE_FORMAT "%s).",
                      byte_size_in_proper_unit(ShenandoahRegionSize), proper_unit_for_byte_size(ShenandoahRegionSize),
                      byte_size_in_proper_unit(ShenandoahMaxRegionSize),  proper_unit_for_byte_size(ShenandoahMaxRegionSize));
      vm_exit_during_initialization("Invalid -XX:ShenandoahRegionSize option", message);
    }
    region_size = ShenandoahRegionSize;
  }

  // Make sure region size and heap size are page aligned.
  // If large pages are used, we ensure that the region size is aligned to the large page size,
  // provided the heap is large enough to accommodate the minimal number of regions. Otherwise,
  // we align the region size to the regular page size.

  // Figure out the page size to use, and align the heap up to it.
  int page_size = os::vm_page_size();
  if (UseLargePages) {
    size_t large_page_size = os::large_page_size();
    max_heap_size = align_up(max_heap_size, large_page_size);
    if ((max_heap_size / align_up(region_size, large_page_size)) >= MIN_NUM_REGIONS) {
      page_size = (int)large_page_size;
    } else {
      // Should have been checked during argument initialization
      assert(!ShenandoahUncommit, "Uncommit requires region size aligns to large page size");
    }
  } else {
    max_heap_size = align_up(max_heap_size, page_size);
  }

  // Align region size to page size
  region_size = align_up(region_size, page_size);

  int region_size_log = log2i(region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = size_t(1) << region_size_log;

  // Now, set up the globals.
  guarantee(RegionSizeBytesShift == 0, "we should only set it once");
  RegionSizeBytesShift = (size_t)region_size_log;

  guarantee(RegionSizeWordsShift == 0, "we should only set it once");
  RegionSizeWordsShift = RegionSizeBytesShift - LogHeapWordSize;

  guarantee(RegionSizeBytes == 0, "we should only set it once");
  RegionSizeBytes = region_size;
  RegionSizeWords = RegionSizeBytes >> LogHeapWordSize;
  assert (RegionSizeWords*HeapWordSize == RegionSizeBytes, "sanity");

  guarantee(RegionSizeWordsMask == 0, "we should only set it once");
  RegionSizeWordsMask = RegionSizeWords - 1;

  guarantee(RegionSizeBytesMask == 0, "we should only set it once");
  RegionSizeBytesMask = RegionSizeBytes - 1;

  guarantee(RegionCount == 0, "we should only set it once");
  RegionCount = align_up(max_heap_size, RegionSizeBytes) / RegionSizeBytes;
  guarantee(RegionCount >= MIN_NUM_REGIONS, "Should have at least minimum regions");

  guarantee(HumongousThresholdWords == 0, "we should only set it once");
  HumongousThresholdWords = RegionSizeWords * ShenandoahHumongousThreshold / 100;
  HumongousThresholdWords = align_down(HumongousThresholdWords, MinObjAlignment);
  assert (HumongousThresholdWords <= RegionSizeWords, "sanity");

  guarantee(HumongousThresholdBytes == 0, "we should only set it once");
  HumongousThresholdBytes = HumongousThresholdWords * HeapWordSize;
  assert (HumongousThresholdBytes <= RegionSizeBytes, "sanity");

  // The rationale for trimming the TLAB sizes has to do with the raciness in
  // TLAB allocation machinery. It may happen that the TLAB sizing policy polls Shenandoah
  // about the next free size, gets the answer for region #N, goes away for a while, then
  // tries to allocate in region #N, and fails because some other thread has claimed part
  // of region #N; the freeset allocation code then has to retire region #N
  // before moving the allocation to region #N+1.
  //
  // The worst case is realized when the "answer" is "region size", which means a whole
  // region could be retired prematurely. Having smaller TLABs does not fix that
  // completely, but reduces the probability of too wasteful region retirement.
  // With the current divisor, we will waste no more than 1/8 of region size in the worst
  // case. This also has a secondary effect on collection set selection: even under
  // the race, the regions would be at least 7/8 used, which allows relying on
  // "used" - "live" for cset selection. Otherwise, we can end up with a fragmented region
  // below the garbage threshold that would never be considered for collection.
  //
  // The whole thing is mitigated if Elastic TLABs are enabled.
  //
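  // As a concrete illustration (assuming ShenandoahHumongousThreshold is at its default of 100
  // and Elastic TLABs are disabled): with 2 MB regions, MaxTLABSizeWords becomes
  // MIN2(RegionSizeWords / 8, HumongousThresholdWords), i.e. roughly 256 KB worth of words per TLAB.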
  guarantee(MaxTLABSizeWords == 0, "we should only set it once");
  MaxTLABSizeWords = MIN2(ShenandoahElasticTLAB ? RegionSizeWords : (RegionSizeWords / 8), HumongousThresholdWords);
  MaxTLABSizeWords = align_down(MaxTLABSizeWords, MinObjAlignment);

  guarantee(MaxTLABSizeBytes == 0, "we should only set it once");
  MaxTLABSizeBytes = MaxTLABSizeWords * HeapWordSize;
  assert (MaxTLABSizeBytes > MinTLABSize, "should be larger");

  return max_heap_size;
}

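// Commit/uncommit the backing memory for this region and its slice of the marking bitmap,
// keeping the heap's committed-byte accounting in sync. The heap memory itself is left alone
// when the heap regions are "special", i.e. already committed as part of the reservation.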
void ShenandoahHeapRegion::do_commit() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->is_heap_region_special() && !os::commit_memory((char *) bottom(), RegionSizeBytes, false)) {
    report_java_out_of_memory("Unable to commit region");
  }
  if (!heap->commit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to commit bitmaps for region");
  }
  if (AlwaysPreTouch) {
    os::pretouch_memory(bottom(), end(), heap->pretouch_heap_page_size());
  }
  heap->increase_committed(ShenandoahHeapRegion::region_size_bytes());
}

void ShenandoahHeapRegion::do_uncommit() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (!heap->is_heap_region_special() && !os::uncommit_memory((char *) bottom(), RegionSizeBytes)) {
    report_java_out_of_memory("Unable to uncommit region");
  }
  if (!heap->uncommit_bitmap_slice(this)) {
    report_java_out_of_memory("Unable to uncommit bitmaps for region");
  }
  heap->decrease_committed(ShenandoahHeapRegion::region_size_bytes());
}

void ShenandoahHeapRegion::set_state(RegionState to) {
  EventShenandoahHeapRegionStateChange evt;
  if (evt.should_commit()) {
    evt.set_index((unsigned) index());
    evt.set_start((uintptr_t)bottom());
    evt.set_used(used());
    evt.set_from(_state);
    evt.set_to(to);
    evt.commit();
  }
  _state = to;
}

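// Pin bookkeeping. The pin count is maintained with atomics rather than under the heap lock
// because pins can be added and removed by arbitrary threads (for example around JNI critical
// sections); make_pinned()/make_unpinned() read the count while holding the heap lock.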
void ShenandoahHeapRegion::record_pin() {
  Atomic::add(&_critical_pins, (size_t)1);
}

void ShenandoahHeapRegion::record_unpin() {
  assert(pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", index());
  Atomic::sub(&_critical_pins, (size_t)1);
}

size_t ShenandoahHeapRegion::pin_count() const {
  return Atomic::load(&_critical_pins);
}

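// Change the generational affiliation of this region (FREE, YOUNG_GENERATION or OLD_GENERATION).
// In generational mode this also keeps the per-generation affiliated-region counts in sync;
// in non-generational mode it simply records the new value.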
void ShenandoahHeapRegion::set_affiliation(ShenandoahRegionAffiliation new_affiliation) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  {
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    log_debug(gc)("Setting affiliation of Region " SIZE_FORMAT " from %s to %s, top: " PTR_FORMAT ", TAMS: " PTR_FORMAT
                  ", watermark: " PTR_FORMAT ", top_bitmap: " PTR_FORMAT,
                  index(), affiliation_name(_affiliation), affiliation_name(new_affiliation),
                  p2i(top()), p2i(ctx->top_at_mark_start(this)), p2i(_update_watermark), p2i(ctx->top_bitmap(this)));
  }

#ifdef ASSERT
  {
    // During full GC, heap->complete_marking_context() may not be valid (it may be nullptr).
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    size_t idx = this->index();
    HeapWord* top_bitmap = ctx->top_bitmap(this);

    assert(ctx->is_bitmap_clear_range(top_bitmap, _end),
           "Region " SIZE_FORMAT ", bitmap should be clear between top_bitmap: " PTR_FORMAT " and end: " PTR_FORMAT, idx,
           p2i(top_bitmap), p2i(_end));
  }
#endif

  if (_affiliation == new_affiliation) {
    return;
  }

  if (!heap->mode()->is_generational()) {
    _affiliation = new_affiliation;
    return;
  }

  log_trace(gc)("Changing affiliation of region %zu from %s to %s",
    index(), affiliation_name(_affiliation), affiliation_name(new_affiliation));

  if (_affiliation == ShenandoahRegionAffiliation::YOUNG_GENERATION) {
    heap->young_generation()->decrement_affiliated_region_count();
  } else if (_affiliation == ShenandoahRegionAffiliation::OLD_GENERATION) {
    heap->old_generation()->decrement_affiliated_region_count();
  }

  switch (new_affiliation) {
    case FREE:
      assert(!has_live(), "Free region should not have live data");
      break;
    case YOUNG_GENERATION:
      reset_age();
      heap->young_generation()->increment_affiliated_region_count();
      break;
    case OLD_GENERATION:
      heap->old_generation()->increment_affiliated_region_count();
      break;
    default:
      ShouldNotReachHere();
      return;
  }
  _affiliation = new_affiliation;
}

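// Promote a humongous object in place: flip this start region and all of its continuation
// regions from YOUNG_GENERATION to OLD_GENERATION, re-register the object with the remembered
// set, and mark its card range clean (primitive arrays) or dirty (objects that may contain
// references back into the young generation). Returns the number of regions spanned by the object.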
size_t ShenandoahHeapRegion::promote_humongous() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahMarkingContext* marking_context = heap->marking_context();
  assert(heap->active_generation()->is_mark_complete(), "sanity");
  assert(is_young(), "Only young regions can be promoted");
  assert(is_humongous_start(), "Should not promote humongous continuation in isolation");
  assert(age() >= InitialTenuringThreshold, "Only promote regions that are sufficiently aged");

  ShenandoahGeneration* old_generation = heap->old_generation();
  ShenandoahGeneration* young_generation = heap->young_generation();

  oop obj = cast_to_oop(bottom());
  assert(marking_context->is_marked(obj), "promoted humongous object should be alive");

  size_t spanned_regions = ShenandoahHeapRegion::required_regions(obj->size() * HeapWordSize);
  size_t index_limit = index() + spanned_regions;

  log_debug(gc)("promoting humongous region " SIZE_FORMAT ", spanning " SIZE_FORMAT, index(), spanned_regions);

  // Since this region may have served previously as OLD, it may hold obsolete object range info.
  heap->card_scan()->reset_object_range(bottom(), bottom() + spanned_regions * ShenandoahHeapRegion::region_size_words());
  // Since the humongous region holds only one object, no lock is necessary for this register_object() invocation.
  heap->card_scan()->register_object_wo_lock(bottom());

  // For this region and each humongous continuation region spanned by this humongous object, change
  // affiliation to OLD_GENERATION and adjust the generation-use tallies.  The remnant of memory
  // in the last humongous region that is not spanned by obj is currently not used.
  for (size_t i = index(); i < index_limit; i++) {
    ShenandoahHeapRegion* r = heap->get_region(i);
    log_debug(gc)("promoting humongous region " SIZE_FORMAT ", from " PTR_FORMAT " to " PTR_FORMAT,
                  r->index(), p2i(r->bottom()), p2i(r->top()));
    // We mark the entire humongous object's range as dirty after loop terminates, so no need to dirty the range here
    r->set_affiliation(OLD_GENERATION);
    old_generation->increase_used(r->used());
    young_generation->decrease_used(r->used());
  }
  if (obj->is_typeArray()) {
    // Primitive arrays don't need to be scanned.  See above TODO question about requiring
    // region promotion at safepoint.
    log_debug(gc)("Clean cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT,
                  index(), p2i(bottom()), p2i(bottom() + obj->size()));
    heap->card_scan()->mark_range_as_clean(bottom(), obj->size());
  } else {
    log_debug(gc)("Dirty cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT,
                  index(), p2i(bottom()), p2i(bottom() + obj->size()));
    heap->card_scan()->mark_range_as_dirty(bottom(), obj->size());
  }
  return index_limit - index();
}