/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/fullGCForwarding.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGenerationalFullGC.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"

#ifdef ASSERT
void assert_regions_used_not_more_than_capacity(ShenandoahGeneration* generation) {
  assert(generation->used_regions_size() <= generation->max_capacity(),
         "%s generation affiliated regions must not exceed capacity", generation->name());
}

void assert_usage_not_more_than_regions_used(ShenandoahGeneration* generation) {
  assert(generation->used_including_humongous_waste() <= generation->used_regions_size(),
         "%s consumed can be no larger than span of affiliated regions", generation->name());
}
#else
void assert_regions_used_not_more_than_capacity(ShenandoahGeneration* generation) {}
void assert_usage_not_more_than_regions_used(ShenandoahGeneration* generation) {}
#endif

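// Readies the generational heap for a full collection: full GC operates over the
// entire heap, so the global generation becomes the active generation, and any
// in-flight old-generation GC work is cancelled.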
void ShenandoahGenerationalFullGC::prepare() {
  auto heap = ShenandoahGenerationalHeap::heap();
  // Since we may arrive here from degenerated GC failure of either young or old, establish generation as GLOBAL.
  heap->set_gc_generation(heap->global_generation());
  heap->set_active_generation();

  // No need for old_gen->increase_used() as this was done when plabs were allocated.
  heap->reset_generation_reserves();

  // Full GC supersedes any marking or coalescing in old generation.
  heap->old_generation()->cancel_gc();
}

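// Called when full GC completes: records the cycle end with both generations'
// heuristics and re-validates the generation accounting invariants that full GC
// is allowed to violate temporarily.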
void ShenandoahGenerationalFullGC::handle_completion(ShenandoahHeap* heap) {
  // Full GC should reset time since last gc for young and old heuristics
  ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::cast(heap);
  ShenandoahYoungGeneration* young = gen_heap->young_generation();
  ShenandoahOldGeneration* old = gen_heap->old_generation();
  young->heuristics()->record_cycle_end();
  old->heuristics()->record_cycle_end();

  gen_heap->mmu_tracker()->record_full(GCId::current());
  gen_heap->log_heap_status("At end of Full GC");

  assert(old->is_idle(), "After full GC, old generation should be idle.");

  // Since we allow temporary violation of these constraints during Full GC, we want to enforce that the assertions are
  // made valid by the time Full GC completes.
  assert_regions_used_not_more_than_capacity(old);
  assert_regions_used_not_more_than_capacity(young);
  assert_usage_not_more_than_regions_used(old);
  assert_usage_not_more_than_regions_used(young);

  // Establish baseline for next old-has-grown trigger.
  old->set_live_bytes_after_last_mark(old->used_including_humongous_waste());
}

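// Compaction moves objects without maintaining cards, so the remembered set is
// rebuilt from scratch over the compacted old generation.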
void ShenandoahGenerationalFullGC::rebuild_remembered_set(ShenandoahHeap* heap) {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_reconstruct_remembered_set);
  ShenandoahRegionIterator regions;
  ShenandoahReconstructRememberedSetTask task(&regions);
  heap->workers()->run_task(&task);

  // Rebuilding the remembered set recomputes all the card offsets for objects.
  // The adjust pointers phase coalesces and fills all necessary regions. In case
  // we came to the full GC from an incomplete global cycle, we need to indicate
  // that the old regions are parsable.
  heap->old_generation()->set_parsable(true);
}

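// Compaction may promote or shuffle objects so that old ends up affiliated with
// more or fewer regions than its configured capacity. Transfer whole regions
// between the generations until capacity again covers usage.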
void ShenandoahGenerationalFullGC::balance_generations_after_gc(ShenandoahHeap* heap) {
  ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::cast(heap);
  ShenandoahOldGeneration* const old_gen = gen_heap->old_generation();

  size_t old_usage = old_gen->used_regions_size();
  size_t old_capacity = old_gen->max_capacity();

  assert(old_usage % ShenandoahHeapRegion::region_size_bytes() == 0, "Old usage must align with region size");
  assert(old_capacity % ShenandoahHeapRegion::region_size_bytes() == 0, "Old capacity must align with region size");

  if (old_capacity > old_usage) {
    size_t excess_old_regions = (old_capacity - old_usage) / ShenandoahHeapRegion::region_size_bytes();
    gen_heap->generation_sizer()->transfer_to_young(excess_old_regions);
  } else if (old_capacity < old_usage) {
    size_t old_regions_deficit = (old_usage - old_capacity) / ShenandoahHeapRegion::region_size_bytes();
    gen_heap->generation_sizer()->force_transfer_to_old(old_regions_deficit);
  }

  log_info(gc, ergo)("FullGC done: young usage: " PROPERFMT ", old usage: " PROPERFMT,
                     PROPERFMTARGS(gen_heap->young_generation()->used()),
                     PROPERFMTARGS(old_gen->used()));
}

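// Rebalances generation sizes once the free set has been rebuilt, logging the
// resulting transfer decision.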
void ShenandoahGenerationalFullGC::balance_generations_after_rebuilding_free_set() {
  auto result = ShenandoahGenerationalHeap::heap()->balance_generations();
  LogTarget(Info, gc, ergo) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    result.print_on("Full GC", &ls);
  }
}

void ShenandoahGenerationalFullGC::log_live_in_old(ShenandoahHeap* heap) {
  LogTarget(Debug, gc) lt;
  if (lt.is_enabled()) {
    size_t live_bytes_in_old = 0;
    for (size_t i = 0; i < heap->num_regions(); i++) {
      ShenandoahHeapRegion* r = heap->get_region(i);
      if (r->is_old()) {
        live_bytes_in_old += r->get_live_data_bytes();
      }
    }
    log_debug(gc)("Live bytes in old after STW mark: " PROPERFMT, PROPERFMTARGS(live_bytes_in_old));
  }
}

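// Full GC supersedes promotion in place. Any region that saved its original top
// in anticipation of in-place promotion gets that top restored here, so the
// region can be compacted normally.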
void ShenandoahGenerationalFullGC::restore_top_before_promote(ShenandoahHeap* heap) {
  for (size_t i = 0; i < heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = heap->get_region(i);
    if (r->get_top_before_promote() != nullptr) {
      r->restore_top_before_promote();
    }
  }
}

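// Tallies one region's contribution to the region count, usage, and humongous
// waste totals. Waste accrues only in the trailing region of a humongous object:
// for example, a 5000-word object in 2048-word regions spans three regions
// (6144 words), wasting 6144 - 5000 = 1144 words.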
void ShenandoahGenerationalFullGC::account_for_region(ShenandoahHeapRegion* r, size_t& region_count, size_t& region_usage, size_t& humongous_waste) {
  region_count++;
  region_usage += r->used();
  if (r->is_humongous_start()) {
    // For each humongous object, we take this path once regardless of how many regions it spans.
    HeapWord* obj_addr = r->bottom();
    oop obj = cast_to_oop(obj_addr);
    size_t word_size = obj->size();
    size_t region_size_words = ShenandoahHeapRegion::region_size_words();
    size_t overreach = word_size % region_size_words;
    if (overreach != 0) {
      humongous_waste += (region_size_words - overreach) * HeapWordSize;
    }
    // else, this humongous object aligns exactly on region size, so no waste.
  }
}

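// Pinned old regions cannot be compacted, so the garbage within them must be
// coalesced into filler objects to keep the region parsable for remembered set
// scanning.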
void ShenandoahGenerationalFullGC::maybe_coalesce_and_fill_region(ShenandoahHeapRegion* r) {
  if (r->is_pinned() && r->is_old() && r->is_active() && !r->is_humongous()) {
    r->begin_preemptible_coalesce_and_fill();
    r->oop_coalesce_and_fill(false);
  }
}

void ShenandoahGenerationalFullGC::compute_balances() {
  auto heap = ShenandoahGenerationalHeap::heap();

  // In case this Full GC resulted from degeneration, clear the tally on anticipated promotion.
  heap->old_generation()->set_promotion_potential(0);
  // Invoke this in case we are able to transfer memory from OLD to YOUNG.
  heap->compute_old_generation_balance(0, 0);
}

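// This closure plans compaction destinations (forwarding addresses) for live
// objects; no copying happens here. Each worker maintains up to two compaction
// targets: an old to-region (for old objects and for young objects being
// promoted) and a young to-region. The initial from-region seeds the compaction
// point for its own generation.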
ShenandoahPrepareForGenerationalCompactionObjectClosure::ShenandoahPrepareForGenerationalCompactionObjectClosure(PreservedMarks* preserved_marks,
                                                          GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                                          ShenandoahHeapRegion* from_region, uint worker_id) :
        _preserved_marks(preserved_marks),
        _heap(ShenandoahGenerationalHeap::heap()),
        _tenuring_threshold(0),
        _empty_regions(empty_regions),
        _empty_regions_pos(0),
        _old_to_region(nullptr),
        _young_to_region(nullptr),
        _from_region(nullptr),
        _from_affiliation(ShenandoahAffiliation::FREE),
        _old_compact_point(nullptr),
        _young_compact_point(nullptr),
        _worker_id(worker_id) {
  assert(from_region != nullptr, "Worker needs from_region");
  // assert from_region has live?
  if (from_region->is_old()) {
    _old_to_region = from_region;
    _old_compact_point = from_region->bottom();
  } else if (from_region->is_young()) {
    _young_to_region = from_region;
    _young_compact_point = from_region->bottom();
  }

  _tenuring_threshold = _heap->age_census()->tenuring_threshold();
}

219 
220 void ShenandoahPrepareForGenerationalCompactionObjectClosure::set_from_region(ShenandoahHeapRegion* from_region) {
221   log_debug(gc)("Worker %u compacting %s Region " SIZE_FORMAT " which had used " SIZE_FORMAT " and %s live",
222                 _worker_id, from_region->affiliation_name(),
223                 from_region->index(), from_region->used(), from_region->has_live()? "has": "does not have");
224 
225   _from_region = from_region;
226   _from_affiliation = from_region->affiliation();
227   if (_from_region->has_live()) {
228     if (_from_affiliation == ShenandoahAffiliation::OLD_GENERATION) {
229       if (_old_to_region == nullptr) {
230         _old_to_region = from_region;
231         _old_compact_point = from_region->bottom();
232       }
233     } else {
234       assert(_from_affiliation == ShenandoahAffiliation::YOUNG_GENERATION, "from_region must be OLD or YOUNG");
235       if (_young_to_region == nullptr) {
236         _young_to_region = from_region;
237         _young_compact_point = from_region->bottom();
238       }
239     }
240   } // else, we won't iterate over this _from_region so we don't need to set up to region to hold copies
241 }
242 
void ShenandoahPrepareForGenerationalCompactionObjectClosure::finish() {
  finish_old_region();
  finish_young_region();
}

void ShenandoahPrepareForGenerationalCompactionObjectClosure::finish_old_region() {
  if (_old_to_region != nullptr) {
    log_debug(gc)("Planned compaction into Old Region " SIZE_FORMAT ", used: " SIZE_FORMAT " tabulated by worker %u",
                  _old_to_region->index(), _old_compact_point - _old_to_region->bottom(), _worker_id);
    _old_to_region->set_new_top(_old_compact_point);
    _old_to_region = nullptr;
  }
}

void ShenandoahPrepareForGenerationalCompactionObjectClosure::finish_young_region() {
  if (_young_to_region != nullptr) {
    log_debug(gc)("Worker %u planned compaction into Young Region " SIZE_FORMAT ", used: " SIZE_FORMAT,
                  _worker_id, _young_to_region->index(), _young_compact_point - _young_to_region->bottom());
    _young_to_region->set_new_top(_young_compact_point);
    _young_to_region = nullptr;
  }
}

bool ShenandoahPrepareForGenerationalCompactionObjectClosure::is_compact_same_region() {
  return (_from_region == _old_to_region) || (_from_region == _young_to_region);
}

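// Plans the destination for a single live object. Young objects that have reached
// the tenuring threshold are promoted into the old compaction target when one is
// available; all other objects are compacted within their own generation.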
void ShenandoahPrepareForGenerationalCompactionObjectClosure::do_object(oop p) {
  assert(_from_region != nullptr, "must set before work");
  assert((_from_region->bottom() <= cast_from_oop<HeapWord*>(p)) && (cast_from_oop<HeapWord*>(p) < _from_region->top()),
         "Object must reside in _from_region");
  assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
  assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");

  size_t obj_size = p->size();
  uint from_region_age = _from_region->age();
  uint object_age = p->age();

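  // Decide whether to promote: a young object whose age, combined with its
  // region's age, has reached the tenuring threshold should move to old.
  // Promotion is deferred if no old to-region can be found for it below.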
  bool promote_object = false;
  if ((_from_affiliation == ShenandoahAffiliation::YOUNG_GENERATION) &&
      (from_region_age + object_age >= _tenuring_threshold)) {
    if ((_old_to_region != nullptr) && (_old_compact_point + obj_size > _old_to_region->end())) {
      finish_old_region();
    }
    if (_old_to_region == nullptr) {
      if (_empty_regions_pos < _empty_regions.length()) {
        ShenandoahHeapRegion* new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
        new_to_region->set_affiliation(OLD_GENERATION);
        _old_to_region = new_to_region;
        _old_compact_point = _old_to_region->bottom();
        promote_object = true;
      }
      // Else this worker thread does not yet have any empty regions into which this aged object can be promoted,
      // so we leave promote_object as false, deferring the promotion.
    } else {
      promote_object = true;
    }
  }

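  // Old objects, and young objects selected for promotion, compact into the old
  // target; everything else compacts into the young target.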
  if (promote_object || (_from_affiliation == ShenandoahAffiliation::OLD_GENERATION)) {
    assert(_old_to_region != nullptr, "_old_to_region should not be nullptr when evacuating to OLD region");
    if (_old_compact_point + obj_size > _old_to_region->end()) {
      ShenandoahHeapRegion* new_to_region;

      log_debug(gc)("Worker %u finishing old region " SIZE_FORMAT ", compact_point: " PTR_FORMAT ", obj_size: " SIZE_FORMAT
                    ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT,
                    _worker_id, _old_to_region->index(), p2i(_old_compact_point), obj_size,
                    p2i(_old_compact_point + obj_size), p2i(_old_to_region->end()));

      // Object does not fit.  Get a new _old_to_region.
      finish_old_region();
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
        new_to_region->set_affiliation(OLD_GENERATION);
      } else {
        // If we've exhausted the previously selected _old_to_region, we know that the _old_to_region is distinct
        // from _from_region.  That's because there is always room for _from_region to be compacted into itself.
        // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction.
        new_to_region = _from_region;
      }

      assert(new_to_region != _old_to_region, "must not reuse same OLD to-region");
      assert(new_to_region != nullptr, "must not be nullptr");
      _old_to_region = new_to_region;
      _old_compact_point = _old_to_region->bottom();
    }

    // Object fits into the current region; record its new location unless it stays in place:
    assert(_old_compact_point + obj_size <= _old_to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(nullptr, p);
    if (_old_compact_point != cast_from_oop<HeapWord*>(p)) {
      _preserved_marks->push_if_necessary(p, p->mark());
      FullGCForwarding::forward_to(p, cast_to_oop(_old_compact_point));
    }
    _old_compact_point += obj_size;
  } else {
    assert(_from_affiliation == ShenandoahAffiliation::YOUNG_GENERATION,
           "_from_region must be OLD_GENERATION or YOUNG_GENERATION");
    assert(_young_to_region != nullptr, "_young_to_region should not be nullptr when compacting YOUNG _from_region");

    // After full GC compaction, all regions have age 0.  Embed the region's age into the object's age in order to preserve
    // tenuring progress.
    if (_heap->is_aging_cycle()) {
      ShenandoahHeap::increase_object_age(p, from_region_age + 1);
    } else {
      ShenandoahHeap::increase_object_age(p, from_region_age);
    }

    if (_young_compact_point + obj_size > _young_to_region->end()) {
      ShenandoahHeapRegion* new_to_region;

      log_debug(gc)("Worker %u finishing young region " SIZE_FORMAT ", compact_point: " PTR_FORMAT ", obj_size: " SIZE_FORMAT
                    ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT,
                    _worker_id, _young_to_region->index(), p2i(_young_compact_point), obj_size,
                    p2i(_young_compact_point + obj_size), p2i(_young_to_region->end()));

      // Object does not fit.  Get a new _young_to_region.
      finish_young_region();
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
        new_to_region->set_affiliation(YOUNG_GENERATION);
      } else {
        // If we've exhausted the previously selected _young_to_region, we know that the _young_to_region is distinct
        // from _from_region.  That's because there is always room for _from_region to be compacted into itself.
        // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction.
        new_to_region = _from_region;
      }

      assert(new_to_region != _young_to_region, "must not reuse same YOUNG to-region");
      assert(new_to_region != nullptr, "must not be nullptr");
      _young_to_region = new_to_region;
      _young_compact_point = _young_to_region->bottom();
    }

    // Object fits into the current region; record its new location unless it stays in place:
    assert(_young_compact_point + obj_size <= _young_to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(nullptr, p);

    if (_young_compact_point != cast_from_oop<HeapWord*>(p)) {
      _preserved_marks->push_if_necessary(p, p->mark());
      FullGCForwarding::forward_to(p, cast_to_oop(_young_compact_point));
    }
    _young_compact_point += obj_size;
  }
}