/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/fullGCForwarding.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGenerationalFullGC.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"

#ifdef ASSERT
void assert_regions_used_not_more_than_capacity(ShenandoahGeneration* generation) {
  assert(generation->used_regions_size() <= generation->max_capacity(),
         "%s generation affiliated regions must not exceed capacity", generation->name());
}

void assert_usage_not_more_than_regions_used(ShenandoahGeneration* generation) {
  assert(generation->used_including_humongous_waste() <= generation->used_regions_size(),
         "%s consumed can be no larger than span of affiliated regions", generation->name());
}
#else
void assert_regions_used_not_more_than_capacity(ShenandoahGeneration* generation) {}
void assert_usage_not_more_than_regions_used(ShenandoahGeneration* generation) {}
#endif

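// Put the generational heap into the state that full GC expects. A full
// collection always runs against the global generation, no matter which
// generation's cycle degenerated into it.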
void ShenandoahGenerationalFullGC::prepare() {
  auto heap = ShenandoahGenerationalHeap::heap();
  // Since we may arrive here from degenerated GC failure of either young or old, establish generation as GLOBAL.
  heap->set_gc_generation(heap->global_generation());
  heap->set_active_generation();

  // No need for old_gen->increase_used() as this was done when plabs were allocated.
  heap->reset_generation_reserves();

  // Full GC supersedes any marking or coalescing in old generation.
  heap->old_generation()->cancel_gc();
}

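// Bookkeeping for the end of a full GC: close out the cycle for both
// generations' heuristics, update MMU tracking, and re-check the usage
// invariants that full GC is allowed to violate only while it is in progress.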
void ShenandoahGenerationalFullGC::handle_completion(ShenandoahHeap* heap) {
  // Full GC should reset time since last gc for young and old heuristics
  ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::cast(heap);
  ShenandoahYoungGeneration* young = gen_heap->young_generation();
  ShenandoahOldGeneration* old = gen_heap->old_generation();
  young->heuristics()->record_cycle_end();
  old->heuristics()->record_cycle_end();

  gen_heap->mmu_tracker()->record_full(GCId::current());
  gen_heap->log_heap_status("At end of Full GC");

  assert(old->is_idle(), "After full GC, old generation should be idle.");

  // Since we allow temporary violation of these constraints during Full GC, we want to enforce that the assertions are
  // made valid by the time Full GC completes.
  assert_regions_used_not_more_than_capacity(old);
  assert_regions_used_not_more_than_capacity(young);
  assert_usage_not_more_than_regions_used(old);
  assert_usage_not_more_than_regions_used(young);

  // Establish baseline for next old-has-grown trigger.
  old->set_live_bytes_after_last_mark(old->used_including_humongous_waste());
}

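// Reconstruct the remembered set from scratch by scanning all regions in parallel.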
void ShenandoahGenerationalFullGC::rebuild_remembered_set(ShenandoahHeap* heap) {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_reconstruct_remembered_set);
  ShenandoahRegionIterator regions;
  ShenandoahReconstructRememberedSetTask task(&regions);
  heap->workers()->run_task(&task);

  // Rebuilding the remembered set recomputes all the card offsets for objects.
  // The adjust pointers phase coalesces and fills all necessary regions. In case
  // we came to the full GC from an incomplete global cycle, we need to indicate
  // that the old regions are parsable.
  heap->old_generation()->set_parsable(true);
}

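// Compaction can leave the old generation holding more or fewer regions than
// its configured capacity. Transfer whole regions between the generations so
// that old capacity once again matches the regions actually affiliated with old.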
void ShenandoahGenerationalFullGC::balance_generations_after_gc(ShenandoahHeap* heap) {
  ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::cast(heap);
  ShenandoahOldGeneration* const old_gen = gen_heap->old_generation();

  size_t old_usage = old_gen->used_regions_size();
  size_t old_capacity = old_gen->max_capacity();

  assert(old_usage % ShenandoahHeapRegion::region_size_bytes() == 0, "Old usage must align with region size");
  assert(old_capacity % ShenandoahHeapRegion::region_size_bytes() == 0, "Old capacity must align with region size");

  if (old_capacity > old_usage) {
    size_t excess_old_regions = (old_capacity - old_usage) / ShenandoahHeapRegion::region_size_bytes();
    gen_heap->generation_sizer()->transfer_to_young(excess_old_regions);
  } else if (old_capacity < old_usage) {
    size_t old_regions_deficit = (old_usage - old_capacity) / ShenandoahHeapRegion::region_size_bytes();
    gen_heap->generation_sizer()->force_transfer_to_old(old_regions_deficit);
  }

  log_info(gc, ergo)("FullGC done: young usage: " PROPERFMT ", old usage: " PROPERFMT,
                     PROPERFMTARGS(gen_heap->young_generation()->used()),
                     PROPERFMTARGS(old_gen->used()));
}

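// Runs after the free set has been rebuilt: performs the final balance between
// the generations and logs the result.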
void ShenandoahGenerationalFullGC::balance_generations_after_rebuilding_free_set() {
  auto result = ShenandoahGenerationalHeap::heap()->balance_generations();
  LogTarget(Info, gc, ergo) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    result.print_on("Full GC", &ls);
  }
}

void ShenandoahGenerationalFullGC::log_live_in_old(ShenandoahHeap* heap) {
  LogTarget(Debug, gc) lt;
  if (lt.is_enabled()) {
    size_t live_bytes_in_old = 0;
    for (size_t i = 0; i < heap->num_regions(); i++) {
      ShenandoahHeapRegion* r = heap->get_region(i);
      if (r->is_old()) {
        live_bytes_in_old += r->get_live_data_bytes();
      }
    }
    log_debug(gc)("Live bytes in old after STW mark: " PROPERFMT, PROPERFMTARGS(live_bytes_in_old));
  }
}

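// A region with a non-null top-before-promote was interrupted mid
// promotion-in-place; restore the saved top so full GC processes the region's
// original contents.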
void ShenandoahGenerationalFullGC::restore_top_before_promote(ShenandoahHeap* heap) {
  for (size_t i = 0; i < heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = heap->get_region(i);
    if (r->get_top_before_promote() != nullptr) {
      r->restore_top_before_promote();
    }
  }
}

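// Accumulate one region into the running totals. Humongous waste is the unused
// tail of the last region spanned by a humongous object: for example, with
// 2 MiB regions, a 5 MiB object spans three regions and wastes 1 MiB.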
void ShenandoahGenerationalFullGC::account_for_region(ShenandoahHeapRegion* r, size_t &region_count, size_t &region_usage, size_t &humongous_waste) {
  region_count++;
  region_usage += r->used();
  if (r->is_humongous_start()) {
    // For each humongous object, we take this path once regardless of how many regions it spans.
    HeapWord* obj_addr = r->bottom();
    oop obj = cast_to_oop(obj_addr);
    size_t word_size = obj->size();
    size_t region_size_words = ShenandoahHeapRegion::region_size_words();
    size_t overreach = word_size % region_size_words;
    if (overreach != 0) {
      humongous_waste += (region_size_words - overreach) * HeapWordSize;
    }
    // else, this humongous object aligns exactly on region size, so no waste.
  }
}

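// Pinned old regions cannot be compacted, so coalesce and fill their dead
// space to keep them parsable.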
void ShenandoahGenerationalFullGC::maybe_coalesce_and_fill_region(ShenandoahHeapRegion* r) {
  if (r->is_pinned() && r->is_old() && r->is_active() && !r->is_humongous()) {
    r->begin_preemptible_coalesce_and_fill();
    r->oop_coalesce_and_fill(false);
  }
}

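// Reset promotion accounting and recompute how much memory, if any, the old
// generation can cede to young.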
void ShenandoahGenerationalFullGC::compute_balances() {
  auto heap = ShenandoahGenerationalHeap::heap();

  // In case this Full GC resulted from degeneration, clear the tally on anticipated promotion.
  heap->old_generation()->set_promotion_potential(0);
  // Invoke this in case we are able to transfer memory from OLD to YOUNG.
  heap->compute_old_generation_balance(0, 0);
}

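// This closure plans the compaction: it walks the live objects of a from-region
// and assigns each a forwarding address in an old or young to-region. Young
// objects that have reached the tenuring threshold are promoted into old
// to-regions as they are encountered.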
ShenandoahPrepareForGenerationalCompactionObjectClosure::ShenandoahPrepareForGenerationalCompactionObjectClosure(PreservedMarks* preserved_marks,
                                                          GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                                          ShenandoahHeapRegion* from_region, uint worker_id) :
        _preserved_marks(preserved_marks),
        _heap(ShenandoahGenerationalHeap::heap()),
        _tenuring_threshold(0),
        _empty_regions(empty_regions),
        _empty_regions_pos(0),
        _old_to_region(nullptr),
        _young_to_region(nullptr),
        _from_region(nullptr),
        _from_affiliation(ShenandoahAffiliation::FREE),
        _old_compact_point(nullptr),
        _young_compact_point(nullptr),
        _worker_id(worker_id) {
  assert(from_region != nullptr, "Worker needs from_region");
  // assert from_region has live?
  if (from_region->is_old()) {
    _old_to_region = from_region;
    _old_compact_point = from_region->bottom();
  } else if (from_region->is_young()) {
    _young_to_region = from_region;
    _young_compact_point = from_region->bottom();
  }

  _tenuring_threshold = _heap->age_census()->tenuring_threshold();
}

void ShenandoahPrepareForGenerationalCompactionObjectClosure::set_from_region(ShenandoahHeapRegion* from_region) {
  log_debug(gc)("Worker %u compacting %s Region " SIZE_FORMAT " which had used " SIZE_FORMAT " and %s live",
                _worker_id, from_region->affiliation_name(),
                from_region->index(), from_region->used(), from_region->has_live() ? "has" : "does not have");

  _from_region = from_region;
  _from_affiliation = from_region->affiliation();
  if (_from_region->has_live()) {
    if (_from_affiliation == ShenandoahAffiliation::OLD_GENERATION) {
      if (_old_to_region == nullptr) {
        _old_to_region = from_region;
        _old_compact_point = from_region->bottom();
      }
    } else {
      assert(_from_affiliation == ShenandoahAffiliation::YOUNG_GENERATION, "from_region must be OLD or YOUNG");
      if (_young_to_region == nullptr) {
        _young_to_region = from_region;
        _young_compact_point = from_region->bottom();
      }
    }
  } // else, we won't iterate over this _from_region, so we don't need to set up a to-region to hold copies

void ShenandoahPrepareForGenerationalCompactionObjectClosure::finish() {
  finish_old_region();
  finish_young_region();
}

void ShenandoahPrepareForGenerationalCompactionObjectClosure::finish_old_region() {
  if (_old_to_region != nullptr) {
    log_debug(gc)("Planned compaction into Old Region " SIZE_FORMAT ", used: " SIZE_FORMAT " tabulated by worker %u",
                  _old_to_region->index(), _old_compact_point - _old_to_region->bottom(), _worker_id);
    _old_to_region->set_new_top(_old_compact_point);
    _old_to_region = nullptr;
  }
}

void ShenandoahPrepareForGenerationalCompactionObjectClosure::finish_young_region() {
  if (_young_to_region != nullptr) {
    log_debug(gc)("Worker %u planned compaction into Young Region " SIZE_FORMAT ", used: " SIZE_FORMAT,
                  _worker_id, _young_to_region->index(), _young_compact_point - _young_to_region->bottom());
    _young_to_region->set_new_top(_young_compact_point);
    _young_to_region = nullptr;
  }
}

bool ShenandoahPrepareForGenerationalCompactionObjectClosure::is_compact_same_region() {
  return (_from_region == _old_to_region) || (_from_region == _young_to_region);
}

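// Plan the move of a single live object. A forwarding pointer is installed
// only if the object actually moves; an object that compacts onto itself
// keeps its current size and position.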
void ShenandoahPrepareForGenerationalCompactionObjectClosure::do_object(oop p) {
  assert(_from_region != nullptr, "must set before work");
  assert((_from_region->bottom() <= cast_from_oop<HeapWord*>(p)) && (cast_from_oop<HeapWord*>(p) < _from_region->top()),
         "Object must reside in _from_region");
  assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
  assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");

  size_t old_size = p->size();
  size_t new_size = p->copy_size(old_size, p->mark());
  uint from_region_age = _from_region->age();
  uint object_age = p->age();

  bool promote_object = false;
  if ((_from_affiliation == ShenandoahAffiliation::YOUNG_GENERATION) &&
      (from_region_age + object_age >= _tenuring_threshold)) {
    if ((_old_to_region != nullptr) && (_old_compact_point + new_size > _old_to_region->end())) {
      finish_old_region();
      _old_to_region = nullptr;
    }
    if (_old_to_region == nullptr) {
      if (_empty_regions_pos < _empty_regions.length()) {
        ShenandoahHeapRegion* new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
        new_to_region->set_affiliation(OLD_GENERATION);
        _old_to_region = new_to_region;
        _old_compact_point = _old_to_region->bottom();
        promote_object = true;
      }
      // Else this worker thread does not yet have any empty regions into which this aged object can be promoted, so
      // we leave promote_object as false, deferring the promotion.
    } else {
      promote_object = true;
    }
  }

  if (promote_object || (_from_affiliation == ShenandoahAffiliation::OLD_GENERATION)) {
    assert(_old_to_region != nullptr, "_old_to_region should not be nullptr when evacuating to OLD region");
    size_t obj_size = _old_compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
    if (_old_compact_point + obj_size > _old_to_region->end()) {
      ShenandoahHeapRegion* new_to_region;

      log_debug(gc)("Worker %u finishing old region " SIZE_FORMAT ", compact_point: " PTR_FORMAT ", obj_size: " SIZE_FORMAT
                    ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT, _worker_id, _old_to_region->index(),
                    p2i(_old_compact_point), obj_size, p2i(_old_compact_point + obj_size), p2i(_old_to_region->end()));

      // Object does not fit.  Get a new _old_to_region.
      finish_old_region();
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
        new_to_region->set_affiliation(OLD_GENERATION);
      } else {
        // If we've exhausted the previously selected _old_to_region, we know that the _old_to_region is distinct
        // from _from_region.  That's because there is always room for _from_region to be compacted into itself.
        // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction.
        new_to_region = _from_region;
      }

      assert(new_to_region != _old_to_region, "must not reuse same OLD to-region");
      assert(new_to_region != nullptr, "must not be nullptr");
      _old_to_region = new_to_region;
      _old_compact_point = _old_to_region->bottom();
      obj_size = _old_compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
    }

    // Object fits into the current region. Record its new location if it moves:
    assert(_old_compact_point + obj_size <= _old_to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(nullptr, p);
    if (_old_compact_point != cast_from_oop<HeapWord*>(p)) {
      _preserved_marks->push_if_necessary(p, p->mark());
      FullGCForwarding::forward_to(p, cast_to_oop(_old_compact_point));
    }
    _old_compact_point += obj_size;
  } else {
    assert(_from_affiliation == ShenandoahAffiliation::YOUNG_GENERATION,
           "_from_region must be OLD_GENERATION or YOUNG_GENERATION");
    assert(_young_to_region != nullptr, "_young_to_region should not be nullptr when compacting YOUNG _from_region");

    // After full GC compaction, all regions have age 0.  Embed the region's age into the object's age in order to preserve
    // tenuring progress.
    if (_heap->is_aging_cycle()) {
      ShenandoahHeap::increase_object_age(p, from_region_age + 1);
    } else {
      ShenandoahHeap::increase_object_age(p, from_region_age);
    }

    size_t obj_size = _young_compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
    if (_young_compact_point + obj_size > _young_to_region->end()) {
      ShenandoahHeapRegion* new_to_region;

      log_debug(gc)("Worker %u finishing young region " SIZE_FORMAT ", compact_point: " PTR_FORMAT ", obj_size: " SIZE_FORMAT
                    ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT, _worker_id, _young_to_region->index(),
                    p2i(_young_compact_point), obj_size, p2i(_young_compact_point + obj_size), p2i(_young_to_region->end()));

      // Object does not fit.  Get a new _young_to_region.
      finish_young_region();
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
        new_to_region->set_affiliation(YOUNG_GENERATION);
      } else {
        // If we've exhausted the previously selected _young_to_region, we know that the _young_to_region is distinct
        // from _from_region.  That's because there is always room for _from_region to be compacted into itself.
        // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction.
        new_to_region = _from_region;
      }

      assert(new_to_region != _young_to_region, "must not reuse same YOUNG to-region");
      assert(new_to_region != nullptr, "must not be nullptr");
      _young_to_region = new_to_region;
      _young_compact_point = _young_to_region->bottom();
      obj_size = _young_compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
    }

    // Object fits into the current region. Record its new location if it moves:
    assert(_young_compact_point + obj_size <= _young_to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(nullptr, p);

    if (_young_compact_point != cast_from_oop<HeapWord*>(p)) {
      _preserved_marks->push_if_necessary(p, p->mark());
      FullGCForwarding::forward_to(p, cast_to_oop(_young_compact_point));
    }
    _young_compact_point += obj_size;
  }
}