/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahPacer.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"

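// Closure applied to each live object in a collection-set region: evacuates the object
// unless another thread has already forwarded it.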
class ShenandoahConcurrentEvacuator : public ObjectClosure {
private:
  ShenandoahGenerationalHeap* const _heap;
  Thread* const _thread;
public:
  explicit ShenandoahConcurrentEvacuator(ShenandoahGenerationalHeap* heap) :
          _heap(heap), _thread(Thread::current()) {}

  void do_object(oop p) override {
    shenandoah_assert_marked(nullptr, p);
    if (!p->is_forwarded()) {
      _heap->evacuate_object(p, _thread);
    }
  }
};

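// The evacuation task walks heap regions handed out by the shared region iterator. Depending on
// _only_promote_regions, it either promotes aged young regions only (the collection set is empty)
// or evacuates collection-set regions and promotes eligible regions encountered along the way.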
ShenandoahGenerationalEvacuationTask::ShenandoahGenerationalEvacuationTask(ShenandoahGenerationalHeap* heap,
                                                                           ShenandoahRegionIterator* iterator,
                                                                           bool concurrent, bool only_promote_regions) :
  WorkerTask("Shenandoah Evacuation"),
  _heap(heap),
  _regions(iterator),
  _concurrent(concurrent),
  _only_promote_regions(only_promote_regions),
  _tenuring_threshold(0)
{
  shenandoah_assert_generational();
  _tenuring_threshold = _heap->age_census()->tenuring_threshold();
}

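// Concurrent workers join the suspendible thread set so that evacuation can yield to safepoints;
// non-concurrent evacuation runs under a parallel worker session instead.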
void ShenandoahGenerationalEvacuationTask::work(uint worker_id) {
  if (_concurrent) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner stsj;
    do_work();
  } else {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    do_work();
  }
}

void ShenandoahGenerationalEvacuationTask::do_work() {
  if (_only_promote_regions) {
    // No allocations will be made, do not enter oom-during-evac protocol.
    assert(ShenandoahHeap::heap()->collection_set()->is_empty(), "Should not have a collection set here");
    promote_regions();
  } else {
    assert(!ShenandoahHeap::heap()->collection_set()->is_empty(), "Should have a collection set here");
    ShenandoahEvacOOMScope oom_evac_scope;
    evacuate_and_promote_regions();
  }
}

void log_region(const ShenandoahHeapRegion* r, LogStream* ls) {
  ls->print_cr("GenerationalEvacuationTask, looking at %s region " SIZE_FORMAT ", (age: %d) [%s, %s, %s]",
              r->is_old()? "old": r->is_young()? "young": "free", r->index(), r->age(),
              r->is_active()? "active": "inactive",
              r->is_humongous()? (r->is_humongous_start()? "humongous_start": "humongous_continuation"): "regular",
              r->is_cset()? "cset": "not-cset");
}

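// Visit every region, promoting those that are eligible, and stop early if the GC has been cancelled.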
void ShenandoahGenerationalEvacuationTask::promote_regions() {
  ShenandoahHeapRegion* r;
  LogTarget(Debug, gc) lt;

  while ((r = _regions->next()) != nullptr) {
    if (lt.is_enabled()) {
      LogStream ls(lt);
      log_region(r, &ls);
    }

    maybe_promote_region(r);

    if (_heap->check_cancelled_gc_and_yield(_concurrent)) {
      break;
    }
  }
}

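// Evacuate live objects out of collection-set regions and promote eligible non-collection-set
// regions along the way, reporting progress to the pacer when pacing is enabled.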
void ShenandoahGenerationalEvacuationTask::evacuate_and_promote_regions() {
  LogTarget(Debug, gc) lt;
  ShenandoahConcurrentEvacuator cl(_heap);
  ShenandoahHeapRegion* r;

  while ((r = _regions->next()) != nullptr) {
    if (lt.is_enabled()) {
      LogStream ls(lt);
      log_region(r, &ls);
    }

    if (r->is_cset()) {
      assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
      _heap->marked_object_iterate(r, &cl);
      if (ShenandoahPacing) {
        _heap->pacer()->report_evac(r->used() >> LogHeapWordSize);
      }
    } else {
      maybe_promote_region(r);
    }

    if (_heap->check_cancelled_gc_and_yield(_concurrent)) {
      break;
    }
  }
}


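// Promote a young, active region whose age has reached the tenuring threshold: a humongous start
// region is promoted together with its continuations, and a regular region whose top has not moved
// since mark start is promoted in place.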
void ShenandoahGenerationalEvacuationTask::maybe_promote_region(ShenandoahHeapRegion* r) {
  if (r->is_young() && r->is_active() && (r->age() >= _tenuring_threshold)) {
    if (r->is_humongous_start()) {
      // We promote humongous_start regions along with their affiliated continuations during evacuation rather than
      // doing this work during a safepoint.  We cannot put humongous regions into the collection set because that
      // triggers the load-reference barrier (LRB) to copy on reference fetch.
      //
      // Aged humongous continuation regions are handled with their start region.  If an aged regular region has
      // more garbage than ShenandoahOldGarbageThreshold, we'll promote by evacuation.  If there is room for evacuation
      // in this cycle, the region will be in the collection set.  If there is not room, the region will be promoted
      // by evacuation in some future GC cycle.
      promote_humongous(r);
    } else if (r->is_regular() && (r->get_top_before_promote() != nullptr)) {
      // Likewise, we cannot put promote-in-place regions into the collection set because that would also trigger
      // the LRB to copy on reference fetch.
      //
      // If an aged regular region has received allocations during the current cycle, we do not promote because the
      // newly allocated objects do not have appropriate age; this region's age will be reset to zero at end of cycle.
      promote_in_place(r);
    }
  }
}

// When we promote a region in place, we can continue to use the established marking context to guide subsequent remembered
// set scans of this region's content.  The region will be coalesced and filled prior to the next old-gen marking effort.
// We identify the entirety of the region as DIRTY to force the next remembered set scan to identify the "interesting pointers"
// contained herein.
void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion* region) {
  ShenandoahMarkingContext* const marking_context = _heap->complete_marking_context();
  HeapWord* const tams = marking_context->top_at_mark_start(region);

  {
    const size_t old_garbage_threshold = (ShenandoahHeapRegion::region_size_bytes() * ShenandoahOldGarbageThreshold) / 100;
    shenandoah_assert_generations_reconciled();
    assert(!_heap->is_concurrent_old_mark_in_progress(), "Cannot promote in place during old marking");
    assert(region->garbage_before_padded_for_promote() < old_garbage_threshold, "Region " SIZE_FORMAT " has too much garbage for promotion", region->index());
    assert(region->is_young(), "Only young regions can be promoted");
    assert(region->is_regular(), "Use different service to promote humongous regions");
    assert(region->age() >= _tenuring_threshold, "Only promote regions that are sufficiently aged");
    assert(region->get_top_before_promote() == tams, "Region " SIZE_FORMAT " has been used for allocations before promotion", region->index());
  }

  ShenandoahOldGeneration* const old_gen = _heap->old_generation();
  ShenandoahYoungGeneration* const young_gen = _heap->young_generation();

  // Rebuild the remembered set information and mark the entire range as DIRTY.  We do NOT scan the content of this
  // range to determine which cards need to be DIRTY.  That would force us to scan the region twice, once now, and
  // once during the subsequent remembered set scan.  Instead, we blindly (conservatively) mark everything as DIRTY
  // now and then sort out the CLEAN pages during the next remembered set scan.
  //
  // Rebuilding the remembered set consists of clearing all object registrations (reset_object_range()) here,
  // then registering every live object and every coalesced range of free objects in the loop that follows.
  ShenandoahScanRemembered* const scanner = old_gen->card_scan();
  scanner->reset_object_range(region->bottom(), region->end());
  scanner->mark_range_as_dirty(region->bottom(), region->get_top_before_promote() - region->bottom());

  HeapWord* obj_addr = region->bottom();
  while (obj_addr < tams) {
    oop obj = cast_to_oop(obj_addr);
    if (marking_context->is_marked(obj)) {
      assert(obj->klass() != nullptr, "klass should not be NULL");
      // This thread is responsible for registering all objects in this region.  No need for lock.
      scanner->register_object_without_lock(obj_addr);
      obj_addr += obj->size();
    } else {
      HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, tams);
      assert(next_marked_obj <= tams, "next marked object cannot exceed tams");
      size_t fill_size = next_marked_obj - obj_addr;
      assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated objects known to be larger than min_size");
      ShenandoahHeap::fill_with_object(obj_addr, fill_size);
      scanner->register_object_without_lock(obj_addr);
      obj_addr = next_marked_obj;
    }
  }
  // We do not need to scan above TAMS because restored top equals tams
  assert(obj_addr == tams, "Expect loop to terminate when obj_addr equals tams");


  {
    ShenandoahHeapLocker locker(_heap->lock());

    HeapWord* update_watermark = region->get_update_watermark();

    // Now that this region is affiliated with old, we can allow it to receive allocations, though it may not be in the
    // is_collector_free range.
    region->restore_top_before_promote();

    size_t region_used = region->used();

    // The update_watermark was likely established while we had the artificially high value of top.  Make it sane now.
    assert(update_watermark >= region->top(), "original top cannot exceed preserved update_watermark");
    region->set_update_watermark(region->top());

    // Unconditionally transfer one region from young to old. This represents the newly promoted region.
    // This expands old and shrinks young by the size of one region.  Strictly, we do not "need" to expand old
    // if there are already enough unaffiliated regions in old to account for this newly promoted region.
    // However, if we do not transfer the capacities, we end up reducing the amount of memory that would have
    // otherwise been available to hold old evacuations, because old available is max_capacity - used and now
    // we would be trading a fully empty region for a partially used region.
    young_gen->decrease_used(region_used);
    young_gen->decrement_affiliated_region_count();

    // transfer_to_old() increases capacity of old and decreases capacity of young
    _heap->generation_sizer()->force_transfer_to_old(1);
    region->set_affiliation(OLD_GENERATION);

    old_gen->increment_affiliated_region_count();
    old_gen->increase_used(region_used);

    // add_old_collector_free_region() increases promoted_reserve() if available space exceeds plab_min_size()
    _heap->free_set()->add_promoted_in_place_region_to_old_collector(region);
  }
}

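// Promote a humongous object by changing the affiliation of its start region and all spanned
// continuations to old, moving the corresponding used and waste tallies from young to old, and
// rebuilding remembered set information for the object's range.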
void ShenandoahGenerationalEvacuationTask::promote_humongous(ShenandoahHeapRegion* region) {
  ShenandoahMarkingContext* marking_context = _heap->marking_context();
  oop obj = cast_to_oop(region->bottom());
  assert(_heap->gc_generation()->is_mark_complete(), "sanity");
  shenandoah_assert_generations_reconciled();
  assert(region->is_young(), "Only young regions can be promoted");
  assert(region->is_humongous_start(), "Should not promote humongous continuation in isolation");
  assert(region->age() >= _tenuring_threshold, "Only promote regions that are sufficiently aged");
  assert(marking_context->is_marked(obj), "promoted humongous object should be alive");

  const size_t used_bytes = obj->size() * HeapWordSize;
  const size_t spanned_regions = ShenandoahHeapRegion::required_regions(used_bytes);
  const size_t humongous_waste = spanned_regions * ShenandoahHeapRegion::region_size_bytes() - obj->size() * HeapWordSize;
  const size_t index_limit = region->index() + spanned_regions;

  ShenandoahOldGeneration* const old_gen = _heap->old_generation();
  ShenandoahGeneration* const young_gen = _heap->young_generation();
  {
    // We need to grab the heap lock in order to avoid a race when changing the affiliations of spanned_regions from
    // young to old.
    ShenandoahHeapLocker locker(_heap->lock());

    // We promote humongous objects unconditionally, without checking for availability.  We adjust
    // usage totals, including humongous waste, after evacuation is done.
    log_debug(gc)("promoting humongous region " SIZE_FORMAT ", spanning " SIZE_FORMAT, region->index(), spanned_regions);

    young_gen->decrease_used(used_bytes);
    young_gen->decrease_humongous_waste(humongous_waste);
    young_gen->decrease_affiliated_region_count(spanned_regions);

    // transfer_to_old() increases capacity of old and decreases capacity of young
    _heap->generation_sizer()->force_transfer_to_old(spanned_regions);

    // For this region and each humongous continuation region spanned by this humongous object, change
    // affiliation to OLD_GENERATION and adjust the generation-use tallies.  The remnant of memory
    // in the last humongous region that is not spanned by obj is currently not used.
    for (size_t i = region->index(); i < index_limit; i++) {
      ShenandoahHeapRegion* r = _heap->get_region(i);
      log_debug(gc)("promoting humongous region " SIZE_FORMAT ", from " PTR_FORMAT " to " PTR_FORMAT,
              r->index(), p2i(r->bottom()), p2i(r->top()));
      // We mark the entire humongous object's range as dirty after loop terminates, so no need to dirty the range here
      r->set_affiliation(OLD_GENERATION);
    }

    old_gen->increase_affiliated_region_count(spanned_regions);
    old_gen->increase_used(used_bytes);
    old_gen->increase_humongous_waste(humongous_waste);
  }

  // Since this region may have served previously as OLD, it may hold obsolete object range info.
  HeapWord* const humongous_bottom = region->bottom();
  ShenandoahScanRemembered* const scanner = old_gen->card_scan();
  scanner->reset_object_range(humongous_bottom, humongous_bottom + spanned_regions * ShenandoahHeapRegion::region_size_words());
  // Since the humongous region holds only one object, no lock is necessary for this register_object() invocation.
  scanner->register_object_without_lock(humongous_bottom);

  if (obj->is_typeArray()) {
    // Primitive arrays don't need to be scanned.
    log_debug(gc)("Clean cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT,
            region->index(), p2i(humongous_bottom), p2i(humongous_bottom + obj->size()));
    scanner->mark_range_as_clean(humongous_bottom, obj->size());
  } else {
    log_debug(gc)("Dirty cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT,
            region->index(), p2i(humongous_bottom), p2i(humongous_bottom + obj->size()));
    scanner->mark_range_as_dirty(humongous_bottom, obj->size());
  }
}