/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahPacer.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"

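// Closure applied to each live object in a collection set region: evacuate the object unless
// some other thread has already forwarded it. It is driven from evacuate_and_promote_regions()
// via marked_object_iterate().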
class ShenandoahConcurrentEvacuator : public ObjectClosure {
private:
  ShenandoahGenerationalHeap* const _heap;
  Thread* const _thread;
public:
  explicit ShenandoahConcurrentEvacuator(ShenandoahGenerationalHeap* heap) :
          _heap(heap), _thread(Thread::current()) {}

  void do_object(oop p) override {
    shenandoah_assert_marked(nullptr, p);
    if (!p->is_forwarded()) {
      _heap->evacuate_object(p, _thread);
    }
  }
};

ShenandoahGenerationalEvacuationTask::ShenandoahGenerationalEvacuationTask(ShenandoahGenerationalHeap* heap,
                                                                           ShenandoahRegionIterator* iterator,
                                                                           bool concurrent, bool only_promote_regions) :
  WorkerTask("Shenandoah Evacuation"),
  _heap(heap),
  _regions(iterator),
  _concurrent(concurrent),
  _only_promote_regions(only_promote_regions),
  _tenuring_threshold(0)
{
  shenandoah_assert_generational();
  _tenuring_threshold = _heap->age_census()->tenuring_threshold();
}

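// Worker entry point. When running concurrently with mutators, join the suspendible thread set so
// that this worker yields to safepoints; when running at a safepoint, a parallel worker session suffices.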
void ShenandoahGenerationalEvacuationTask::work(uint worker_id) {
  if (_concurrent) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner stsj;
    do_work();
  } else {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    do_work();
  }
}

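// Dispatch to one of two modes: a promote-only pass, which makes no evacuation allocations and so
// does not need the oom-during-evac protocol, or a combined pass that evacuates collection set
// regions and promotes aged regions under ShenandoahEvacOOMScope.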
void ShenandoahGenerationalEvacuationTask::do_work() {
  if (_only_promote_regions) {
    // No allocations will be made, so do not enter the oom-during-evac protocol.
    assert(ShenandoahHeap::heap()->collection_set()->is_empty(), "Should not have a collection set here");
    promote_regions();
  } else {
    assert(!ShenandoahHeap::heap()->collection_set()->is_empty(), "Should have a collection set here");
    ShenandoahEvacOOMScope oom_evac_scope;
    evacuate_and_promote_regions();
  }
}

void log_region(const ShenandoahHeapRegion* r, LogStream* ls) {
  ls->print_cr("GenerationalEvacuationTask, looking at %s region " SIZE_FORMAT ", (age: %d) [%s, %s, %s]",
               r->is_old()? "old": r->is_young()? "young": "free", r->index(), r->age(),
               r->is_active()? "active": "inactive",
               r->is_humongous()? (r->is_humongous_start()? "humongous_start": "humongous_continuation"): "regular",
               r->is_cset()? "cset": "not-cset");
}

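// Walk every region, promoting those that are eligible (see maybe_promote_region()), and stop early
// if the GC has been cancelled.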
void ShenandoahGenerationalEvacuationTask::promote_regions() {
  ShenandoahHeapRegion* r;
  LogTarget(Debug, gc) lt;

  while ((r = _regions->next()) != nullptr) {
    if (lt.is_enabled()) {
      LogStream ls(lt);
      log_region(r, &ls);
    }

    maybe_promote_region(r);

    if (_heap->check_cancelled_gc_and_yield(_concurrent)) {
      break;
    }
  }
}

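// Walk every region: evacuate the live objects of collection set regions, promote eligible aged
// regions that are not in the collection set, and stop early if the GC has been cancelled.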
void ShenandoahGenerationalEvacuationTask::evacuate_and_promote_regions() {
  LogTarget(Debug, gc) lt;
  ShenandoahConcurrentEvacuator cl(_heap);
  ShenandoahHeapRegion* r;

  while ((r = _regions->next()) != nullptr) {
    if (lt.is_enabled()) {
      LogStream ls(lt);
      log_region(r, &ls);
    }

    if (r->is_cset()) {
      assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
      _heap->marked_object_iterate(r, &cl);
      if (ShenandoahPacing) {
        _heap->pacer()->report_evac(r->used() >> LogHeapWordSize);
      }
    } else {
      maybe_promote_region(r);
    }

    if (_heap->check_cancelled_gc_and_yield(_concurrent)) {
      break;
    }
  }
}

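// Promote an active young region whose age has reached the tenuring threshold: humongous start
// regions (with their continuations) go through promote_humongous(); regular regions that have
// received no allocations since mark started are promoted in place.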
void ShenandoahGenerationalEvacuationTask::maybe_promote_region(ShenandoahHeapRegion* r) {
  if (r->is_young() && r->is_active() && (r->age() >= _tenuring_threshold)) {
    if (r->is_humongous_start()) {
      // We promote humongous_start regions along with their affiliated continuations during evacuation rather than
      // doing this work during a safepoint.  We cannot put humongous regions into the collection set because that
      // triggers the load-reference barrier (LRB) to copy on reference fetch.
      //
      // Aged humongous continuation regions are handled with their start region.  If an aged regular region has
      // more garbage than ShenandoahOldGarbageThreshold, we'll promote by evacuation.  If there is room for evacuation
      // in this cycle, the region will be in the collection set.  If there is not room, the region will be promoted
      // by evacuation in some future GC cycle.
      promote_humongous(r);
    } else if (r->is_regular() && (r->get_top_before_promote() != nullptr)) {
      // Likewise, we cannot put promote-in-place regions into the collection set because that would also trigger
      // the LRB to copy on reference fetch.
      //
      // If an aged regular region has received allocations during the current cycle, we do not promote because the
      // newly allocated objects do not have appropriate age; this region's age will be reset to zero at end of cycle.
      promote_in_place(r);
    }
  }
}

// When we promote a region in place, we can continue to use the established marking context to guide subsequent remembered
// set scans of this region's content.  The region will be coalesced and filled prior to the next old-gen marking effort.
// We identify the entirety of the region as DIRTY to force the next remembered set scan to identify the "interesting pointers"
// contained herein.
void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion* region) {
  assert(!_heap->gc_generation()->is_old(), "Sanity check");
  ShenandoahMarkingContext* const marking_context = _heap->young_generation()->complete_marking_context();
  HeapWord* const tams = marking_context->top_at_mark_start(region);

  {
    const size_t old_garbage_threshold = (ShenandoahHeapRegion::region_size_bytes() * ShenandoahOldGarbageThreshold) / 100;
    shenandoah_assert_generations_reconciled();
    assert(!_heap->is_concurrent_old_mark_in_progress(), "Cannot promote in place during old marking");
    assert(region->garbage_before_padded_for_promote() < old_garbage_threshold, "Region " SIZE_FORMAT " has too much garbage for promotion", region->index());
    assert(region->is_young(), "Only young regions can be promoted");
    assert(region->is_regular(), "Use different service to promote humongous regions");
    assert(region->age() >= _tenuring_threshold, "Only promote regions that are sufficiently aged");
    assert(region->get_top_before_promote() == tams, "Region " SIZE_FORMAT " has been used for allocations before promotion", region->index());
  }

  ShenandoahOldGeneration* const old_gen = _heap->old_generation();
  ShenandoahYoungGeneration* const young_gen = _heap->young_generation();

  // Rebuild the remembered set information and mark the entire range as DIRTY.  We do NOT scan the content of this
  // range to determine which cards need to be DIRTY.  That would force us to scan the region twice, once now, and
  // once during the subsequent remembered set scan.  Instead, we blindly (conservatively) mark everything as DIRTY
  // now and then sort out the CLEAN cards during the next remembered set scan.
  //
  // Rebuilding the remembered set consists of clearing all object registrations (reset_object_range()) here,
  // then registering every live object and every coalesced range of free objects in the loop that follows.
  ShenandoahScanRemembered* const scanner = old_gen->card_scan();
  scanner->reset_object_range(region->bottom(), region->end());
  scanner->mark_range_as_dirty(region->bottom(), region->get_top_before_promote() - region->bottom());

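  // Walk the region up to TAMS, registering each live object with the card scanner and replacing each
  // run of dead objects with a filler object, so the promoted region remains parseable by the
  // remembered set scanner.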
  HeapWord* obj_addr = region->bottom();
  while (obj_addr < tams) {
    oop obj = cast_to_oop(obj_addr);
    if (marking_context->is_marked(obj)) {
      assert(obj->klass() != nullptr, "klass should not be NULL");
      // This thread is responsible for registering all objects in this region.  No need for lock.
      scanner->register_object_without_lock(obj_addr);
      obj_addr += obj->size();
    } else {
      HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, tams);
      assert(next_marked_obj <= tams, "next marked object cannot exceed tams");
      size_t fill_size = next_marked_obj - obj_addr;
      assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated objects known to be larger than min_size");
      ShenandoahHeap::fill_with_object(obj_addr, fill_size);
      scanner->register_object_without_lock(obj_addr);
      obj_addr = next_marked_obj;
    }
  }
  // We do not need to scan above TAMS because restored top equals tams
  assert(obj_addr == tams, "Expect loop to terminate when obj_addr equals tams");

  {
    ShenandoahHeapLocker locker(_heap->lock());

    HeapWord* update_watermark = region->get_update_watermark();

    // Now that this region is affiliated with old, we can allow it to receive allocations, though it may not be in the
    // is_collector_free range.
    region->restore_top_before_promote();

    size_t region_used = region->used();

    // The update_watermark was likely established while we had the artificially high value of top.  Make it sane now.
    assert(update_watermark >= region->top(), "original top cannot exceed preserved update_watermark");
    region->set_update_watermark(region->top());

    // Unconditionally transfer one region from young to old. This represents the newly promoted region.
    // This expands old and shrinks young by the size of one region.  Strictly, we do not "need" to expand old
    // if there are already enough unaffiliated regions in old to account for this newly promoted region.
    // However, if we do not transfer the capacities, we end up reducing the amount of memory that would have
    // otherwise been available to hold old evacuations, because old available is max_capacity - used and now
    // we would be trading a fully empty region for a partially used region.
    young_gen->decrease_used(region_used);
    young_gen->decrement_affiliated_region_count();

    // transfer_to_old() increases capacity of old and decreases capacity of young
    _heap->generation_sizer()->force_transfer_to_old(1);
    region->set_affiliation(OLD_GENERATION);

    old_gen->increment_affiliated_region_count();
    old_gen->increase_used(region_used);

    // add_old_collector_free_region() increases promoted_reserve() if available space exceeds plab_min_size()
    _heap->free_set()->add_promoted_in_place_region_to_old_collector(region);
  }
}

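// Promote an aged humongous object: re-affiliate its start region and every continuation region to
// old, move the object's used bytes and humongous waste from young accounting to old accounting,
// register the object with the remembered set scanner, and mark its cards clean (primitive arrays,
// which need no further scanning) or dirty (everything else).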
void ShenandoahGenerationalEvacuationTask::promote_humongous(ShenandoahHeapRegion* region) {
  ShenandoahMarkingContext* marking_context = _heap->marking_context();
  oop obj = cast_to_oop(region->bottom());
  assert(_heap->gc_generation()->is_mark_complete(), "sanity");
  shenandoah_assert_generations_reconciled();
  assert(region->is_young(), "Only young regions can be promoted");
  assert(region->is_humongous_start(), "Should not promote humongous continuation in isolation");
  assert(region->age() >= _tenuring_threshold, "Only promote regions that are sufficiently aged");
  assert(marking_context->is_marked(obj), "promoted humongous object should be alive");

  const size_t used_bytes = obj->size() * HeapWordSize;
  const size_t spanned_regions = ShenandoahHeapRegion::required_regions(used_bytes);
  const size_t humongous_waste = spanned_regions * ShenandoahHeapRegion::region_size_bytes() - used_bytes;
  const size_t index_limit = region->index() + spanned_regions;

  ShenandoahOldGeneration* const old_gen = _heap->old_generation();
  ShenandoahGeneration* const young_gen = _heap->young_generation();
  {
    // We need to grab the heap lock in order to avoid a race when changing the affiliations of spanned_regions from
    // young to old.
    ShenandoahHeapLocker locker(_heap->lock());

    // We promote humongous objects unconditionally, without checking for availability.  We adjust
    // usage totals, including humongous waste, after evacuation is done.
    log_debug(gc)("promoting humongous region " SIZE_FORMAT ", spanning " SIZE_FORMAT, region->index(), spanned_regions);

    young_gen->decrease_used(used_bytes);
    young_gen->decrease_humongous_waste(humongous_waste);
    young_gen->decrease_affiliated_region_count(spanned_regions);

    // transfer_to_old() increases capacity of old and decreases capacity of young
    _heap->generation_sizer()->force_transfer_to_old(spanned_regions);

    // For this region and each humongous continuation region spanned by this humongous object, change
    // affiliation to OLD_GENERATION and adjust the generation-use tallies.  The remnant of memory
    // in the last humongous region that is not spanned by obj is currently not used.
    for (size_t i = region->index(); i < index_limit; i++) {
      ShenandoahHeapRegion* r = _heap->get_region(i);
      log_debug(gc)("promoting humongous region " SIZE_FORMAT ", from " PTR_FORMAT " to " PTR_FORMAT,
              r->index(), p2i(r->bottom()), p2i(r->top()));
      // Card state for the entire humongous object is set after this loop (clean for primitive arrays,
      // dirty otherwise), so there is no need to update cards here.
      r->set_affiliation(OLD_GENERATION);
    }

    old_gen->increase_affiliated_region_count(spanned_regions);
    old_gen->increase_used(used_bytes);
    old_gen->increase_humongous_waste(humongous_waste);
  }

  // Since this region may have served previously as OLD, it may hold obsolete object range info.
  HeapWord* const humongous_bottom = region->bottom();
  ShenandoahScanRemembered* const scanner = old_gen->card_scan();
  scanner->reset_object_range(humongous_bottom, humongous_bottom + spanned_regions * ShenandoahHeapRegion::region_size_words());
  // Since the humongous region holds only one object, no lock is necessary for this register_object() invocation.
  scanner->register_object_without_lock(humongous_bottom);

  if (obj->is_typeArray()) {
    // Primitive arrays don't need to be scanned.
    log_debug(gc)("Clean cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT,
            region->index(), p2i(humongous_bottom), p2i(humongous_bottom + obj->size()));
    scanner->mark_range_as_clean(humongous_bottom, obj->size());
  } else {
    log_debug(gc)("Dirty cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT,
            region->index(), p2i(humongous_bottom), p2i(humongous_bottom + obj->size()));
    scanner->mark_range_as_dirty(humongous_bottom, obj->size());
  }
}