/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahPacer.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"

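// Closure applied to each live object in a collection set region: any marked object that has not
// already been forwarded is evacuated by the current thread.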
class ShenandoahConcurrentEvacuator : public ObjectClosure {
private:
  ShenandoahGenerationalHeap* const _heap;
  Thread* const _thread;
public:
  explicit ShenandoahConcurrentEvacuator(ShenandoahGenerationalHeap* heap) :
          _heap(heap), _thread(Thread::current()) {}

  void do_object(oop p) override {
    shenandoah_assert_marked(nullptr, p);
    if (!p->is_forwarded()) {
      _heap->evacuate_object(p, _thread);
    }
  }
};

ShenandoahGenerationalEvacuationTask::ShenandoahGenerationalEvacuationTask(ShenandoahGenerationalHeap* heap,
                                                                           ShenandoahRegionIterator* iterator,
                                                                           bool concurrent, bool only_promote_regions) :
  WorkerTask("Shenandoah Evacuation"),
  _heap(heap),
  _regions(iterator),
  _concurrent(concurrent),
  _only_promote_regions(only_promote_regions)
{
  shenandoah_assert_generational();
}

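// Per-worker entry point. Concurrent workers join the suspendible thread set so evacuation can
// yield to safepoints; otherwise a parallel worker session is used.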
void ShenandoahGenerationalEvacuationTask::work(uint worker_id) {
  if (_concurrent) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner stsj;
    do_work();
  } else {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    do_work();
  }
}

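// Dispatch on the task mode: promotion-only tasks run without a collection set and outside the
// oom-during-evac protocol; evacuation tasks process the collection set within that protocol.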
void ShenandoahGenerationalEvacuationTask::do_work() {
  if (_only_promote_regions) {
    // No allocations will be made, so there is no need to enter the oom-during-evac protocol.
    assert(ShenandoahHeap::heap()->collection_set()->is_empty(), "Should not have a collection set here");
    promote_regions();
  } else {
    assert(!ShenandoahHeap::heap()->collection_set()->is_empty(), "Should have a collection set here");
    ShenandoahEvacOOMScope oom_evac_scope;
    evacuate_and_promote_regions();
  }
}

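// Emit a one-line description of a region (affiliation, age, state) for debug-level GC logging.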
void log_region(const ShenandoahHeapRegion* r, LogStream* ls) {
  ls->print_cr("GenerationalEvacuationTask, looking at %s region " SIZE_FORMAT ", (age: %d) [%s, %s, %s]",
              r->is_old()? "old": r->is_young()? "young": "free", r->index(), r->age(),
              r->is_active()? "active": "inactive",
              r->is_humongous()? (r->is_humongous_start()? "humongous_start": "humongous_continuation"): "regular",
              r->is_cset()? "cset": "not-cset");
}

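// Promotion-only mode: visit every region, promote those that are eligible, and check for GC
// cancellation after each region.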
void ShenandoahGenerationalEvacuationTask::promote_regions() {
  ShenandoahHeapRegion* r;
  LogTarget(Debug, gc) lt;

  while ((r = _regions->next()) != nullptr) {
    if (lt.is_enabled()) {
      LogStream ls(lt);
      log_region(r, &ls);
    }

    maybe_promote_region(r);

    if (_heap->check_cancelled_gc_and_yield(_concurrent)) {
      break;
    }
  }
}

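// Evacuation mode: live objects in collection set regions are evacuated; all other regions are
// considered for promotion. When pacing is enabled, evacuated regions are reported to the pacer.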
void ShenandoahGenerationalEvacuationTask::evacuate_and_promote_regions() {
  LogTarget(Debug, gc) lt;
  ShenandoahConcurrentEvacuator cl(_heap);
  ShenandoahHeapRegion* r;

  while ((r = _regions->next()) != nullptr) {
    if (lt.is_enabled()) {
      LogStream ls(lt);
      log_region(r, &ls);
    }

    if (r->is_cset()) {
      assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
      _heap->marked_object_iterate(r, &cl);
      if (ShenandoahPacing) {
        _heap->pacer()->report_evac(r->used() >> LogHeapWordSize);
      }
    } else {
      maybe_promote_region(r);
    }

    if (_heap->check_cancelled_gc_and_yield(_concurrent)) {
      break;
    }
  }
}

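// A young, active region that has reached tenure age is promoted either as a humongous object
// (its start region together with all of its continuation regions) or in place (a regular region
// whose top-before-promote has been recorded).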
void ShenandoahGenerationalEvacuationTask::maybe_promote_region(ShenandoahHeapRegion* r) {
  if (r->is_young() && r->is_active() && _heap->is_tenurable(r)) {
    if (r->is_humongous_start()) {
      // We promote humongous_start regions along with their affiliated continuations during evacuation rather than
      // doing this work during a safepoint.  We cannot put humongous regions into the collection set because that
      // triggers the load-reference barrier (LRB) to copy on reference fetch.
      //
      // Aged humongous continuation regions are handled with their start region.  If an aged regular region has
      // more garbage than ShenandoahOldGarbageThreshold, we'll promote by evacuation.  If there is room for evacuation
      // in this cycle, the region will be in the collection set.  If there is not room, the region will be promoted
      // by evacuation in some future GC cycle.
      promote_humongous(r);
    } else if (r->is_regular() && (r->get_top_before_promote() != nullptr)) {
      // Likewise, we cannot put promote-in-place regions into the collection set because that would also trigger
      // the LRB to copy on reference fetch.
      //
      // If an aged regular region has received allocations during the current cycle, we do not promote because the
      // newly allocated objects do not have appropriate age; this region's age will be reset to zero at end of cycle.
      promote_in_place(r);
    }
  }
}

// When we promote a region in place, we can continue to use the established marking context to guide subsequent remembered
// set scans of this region's content.  The region will be coalesced and filled prior to the next old-gen marking effort.
// We identify the entirety of the region as DIRTY to force the next remembered set scan to identify the "interesting pointers"
// contained herein.
void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion* region) {
  assert(!_heap->gc_generation()->is_old(), "Sanity check");
  ShenandoahMarkingContext* const marking_context = _heap->young_generation()->complete_marking_context();
  HeapWord* const tams = marking_context->top_at_mark_start(region);

  {
    const size_t old_garbage_threshold = (ShenandoahHeapRegion::region_size_bytes() * ShenandoahOldGarbageThreshold) / 100;
    shenandoah_assert_generations_reconciled();
    assert(!_heap->is_concurrent_old_mark_in_progress(), "Cannot promote in place during old marking");
    assert(region->garbage_before_padded_for_promote() < old_garbage_threshold, "Region " SIZE_FORMAT " has too much garbage for promotion", region->index());
    assert(region->is_young(), "Only young regions can be promoted");
    assert(region->is_regular(), "Use different service to promote humongous regions");
    assert(_heap->is_tenurable(region), "Only promote regions that are sufficiently aged");
    assert(region->get_top_before_promote() == tams, "Region " SIZE_FORMAT " has been used for allocations before promotion", region->index());
  }

  ShenandoahOldGeneration* const old_gen = _heap->old_generation();
  ShenandoahYoungGeneration* const young_gen = _heap->young_generation();

  // Rebuild the remembered set information and mark the entire range as DIRTY.  We do NOT scan the content of this
  // range to determine which cards need to be DIRTY.  That would force us to scan the region twice, once now, and
  // once during the subsequent remembered set scan.  Instead, we blindly (conservatively) mark everything as DIRTY
  // now and then sort out the CLEAN pages during the next remembered set scan.
  //
  // Rebuilding the remembered set consists of clearing all object registrations (reset_object_range()) here,
  // then registering every live object and every coalesced range of free objects in the loop that follows.
  ShenandoahScanRemembered* const scanner = old_gen->card_scan();
  scanner->reset_object_range(region->bottom(), region->end());
  scanner->mark_range_as_dirty(region->bottom(), region->get_top_before_promote() - region->bottom());

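  // Walk the region up to TAMS: register each marked object with the remembered set scanner, and
  // convert each unmarked gap into a single filler object that is registered in its place.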
  HeapWord* obj_addr = region->bottom();
  while (obj_addr < tams) {
    oop obj = cast_to_oop(obj_addr);
    if (marking_context->is_marked(obj)) {
      assert(obj->klass() != nullptr, "klass should not be NULL");
      // This thread is responsible for registering all objects in this region.  No need for lock.
      scanner->register_object_without_lock(obj_addr);
      obj_addr += obj->size();
    } else {
      HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, tams);
      assert(next_marked_obj <= tams, "next marked object cannot exceed tams");
      size_t fill_size = next_marked_obj - obj_addr;
      assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated objects known to be larger than min_size");
      ShenandoahHeap::fill_with_object(obj_addr, fill_size);
      scanner->register_object_without_lock(obj_addr);
      obj_addr = next_marked_obj;
    }
  }
  // We do not need to scan above TAMS because restored top equals tams
  assert(obj_addr == tams, "Expect loop to terminate when obj_addr equals tams");

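  // With the heap lock held: restore the region's top, lower the update watermark to the restored
  // top, move the used accounting from young to old, and hand the region to the old-collector free set.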
  {
    ShenandoahHeapLocker locker(_heap->lock());

    HeapWord* update_watermark = region->get_update_watermark();

    // Now that this region is affiliated with old, we can allow it to receive allocations, though it may not be in the
    // is_collector_free range.
    region->restore_top_before_promote();

    size_t region_used = region->used();

    // The update_watermark was likely established while we had the artificially high value of top.  Make it sane now.
    assert(update_watermark >= region->top(), "original top cannot exceed preserved update_watermark");
    region->set_update_watermark(region->top());

    // Unconditionally transfer one region from young to old. This represents the newly promoted region.
    // This expands old and shrinks young by the size of one region.  Strictly, we do not "need" to expand old
    // if there are already enough unaffiliated regions in old to account for this newly promoted region.
    // However, if we do not transfer the capacities, we end up reducing the amount of memory that would have
    // otherwise been available to hold old evacuations, because old available is max_capacity - used and now
    // we would be trading a fully empty region for a partially used region.
    young_gen->decrease_used(region_used);
    young_gen->decrement_affiliated_region_count();

    // transfer_to_old() increases capacity of old and decreases capacity of young
    _heap->generation_sizer()->force_transfer_to_old(1);
    region->set_affiliation(OLD_GENERATION);

    old_gen->increment_affiliated_region_count();
    old_gen->increase_used(region_used);

    // add_old_collector_free_region() increases promoted_reserve() if available space exceeds plab_min_size()
    _heap->free_set()->add_promoted_in_place_region_to_old_collector(region);
  }
}

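// Promote a humongous object by changing the affiliation of its start region and all of its
// continuation regions from young to old. No objects are copied; only generation accounting and
// remembered set state are updated.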
void ShenandoahGenerationalEvacuationTask::promote_humongous(ShenandoahHeapRegion* region) {
  ShenandoahMarkingContext* marking_context = _heap->marking_context();
  oop obj = cast_to_oop(region->bottom());
  assert(_heap->gc_generation()->is_mark_complete(), "sanity");
  shenandoah_assert_generations_reconciled();
  assert(region->is_young(), "Only young regions can be promoted");
  assert(region->is_humongous_start(), "Should not promote humongous continuation in isolation");
  assert(_heap->is_tenurable(region), "Only promote regions that are sufficiently aged");
  assert(marking_context->is_marked(obj), "promoted humongous object should be alive");

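  // The object occupies used_bytes; required_regions() rounds that up to whole regions, and the
  // unused tail of the last spanned region is accounted as humongous waste.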
  const size_t used_bytes = obj->size() * HeapWordSize;
  const size_t spanned_regions = ShenandoahHeapRegion::required_regions(used_bytes);
  const size_t humongous_waste = spanned_regions * ShenandoahHeapRegion::region_size_bytes() - used_bytes;
  const size_t index_limit = region->index() + spanned_regions;

  ShenandoahOldGeneration* const old_gen = _heap->old_generation();
  ShenandoahGeneration* const young_gen = _heap->young_generation();
  {
    // We need to grab the heap lock in order to avoid a race when changing the affiliations of spanned_regions from
    // young to old.
    ShenandoahHeapLocker locker(_heap->lock());

    // We promote humongous objects unconditionally, without checking for availability.  We adjust
    // usage totals, including humongous waste, after evacuation is done.
    log_debug(gc)("promoting humongous region " SIZE_FORMAT ", spanning " SIZE_FORMAT, region->index(), spanned_regions);

    young_gen->decrease_used(used_bytes);
    young_gen->decrease_humongous_waste(humongous_waste);
    young_gen->decrease_affiliated_region_count(spanned_regions);

    // transfer_to_old() increases capacity of old and decreases capacity of young
    _heap->generation_sizer()->force_transfer_to_old(spanned_regions);

    // For this region and each humongous continuation region spanned by this humongous object, change
    // affiliation to OLD_GENERATION and adjust the generation-use tallies.  The remnant of memory
    // in the last humongous region that is not spanned by obj is currently not used.
    for (size_t i = region->index(); i < index_limit; i++) {
      ShenandoahHeapRegion* r = _heap->get_region(i);
      log_debug(gc)("promoting humongous region " SIZE_FORMAT ", from " PTR_FORMAT " to " PTR_FORMAT,
              r->index(), p2i(r->bottom()), p2i(r->top()));
      // The remembered set for the entire humongous range is established after this loop terminates
      // (dirty, or clean for primitive arrays), so there is no need to touch cards here.
      r->set_affiliation(OLD_GENERATION);
    }

    old_gen->increase_affiliated_region_count(spanned_regions);
    old_gen->increase_used(used_bytes);
    old_gen->increase_humongous_waste(humongous_waste);
  }

  // Since this region may have served previously as OLD, it may hold obsolete object range info.
  HeapWord* const humongous_bottom = region->bottom();
  ShenandoahScanRemembered* const scanner = old_gen->card_scan();
  scanner->reset_object_range(humongous_bottom, humongous_bottom + spanned_regions * ShenandoahHeapRegion::region_size_words());
  // Since the humongous region holds only one object, no lock is necessary for this register_object() invocation.
  scanner->register_object_without_lock(humongous_bottom);

  if (obj->is_typeArray()) {
    // Primitive arrays don't need to be scanned.
    log_debug(gc)("Clean cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT,
            region->index(), p2i(humongous_bottom), p2i(humongous_bottom + obj->size()));
    scanner->mark_range_as_clean(humongous_bottom, obj->size());
  } else {
    log_debug(gc)("Dirty cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT,
            region->index(), p2i(humongous_bottom), p2i(humongous_bottom + obj->size()));
    scanner->mark_range_as_dirty(humongous_bottom, obj->size());
  }
}