/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahPacer.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"

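// Closure applied to each marked object in a collection set region: evacuates the object
// unless some other thread has already forwarded it.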
class ShenandoahConcurrentEvacuator : public ObjectClosure {
private:
  ShenandoahGenerationalHeap* const _heap;
  Thread* const _thread;
public:
  explicit ShenandoahConcurrentEvacuator(ShenandoahGenerationalHeap* heap) :
          _heap(heap), _thread(Thread::current()) {}

  void do_object(oop p) override {
    shenandoah_assert_marked(nullptr, p);
    if (!p->is_forwarded()) {
      _heap->evacuate_object(p, _thread);
    }
  }
};

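// The tenuring threshold is sampled once, when the task is constructed, so every worker
// applies the same promotion criterion for the duration of this evacuation.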
ShenandoahGenerationalEvacuationTask::ShenandoahGenerationalEvacuationTask(ShenandoahGenerationalHeap* heap,
                                                                           ShenandoahRegionIterator* iterator,
                                                                           bool concurrent) :
  WorkerTask("Shenandoah Evacuation"),
  _heap(heap),
  _regions(iterator),
  _concurrent(concurrent),
  _tenuring_threshold(0)
{
  shenandoah_assert_generational();
  _tenuring_threshold = _heap->age_census()->tenuring_threshold();
}

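// Worker entry point.  The concurrent path joins the suspendible thread set so workers can
// yield at safepoints; both paths establish an evacuation OOM scope before doing the work.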
void ShenandoahGenerationalEvacuationTask::work(uint worker_id) {
  if (_concurrent) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner stsj;
    ShenandoahEvacOOMScope oom_evac_scope;
    do_work();
  } else {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahEvacOOMScope oom_evac_scope;
    do_work();
  }
}

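// Claim regions from the shared iterator.  Collection set regions are evacuated; aged young
// regions outside the collection set are promoted in place (regular regions) or promoted as a
// unit (humongous start regions together with their continuations).  Stops early if the GC has
// been cancelled.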
void ShenandoahGenerationalEvacuationTask::do_work() {
  ShenandoahConcurrentEvacuator cl(_heap);
  ShenandoahHeapRegion* r;

  while ((r = _regions->next()) != nullptr) {
    log_debug(gc)("GenerationalEvacuationTask do_work(), looking at %s region " SIZE_FORMAT ", (age: %d) [%s, %s, %s]",
            r->is_old()? "old": r->is_young()? "young": "free", r->index(), r->age(),
            r->is_active()? "active": "inactive",
            r->is_humongous()? (r->is_humongous_start()? "humongous_start": "humongous_continuation"): "regular",
            r->is_cset()? "cset": "not-cset");

    if (r->is_cset()) {
      assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
      _heap->marked_object_iterate(r, &cl);
      if (ShenandoahPacing) {
        _heap->pacer()->report_evac(r->used() >> LogHeapWordSize);
      }
    } else if (r->is_young() && r->is_active() && (r->age() >= _tenuring_threshold)) {
      if (r->is_humongous_start()) {
        // We promote humongous_start regions along with their affiliated continuations during evacuation rather than
        // doing this work during a safepoint.  We cannot put humongous regions into the collection set because that
        // triggers the load-reference barrier (LRB) to copy on reference fetch.
        promote_humongous(r);
      } else if (r->is_regular() && (r->get_top_before_promote() != nullptr)) {
        // Likewise, we cannot put promote-in-place regions into the collection set because that would also trigger
        // the LRB to copy on reference fetch.
        promote_in_place(r);
      }
      // Aged humongous continuation regions are handled with their start region.  If an aged regular region has
      // more garbage than ShenandoahOldGarbageThreshold, we'll promote by evacuation.  If there is room for evacuation
      // in this cycle, the region will be in the collection set.  If there is not room, the region will be promoted
      // by evacuation in some future GC cycle.

      // If an aged regular region has received allocations during the current cycle, we do not promote because the
      // newly allocated objects do not have appropriate age; this region's age will be reset to zero at end of cycle.
    }
    // else, region is free, or OLD, or not in collection set, or humongous_continuation,
    // or is young humongous_start that is too young to be promoted
    if (_heap->check_cancelled_gc_and_yield(_concurrent)) {
      break;
    }
  }
}

// When we promote a region in place, we can continue to use the established marking context to guide subsequent remembered
// set scans of this region's content.  The region will be coalesced and filled prior to the next old-gen marking effort.
// We identify the entirety of the region as DIRTY to force the next remembered set scan to identify the "interesting pointers"
// contained herein.
void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion* region) {
  ShenandoahMarkingContext* const marking_context = _heap->marking_context();
  HeapWord* const tams = marking_context->top_at_mark_start(region);

  {
    const size_t old_garbage_threshold = (ShenandoahHeapRegion::region_size_bytes() * ShenandoahOldGarbageThreshold) / 100;
    assert(_heap->active_generation()->is_mark_complete(), "sanity");
    assert(!_heap->is_concurrent_old_mark_in_progress(), "Cannot promote in place during old marking");
    assert(region->garbage_before_padded_for_promote() < old_garbage_threshold, "Region " SIZE_FORMAT " has too much garbage for promotion", region->index());
    assert(region->is_young(), "Only young regions can be promoted");
    assert(region->is_regular(), "Use different service to promote humongous regions");
    assert(region->age() >= _heap->age_census()->tenuring_threshold(), "Only promote regions that are sufficiently aged");
    assert(region->get_top_before_promote() == tams, "Region " SIZE_FORMAT " has been used for allocations before promotion", region->index());
  }

  // Rebuild the remembered set information and mark the entire range as DIRTY.  We do NOT scan the content of this
  // range to determine which cards need to be DIRTY.  That would force us to scan the region twice, once now, and
  // once during the subsequent remembered set scan.  Instead, we blindly (conservatively) mark everything as DIRTY
  // now and then sort out the CLEAN pages during the next remembered set scan.
  //
  // Rebuilding the remembered set consists of clearing all object registrations (reset_object_range()) here,
  // then registering every live object and every coalesced range of free objects in the loop that follows.
  _heap->card_scan()->reset_object_range(region->bottom(), region->end());
  _heap->card_scan()->mark_range_as_dirty(region->bottom(), region->get_top_before_promote() - region->bottom());

  // TODO: use an existing coalesce-and-fill function rather than replicating the code here.
  HeapWord* obj_addr = region->bottom();
  while (obj_addr < tams) {
    oop obj = cast_to_oop(obj_addr);
    if (marking_context->is_marked(obj)) {
      assert(obj->klass() != nullptr, "klass should not be NULL");
      // This thread is responsible for registering all objects in this region.  No need for lock.
      _heap->card_scan()->register_object_without_lock(obj_addr);
      obj_addr += obj->size();
    } else {
      HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, tams);
      assert(next_marked_obj <= tams, "next marked object cannot exceed tams");
      size_t fill_size = next_marked_obj - obj_addr;
      assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated objects known to be larger than min_size");
      ShenandoahHeap::fill_with_object(obj_addr, fill_size);
      _heap->card_scan()->register_object_without_lock(obj_addr);
      obj_addr = next_marked_obj;
    }
  }
  // We do not need to scan above TAMS because restored top equals tams
  assert(obj_addr == tams, "Expect loop to terminate when obj_addr equals tams");

  ShenandoahOldGeneration* const old_gen = _heap->old_generation();
  ShenandoahYoungGeneration* const young_gen = _heap->young_generation();

  {
    ShenandoahHeapLocker locker(_heap->lock());

    HeapWord* update_watermark = region->get_update_watermark();

    // Now that this region is affiliated with old, we can allow it to receive allocations, though it may not be in the
    // is_collector_free range.
    region->restore_top_before_promote();

    size_t region_used = region->used();

    // The update_watermark was likely established while we had the artificially high value of top.  Make it sane now.
    assert(update_watermark >= region->top(), "original top cannot exceed preserved update_watermark");
    region->set_update_watermark(region->top());

    // Unconditionally transfer one region from young to old. This represents the newly promoted region.
    // This expands old and shrinks young by the size of one region.  Strictly, we do not "need" to expand old
    // if there are already enough unaffiliated regions in old to account for this newly promoted region.
    // However, if we do not transfer the capacities, we end up reducing the amount of memory that would have
    // otherwise been available to hold old evacuations, because old available is max_capacity - used and now
    // we would be trading a fully empty region for a partially used region.
    young_gen->decrease_used(region_used);
    young_gen->decrement_affiliated_region_count();

    // transfer_to_old() increases capacity of old and decreases capacity of young
    _heap->generation_sizer()->force_transfer_to_old(1);
    region->set_affiliation(OLD_GENERATION);

    old_gen->increment_affiliated_region_count();
    old_gen->increase_used(region_used);

    // add_old_collector_free_region() increases promoted_reserve() if available space exceeds plab_min_size()
    _heap->free_set()->add_old_collector_free_region(region);
  }
}

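// Promote an aged humongous object by changing the affiliation of its start region and all of its
// continuation regions to OLD_GENERATION, transferring the corresponding capacity and usage from
// young to old, and rebuilding the remembered set information for the promoted range.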
void ShenandoahGenerationalEvacuationTask::promote_humongous(ShenandoahHeapRegion* region) {
  ShenandoahMarkingContext* marking_context = _heap->marking_context();
  oop obj = cast_to_oop(region->bottom());
  assert(_heap->active_generation()->is_mark_complete(), "sanity");
  assert(region->is_young(), "Only young regions can be promoted");
  assert(region->is_humongous_start(), "Should not promote humongous continuation in isolation");
  assert(region->age() >= _heap->age_census()->tenuring_threshold(), "Only promote regions that are sufficiently aged");
  assert(marking_context->is_marked(obj), "promoted humongous object should be alive");

  // TODO: Consider not promoting humongous objects that represent primitive arrays.  Leaving a primitive array
  // (obj->is_typeArray()) in young-gen is harmless because these objects are never relocated and they are not
  // scanned.  Leaving primitive arrays in young-gen memory allows their memory to be reclaimed more quickly when
  // it becomes garbage.  Better to not make this change until sizes of young-gen and old-gen are completely
  // adaptive, as leaving primitive arrays in young-gen might be perceived as an "astonishing result" by someone
  // who has carefully analyzed the required sizes of an application's young-gen and old-gen.
  const size_t used_bytes = obj->size() * HeapWordSize;
  const size_t spanned_regions = ShenandoahHeapRegion::required_regions(used_bytes);
  const size_t humongous_waste = spanned_regions * ShenandoahHeapRegion::region_size_bytes() - used_bytes;
  const size_t index_limit = region->index() + spanned_regions;

  ShenandoahGeneration* const old_generation = _heap->old_generation();
  ShenandoahGeneration* const young_generation = _heap->young_generation();
  {
    // We need to grab the heap lock in order to avoid a race when changing the affiliations of spanned_regions from
    // young to old.
    ShenandoahHeapLocker locker(_heap->lock());

    // We promote humongous objects unconditionally, without checking for availability.  We adjust
    // usage totals, including humongous waste, after evacuation is done.
    log_debug(gc)("promoting humongous region " SIZE_FORMAT ", spanning " SIZE_FORMAT, region->index(), spanned_regions);

    young_generation->decrease_used(used_bytes);
    young_generation->decrease_humongous_waste(humongous_waste);
    young_generation->decrease_affiliated_region_count(spanned_regions);

    // transfer_to_old() increases capacity of old and decreases capacity of young
    _heap->generation_sizer()->force_transfer_to_old(spanned_regions);

    // For this region and each humongous continuation region spanned by this humongous object, change
    // affiliation to OLD_GENERATION and adjust the generation-use tallies.  The remnant of memory
    // in the last humongous region that is not spanned by obj is currently not used.
    for (size_t i = region->index(); i < index_limit; i++) {
      ShenandoahHeapRegion* r = _heap->get_region(i);
      log_debug(gc)("promoting humongous region " SIZE_FORMAT ", from " PTR_FORMAT " to " PTR_FORMAT,
              r->index(), p2i(r->bottom()), p2i(r->top()));
      // We mark the entire humongous object's range as dirty after loop terminates, so no need to dirty the range here
      r->set_affiliation(OLD_GENERATION);
    }

    old_generation->increase_affiliated_region_count(spanned_regions);
    old_generation->increase_used(used_bytes);
    old_generation->increase_humongous_waste(humongous_waste);
  }

  // Since this region may have served previously as OLD, it may hold obsolete object range info.
  HeapWord* const humongous_bottom = region->bottom();
  _heap->card_scan()->reset_object_range(humongous_bottom, humongous_bottom + spanned_regions * ShenandoahHeapRegion::region_size_words());
  // Since the humongous region holds only one object, no lock is necessary for this register_object() invocation.
  _heap->card_scan()->register_object_without_lock(humongous_bottom);

  if (obj->is_typeArray()) {
    // Primitive arrays don't need to be scanned.
    log_debug(gc)("Clean cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT,
            region->index(), p2i(humongous_bottom), p2i(humongous_bottom + obj->size()));
    _heap->card_scan()->mark_range_as_clean(humongous_bottom, obj->size());
  } else {
    log_debug(gc)("Dirty cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT,
            region->index(), p2i(humongous_bottom), p2i(humongous_bottom + obj->size()));
    _heap->card_scan()->mark_range_as_dirty(humongous_bottom, obj->size());
  }
}