/*
 * Copyright (c) 2020, 2021 Amazon.com, Inc. and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahMarkClosures.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"

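// Closure that prepares each active region for concurrent mark by capturing
// top-at-mark-start (TAMS) and clearing the region's live data.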
class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
 private:
  ShenandoahMarkingContext* const _ctx;
 public:
  ShenandoahResetUpdateRegionStateClosure() :
    _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_active()) {
      // Reset live data and set TAMS optimistically. We would recheck these under the pause
      // anyway, to capture any updates that happen between now and the pause.
      _ctx->capture_top_at_mark_start(r);
      r->clear_live_data();
    }
  }

  bool is_thread_safe() { return true; }
};

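// Closure that clears the mark bitmap slice for every region whose bitmap memory is committed.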
class ShenandoahResetBitmapTask : public ShenandoahHeapRegionClosure {
 private:
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _ctx;
 public:
  ShenandoahResetBitmapTask() :
    _heap(ShenandoahHeap::heap()),
    _ctx(_heap->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* region) {
    if (_heap->is_bitmap_slice_committed(region)) {
      _ctx->clear_bitmap(region);
    }
  }

  bool is_thread_safe() { return true; }
};

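// Closure that folds the contents of the write card table into the read card table for each
// old region, so that dirty cards recorded in either table are retained (see merge_write_table() below).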
class ShenandoahMergeWriteTable : public ShenandoahHeapRegionClosure {
 private:
  ShenandoahHeap* _heap;
  RememberedScanner* _scanner;
 public:
  ShenandoahMergeWriteTable() : _heap(ShenandoahHeap::heap()), _scanner(_heap->card_scan()) {}

  virtual void heap_region_do(ShenandoahHeapRegion* r) override {
    if (r->is_old()) {
      _scanner->merge_write_table(r->bottom(), ShenandoahHeapRegion::region_size_words());
    }
  }

  virtual bool is_thread_safe() override {
    return true;
  }
};

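// Closure that copies the write card table onto the read card table and then clears the write
// table for each old region; used by swap_remembered_set() below to capture a stable snapshot
// of the remembered set for concurrent scanning.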
class ShenandoahSquirrelAwayCardTable : public ShenandoahHeapRegionClosure {
 private:
  ShenandoahHeap* _heap;
  RememberedScanner* _scanner;
 public:
  ShenandoahSquirrelAwayCardTable() :
    _heap(ShenandoahHeap::heap()),
    _scanner(_heap->card_scan()) {}

  void heap_region_do(ShenandoahHeapRegion* region) {
    if (region->is_old()) {
      _scanner->reset_remset(region->bottom(), ShenandoahHeapRegion::region_size_words());
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahGeneration::confirm_heuristics_mode() {
  if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _heuristics->name()));
  }
  if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _heuristics->name()));
  }
}

ShenandoahHeuristics* ShenandoahGeneration::initialize_heuristics(ShenandoahMode* gc_mode) {
  _heuristics = gc_mode->initialize_heuristics(this);
  _heuristics->set_guaranteed_gc_interval(ShenandoahGuaranteedGCInterval);
  confirm_heuristics_mode();
  return _heuristics;
}

size_t ShenandoahGeneration::bytes_allocated_since_gc_start() {
  return Atomic::load(&_bytes_allocated_since_gc_start);
}

void ShenandoahGeneration::reset_bytes_allocated_since_gc_start() {
  Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
}

void ShenandoahGeneration::increase_allocated(size_t bytes) {
  Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
}

void ShenandoahGeneration::log_status() const {
  typedef LogTarget(Info, gc, ergo) LogGcInfo;

  if (!LogGcInfo::is_enabled()) {
    return;
  }

  // Not under a lock here, so read each of these once to make sure
  // byte size in proper unit and proper unit for byte size are consistent.
  size_t v_used = used();
  size_t v_used_regions = used_regions_size();
  size_t v_soft_max_capacity = soft_max_capacity();
  size_t v_max_capacity = max_capacity();
  size_t v_available = available();
  LogGcInfo::print("%s generation used: " SIZE_FORMAT "%s, used regions: " SIZE_FORMAT "%s, "
                   "soft capacity: " SIZE_FORMAT "%s, max capacity: " SIZE_FORMAT "%s, available: " SIZE_FORMAT "%s",
                   name(),
                   byte_size_in_proper_unit(v_used), proper_unit_for_byte_size(v_used),
                   byte_size_in_proper_unit(v_used_regions), proper_unit_for_byte_size(v_used_regions),
                   byte_size_in_proper_unit(v_soft_max_capacity), proper_unit_for_byte_size(v_soft_max_capacity),
                   byte_size_in_proper_unit(v_max_capacity), proper_unit_for_byte_size(v_max_capacity),
                   byte_size_in_proper_unit(v_available), proper_unit_for_byte_size(v_available));
}

void ShenandoahGeneration::reset_mark_bitmap() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->assert_gc_workers(heap->workers()->active_workers());

  set_mark_incomplete();

  ShenandoahResetBitmapTask task;
  parallel_heap_region_iterate(&task);
}

// The ideal is to swap the remembered set so the safepoint effort is no more than a few pointer manipulations.
// However, limitations in the implementation of the mutator write-barrier make it difficult to simply change the
// location of the card table.  So the interim implementation of swap_remembered_set will copy the write-table
// onto the read-table and will then clear the write-table.
void ShenandoahGeneration::swap_remembered_set() {
  // Must be sure that marking is complete before we swap remembered set.
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->assert_gc_workers(heap->workers()->active_workers());
  shenandoah_assert_safepoint();

  // TODO: Eventually, we want to replace this with a constant-time exchange of pointers.
  ShenandoahSquirrelAwayCardTable task;
  heap->old_generation()->parallel_heap_region_iterate(&task);
}

// If a concurrent cycle fails _after_ the card table has been swapped, we need to update the read card
// table with any writes that have occurred during the transition to the degenerated cycle. Without this,
// newly created objects which are only referenced by old objects could be lost when the remembered set
// is scanned during the degenerated mark.
void ShenandoahGeneration::merge_write_table() {
  // This should only happen for degenerated cycles
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->assert_gc_workers(heap->workers()->active_workers());
  shenandoah_assert_safepoint();

  ShenandoahMergeWriteTable task;
  heap->old_generation()->parallel_heap_region_iterate(&task);
}

void ShenandoahGeneration::prepare_gc() {
  // Reset mark bitmap for this generation (typically young)
  reset_mark_bitmap();
  // Capture Top At Mark Start for this generation (typically young)
  ShenandoahResetUpdateRegionStateClosure cl;
  parallel_heap_region_iterate(&cl);
}

void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* heap, bool* preselected_regions,
                                                      ShenandoahCollectionSet* collection_set,
                                                      size_t &old_regions_loaned_for_young_evac, size_t &regions_available_to_loan,
                                                      size_t &minimum_evacuation_reserve, size_t &consumed_by_advance_promotion) {
  size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
  minimum_evacuation_reserve = ShenandoahOldCompactionReserve * region_size_bytes;
  old_regions_loaned_for_young_evac = 0;
  regions_available_to_loan = 0;
  consumed_by_advance_promotion = 0;
  if (heap->mode()->is_generational()) {
    ShenandoahGeneration* old_generation = heap->old_generation();
    ShenandoahYoungGeneration* young_generation = heap->young_generation();
    size_t avail_evac_reserve_for_loan_to_young_gen = 0;
    size_t old_evacuation_reserve = 0;
    size_t num_regions = heap->num_regions();

    // During initialization and phase changes, it is more likely that fewer objects die young and old-gen
    // memory is not yet full (or is in the process of being replaced).  During these times especially, it
    // is beneficial to loan memory from old-gen to young-gen during the evacuation and update-refs phases
    // of execution.

    // Calculate EvacuationReserve before PromotionReserve.  Evacuation is more critical than promotion.
    // If we cannot evacuate old-gen, we will not be able to reclaim old-gen memory.  Promotions are less
    // critical.  If we cannot promote, there may be degradation of young-gen memory because old objects
    // accumulate there until they can be promoted.  This increases the young-gen marking and evacuation work.

    // Do not fill up old-gen memory with promotions.  Reserve some amount of memory for compaction purposes.
    ShenandoahOldHeuristics* old_heuristics = heap->old_heuristics();
    size_t young_evac_reserve_max = 0;
    if (old_heuristics->unprocessed_old_collection_candidates() > 0) {
      // Compute old_evacuation_reserve: how much memory are we reserving to hold the results of
      // evacuating old-gen heap regions?  In order to sustain a consistent pace of young-gen collections,
      // the goal is to maintain a consistent value for this parameter (when the candidate set is not
      // empty).  This value is the minimum of:
      //   1. old_gen->available()
      //   2. (old_gen->capacity() * ShenandoahOldEvacReserve) / 100
      //       (e.g. old evacuation should be no larger than 5% of old-gen capacity)
      //   3. ((young_gen->capacity() * ShenandoahEvacReserve / 100) * ShenandoahOldEvacRatioPercent) / 100
      //       (e.g. old evacuation should be no larger than 12% of young-gen evacuation)
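      // For illustration only (hypothetical sizes, using the example percentages above): with 300 MB
      // available in old-gen, a 1 GB old-gen capacity, and a 2 GB young-gen capacity, the three bounds
      // would be 300 MB, ~51 MB, and ~12 MB respectively, so the reserve would be ~12 MB.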
      old_evacuation_reserve = old_generation->available();
      assert(old_evacuation_reserve > minimum_evacuation_reserve, "Old-gen available has not been preserved!");
      size_t old_evac_reserve_max = old_generation->soft_max_capacity() * ShenandoahOldEvacReserve / 100;
      if (old_evac_reserve_max < old_evacuation_reserve) {
        old_evacuation_reserve = old_evac_reserve_max;
      }
      young_evac_reserve_max =
        (((young_generation->soft_max_capacity() * ShenandoahEvacReserve) / 100) * ShenandoahOldEvacRatioPercent) / 100;
      if (young_evac_reserve_max < old_evacuation_reserve) {
        old_evacuation_reserve = young_evac_reserve_max;
      }
    }

    if (minimum_evacuation_reserve > old_generation->available()) {
      // Due to round-off errors during enforcement of minimum_evacuation_reserve during previous GC passes,
      // there can be slight discrepancies here.
      minimum_evacuation_reserve = old_generation->available();
    }
    if (old_evacuation_reserve < minimum_evacuation_reserve) {
      // Even if there's nothing to be evacuated on this cycle, we still need to reserve this memory for future
      // evacuations.  It is ok to loan this memory to young-gen if we don't need it for evacuation on this pass.
      avail_evac_reserve_for_loan_to_young_gen = minimum_evacuation_reserve - old_evacuation_reserve;
      old_evacuation_reserve = minimum_evacuation_reserve;
    }

    heap->set_old_evac_reserve(old_evacuation_reserve);
    heap->reset_old_evac_expended();

    // Compute the young evacuation reserve: This is how much memory is available for evacuating young-gen objects.
    // We ignore the possible effect of promotions, which reduce demand for young-gen evacuation memory.
    //
    // TODO: We could give special treatment to the regions that have reached promotion age, because we know their
    // live data is entirely eligible for promotion.  This knowledge can feed both into calculations of young-gen
    // evacuation reserve and promotion reserve.
    //
    //  young_evacuation_reserve for young generation: how much memory are we reserving to hold the results
    //  of evacuating young collection set regions?  This is typically smaller than the total amount
    //  of available memory, and is also smaller than the total amount of marked live memory within
    //  young-gen.  This value is the smaller of
    //
    //    1. (young_gen->capacity() * ShenandoahEvacReserve) / 100
    //    2. (young_gen->available() + old_gen_memory_available_to_be_loaned)
    //
    //  ShenandoahEvacReserve represents the configured target size of the evacuation reserve.  We can only honor
    //  this target if there is memory available to hold the evacuations.  Memory is available if it is already
    //  free within young gen, or if it can be borrowed from old gen.  Since we have not yet chosen the collection
    //  sets, we do not yet know the exact accounting of how many regions will be freed by this collection pass.
    //  What we do know is that there will be at least one evacuated young-gen region for each old-gen region that
    //  is loaned to the evacuation effort (because regions to be collected consume more memory than the compacted
    //  regions that will replace them).  In summary, if there are old-gen regions that are available to hold the
    //  results of young-gen evacuations, it is safe to loan them for this purpose.  At this point, we have not yet
    //  established a promoted_reserve.  We'll do that after we choose the collection set and analyze its impact
    //  on available memory.
    //
    // We do not know the evacuation_supplement until after we have computed the collection set.  It is not always
    // the case that young regions inserted into the collection set will result in a net decrease of in-use regions,
    // because ShenandoahEvacWaste multiplied by the live memory within the region may be larger than the region size.
    // The problem is especially relevant to regions that have been inserted into the collection set because they have
    // reached tenure age.  These regions tend to have much higher utilization (e.g. 95%).  These regions also offer
    // a unique opportunity because we know that every live object contained within the region is eligible to be
    // promoted.  Thus, the following implementation treats these regions specially:
    //
    //  1. Before beginning collection set selection, we tally the total amount of live memory held within regions
    //     that are known to have reached tenure age.  If this memory times ShenandoahEvacWaste is available within
    //     old-gen memory, establish an advance promotion reserve to hold all or some percentage of these objects.
    //     This advance promotion reserve is excluded from memory available for holding old-gen evacuations and cannot
    //     be "loaned" to young gen.
    //
    //  2. Tenure-aged regions are included in the collection set iff their evacuation size * ShenandoahEvacWaste fits
    //     within the advance promotion reserve.  It is counterproductive to evacuate these regions if they cannot be
    //     evacuated directly into old-gen memory.  So if there is not sufficient memory to hold copies of their
    //     live data right now, we'll just let these regions remain in young for now, to be evacuated by a subsequent
    //     evacuation pass.
    //
    //  3. Next, we calculate a young-gen evacuation budget, which is the smaller of the two quantities mentioned
    //     above.  old_gen_memory_available_to_be_loaned is calculated as:
    //       old_gen->available - (advance_promotion_reserve + old_gen_evacuation_reserve)
    //
    //  4. When choosing the collection set, special care is taken to assure that the amount of loaned memory required to
    //     hold the results of evacuation is smaller than the total memory occupied by the regions added to the collection
    //     set.  We need to take these precautions because we do not know how much memory will be reclaimed by evacuation
    //     until after the collection set has been constructed.  The algorithm is as follows:
    //
    //     a. We feed into the algorithm (i) young available at the start of evacuation and (ii) the amount of memory
    //        loaned from old-gen that is available to hold the results of evacuation.
    //     b. As candidate regions are added into the young-gen collection set, we maintain accumulations of the amount
    //        of memory spanned by the collection set regions and the amount of memory that must be reserved to hold
    //        evacuation results (by multiplying live-data size by ShenandoahEvacWaste).  We process candidate regions
    //        in order of decreasing amounts of garbage.  We skip over (and do not include into the collection set) any
    //        regions that do not satisfy all of the following conditions:
    //
    //          i. The amount of live data within the region as scaled by ShenandoahEvacWaste must fit within the
    //             relevant evacuation reserve (live data of old-gen regions must fit within the old-evac-reserve, live
    //             data of young-gen tenure-aged regions must fit within the advance promotion reserve, live data within
    //             other young-gen regions must fit within the young-gen evacuation reserve).
    //         ii. The accumulation of memory consumed by evacuation must not exceed the accumulation of memory reclaimed
    //             through evacuation by more than young-gen available.
    //        iii. Other conditions may be enforced as appropriate for specific heuristics.
    //
    //       Note that regions are considered for inclusion in the collection set in order of decreasing amounts of garbage.
    //       It is possible that a region with a larger amount of garbage will be rejected because it also has a larger
    //       amount of live data and some region that follows this region in candidate order is included in the collection
    //       set (because it has less live data and thus can fit within the evacuation limits even though it has less
    //       garbage).
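    //
    // For illustration only (hypothetical sizes): with a 2 GB young-gen capacity and a ShenandoahEvacReserve
    // of 5%, the target young evacuation reserve is ~102 MB.  If only 60 MB were free within young-gen, the
    // remaining ~42 MB shortfall would be covered by loaning unaffiliated old-gen regions, rounded up to a
    // whole number of regions (see the shortfall computation below).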

    size_t young_evacuation_reserve = (young_generation->max_capacity() * ShenandoahEvacReserve) / 100;
    // Old evacuation can pack into existing partially used regions.  Young evacuation and loans for young allocations
    // need to target regions that do not already hold any old-gen objects.  Round down.
    regions_available_to_loan = old_generation->free_unaffiliated_regions();
    consumed_by_advance_promotion = _heuristics->select_aged_regions(old_generation->available() - old_evacuation_reserve,
                                                                     num_regions, preselected_regions);
    size_t net_available_old_regions =
      (old_generation->available() - old_evacuation_reserve - consumed_by_advance_promotion) / region_size_bytes;

    if (regions_available_to_loan > net_available_old_regions) {
      regions_available_to_loan = net_available_old_regions;
    }

    // Otherwise, regions_available_to_loan is less than net_available_old_regions because available memory is
    // scattered between multiple partially used regions.

    if (young_evacuation_reserve > young_generation->available()) {
      size_t short_fall = young_evacuation_reserve - young_generation->available();
      if (regions_available_to_loan * region_size_bytes >= short_fall) {
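        // Round the shortfall up to a whole number of loaned regions.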
        old_regions_loaned_for_young_evac = (short_fall + region_size_bytes - 1) / region_size_bytes;
        regions_available_to_loan -= old_regions_loaned_for_young_evac;
      } else {
        old_regions_loaned_for_young_evac = regions_available_to_loan;
        regions_available_to_loan = 0;
        young_evacuation_reserve = young_generation->available() + old_regions_loaned_for_young_evac * region_size_bytes;
        // In this case, there's no memory available for new allocations while evacuating and updating, unless we
        // find more old-gen memory to borrow below.
      }
    } else {
      old_regions_loaned_for_young_evac = 0;
    }
    // In generational mode, we may end up choosing a young collection set that contains so many promotable objects
    // that there is not sufficient space in old generation to hold the promoted objects.  That is ok because we have
    // assured there is sufficient space in young generation to hold the rejected promotion candidates.  These rejected
    // promotion candidates will presumably be promoted in a future evacuation cycle.
    heap->set_young_evac_reserve(young_evacuation_reserve);
    collection_set->establish_preselected(preselected_regions);
  } else {
    // Not generational mode: limit young evac reserve by young available; no need to establish old_evac_reserve.
    ShenandoahYoungGeneration* young_generation = heap->young_generation();
    size_t young_evac_reserve = (young_generation->soft_max_capacity() * ShenandoahEvacReserve) / 100;
    if (young_evac_reserve > young_generation->available()) {
      young_evac_reserve = young_generation->available();
    }
    heap->set_young_evac_reserve(young_evac_reserve);
  }
}

// Having chosen the collection set, adjust the budgets for generational mode based on its composition.  Note
// that young_generation->available() now knows about recently discovered immediate garbage.
void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* heap, ShenandoahCollectionSet* collection_set,
                                                     size_t old_regions_loaned_for_young_evac, size_t regions_available_to_loan,
                                                     size_t minimum_evacuation_reserve, size_t consumed_by_advance_promotion) {
  if (heap->mode()->is_generational()) {
    size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
    ShenandoahOldGeneration* old_generation = heap->old_generation();
    ShenandoahYoungGeneration* young_generation = heap->young_generation();
    size_t old_evacuated = collection_set->get_old_bytes_reserved_for_evacuation();
    size_t old_evacuated_committed = (size_t) (ShenandoahEvacWaste * old_evacuated);
    size_t old_evacuation_reserve = heap->get_old_evac_reserve();
    // Immediate garbage found during choose_collection_set() is all young
    size_t immediate_garbage = collection_set->get_immediate_trash();
    size_t old_available = old_generation->available();
    size_t young_available = young_generation->available() + immediate_garbage;

    assert(consumed_by_advance_promotion >= collection_set->get_young_bytes_to_be_promoted() * ShenandoahEvacWaste,
           "Advance promotion should be at least young_bytes_to_be_promoted * ShenandoahEvacWaste");

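    // The 33/32 factor bounds the tolerated round-off at 1/32, i.e. 3.125%.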
    assert(consumed_by_advance_promotion <= (collection_set->get_young_bytes_to_be_promoted() * ShenandoahEvacWaste * 33) / 32,
           "Round-off errors should be less than 3.125%%, consumed by advance: " SIZE_FORMAT ", promoted: " SIZE_FORMAT,
           consumed_by_advance_promotion, (size_t) (collection_set->get_young_bytes_to_be_promoted() * ShenandoahEvacWaste));

    collection_set->abandon_preselected();

    if (old_evacuated_committed > old_evacuation_reserve) {
      // This should only happen due to round-off errors when enforcing ShenandoahEvacWaste
      assert(old_evacuated_committed <= (33 * old_evacuation_reserve) / 32,
             "Round-off errors should be less than 3.125%%, committed: " SIZE_FORMAT ", reserved: " SIZE_FORMAT,
             old_evacuated_committed, old_evacuation_reserve);
      old_evacuated_committed = old_evacuation_reserve;
    } else if (old_evacuated_committed < old_evacuation_reserve) {
      // This may happen if the old-gen collection consumes less than full budget.

      // If we shrink old_evacuation_reserve by more than a region size, we can expand regions_available_to_loan.
      // Can only give back regions that are fully unused, so round down.
      size_t old_evac_regions_unused = (old_evacuation_reserve - old_evacuated_committed) / region_size_bytes;
      regions_available_to_loan += old_evac_regions_unused;
      old_evacuation_reserve = old_evacuated_committed;
      heap->set_old_evac_reserve(old_evacuation_reserve);
    }

    // Recompute old_regions_loaned_for_young_evac because young-gen collection set may not need all the memory
    // originally reserved.
    size_t young_promoted = collection_set->get_young_bytes_to_be_promoted();
    size_t young_promoted_reserve_used = (size_t) (ShenandoahEvacWaste * young_promoted);

    size_t young_evacuated = collection_set->get_young_bytes_reserved_for_evacuation() - young_promoted;
    size_t young_evacuated_reserve_used = (size_t) (ShenandoahEvacWaste * young_evacuated);

    heap->set_young_evac_reserve(young_evacuated_reserve_used);

    // Adjust old_regions_loaned_for_young_evac to feed into calculations of promoted_reserve
    if (young_evacuated_reserve_used > young_available) {
      size_t short_fall = young_evacuated_reserve_used - young_available;

      // region_size_bytes is a power of 2.  Loan an integral number of regions.
      size_t revised_loan_for_young_evacuation = (short_fall + region_size_bytes - 1) / region_size_bytes;

      // Undo the previous loan
      regions_available_to_loan += old_regions_loaned_for_young_evac;
      old_regions_loaned_for_young_evac = revised_loan_for_young_evacuation;

      // And make a new loan
      assert(regions_available_to_loan >= old_regions_loaned_for_young_evac, "Cannot loan regions that we do not have");
      regions_available_to_loan -= revised_loan_for_young_evacuation;
    } else {
      // Undo the previous loan
      regions_available_to_loan += old_regions_loaned_for_young_evac;
      old_regions_loaned_for_young_evac = 0;
    }

    size_t old_bytes_loaned_for_young_evac = old_regions_loaned_for_young_evac * region_size_bytes;
    size_t old_bytes_reserved_for_alloc_supplement = 0;
    size_t old_regions_reserved_for_alloc_supplement = 0;
    // Need to enforce that old_evacuated_committed + old_bytes_loaned_for_young_evac >= minimum_evacuation_reserve
    // in order to prevent promotion reserve from violating minimum evacuation reserve.
    if (old_evacuated_committed + old_bytes_loaned_for_young_evac < minimum_evacuation_reserve) {
      // Reserve some of the regions available to loan as an allocation supplement, to assure this memory is
      // not consumed by promotion.
      size_t excess_bytes = minimum_evacuation_reserve - (old_evacuated_committed + old_bytes_loaned_for_young_evac);
      size_t excess_regions = (excess_bytes - 1 + region_size_bytes) / region_size_bytes;
      if (regions_available_to_loan <= excess_regions) {
        excess_regions = regions_available_to_loan;
        // Since we can't reserve the entire excess for the alloc supplement, pretend more is consumed by old-evacuation
        old_evacuated_committed =
          minimum_evacuation_reserve - old_bytes_loaned_for_young_evac - excess_regions * region_size_bytes;
      }
      regions_available_to_loan -= excess_regions;
      old_bytes_reserved_for_alloc_supplement = excess_regions * region_size_bytes;
      old_regions_reserved_for_alloc_supplement = excess_regions;
    }

    // Limit promoted_reserve so that we can set aside memory to be loaned from old-gen to young-gen.  This
    // value is not "critical".  If we underestimate, certain promotions will simply be deferred.  If we put
    // "all the rest" of old-gen memory into the promotion reserve, we'll have nothing left to loan to young-gen
    // during the evac and update phases of GC.  So we "limit" the size of the promotion budget to be the smaller of:
    //
    //  1. old_gen->available - (old_evacuated_committed + old_bytes_loaned_for_young_evac + consumed_by_advance_promotion
    //                           + old_bytes_[already]_reserved_for_alloc_supplement)
    //  2. young bytes reserved for evacuation (we can't promote more than young is evacuating)
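    //
    // For illustration only (hypothetical sizes): if old-gen has 500 MB available and the sum of old
    // evacuation, the young-evac loan, advance promotion, and the alloc supplement comes to 350 MB, then
    // at most 150 MB (rounded down to whole unaffiliated regions) remains for the regular promotion reserve.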

    assert(old_available > old_evacuated_committed, "Cannot evacuate more than available");
    assert(old_available > old_evacuated_committed + old_bytes_loaned_for_young_evac,
           "Cannot loan young evac more than available");
    assert(old_available > old_evacuated_committed + old_bytes_loaned_for_young_evac + consumed_by_advance_promotion,
           "Cannot promote more than available");
    assert(old_available > (old_evacuated_committed + old_bytes_loaned_for_young_evac +
                            consumed_by_advance_promotion + old_bytes_reserved_for_alloc_supplement),
           "Cannot loan for alloc supplement more than available");

    size_t promotion_reserve = regions_available_to_loan * region_size_bytes;
    assert(promotion_reserve <= old_available - (old_evacuated_committed + consumed_by_advance_promotion +
                                                 old_bytes_loaned_for_young_evac + old_bytes_reserved_for_alloc_supplement),
           "Byte reserves do not match region reserves");

    // We experimented with constraining promoted_reserve to be no larger than 4 times the size of previously_promoted,
    // but this constraint was too limiting, resulting in failure of legitimate promotions.  This was tried before we
    // had special handling in place for advance promotion.  We should retry now that advance promotion is handled
    // specially.

    // We had also experimented with constraining promoted_reserve to be no more than young_evacuated_reserve_used
    // divided by promotion_divisor, where:
    //  size_t promotion_divisor = (0x02 << InitialTenuringThreshold) - 1;
    // This also was found to be too limiting, resulting in failure of legitimate promotions.
    //
    // Both experiments were conducted in the presence of other bugs which could have been the root cause for
    // the failures identified above as being "too limiting".  TODO: conduct new experiments with the more limiting
    // values of young_evacuated_reserve_used.

    // young_evacuated_reserve_used already excludes bytes known to be promoted, which equal consumed_by_advance_promotion
    if (young_evacuated_reserve_used < promotion_reserve) {
      // Shrink promotion_reserve if it is larger than the memory to be consumed by evacuating all young objects in
      // the collection set, including anticipated waste.  There's no benefit in using a larger promotion_reserve.
      // young_evacuated_reserve_used does not include live memory within tenure-aged regions.
      promotion_reserve = young_evacuated_reserve_used;
    }

    assert(old_available >= (promotion_reserve + old_evacuated_committed + old_bytes_loaned_for_young_evac +
                             consumed_by_advance_promotion + old_bytes_reserved_for_alloc_supplement),
           "Budget exceeds available old-gen memory");
    log_debug(gc)("Old available: " SIZE_FORMAT ", Original promotion reserve: " SIZE_FORMAT ", Old evacuation reserve: "
                  SIZE_FORMAT ", Advance promotion reserve supplement: " SIZE_FORMAT
                  ", Old loaned for young evacuation: " SIZE_FORMAT ", Old reserved for alloc supplement: " SIZE_FORMAT,
                  old_available, promotion_reserve, old_evacuated_committed, consumed_by_advance_promotion,
                  old_regions_loaned_for_young_evac * region_size_bytes, old_bytes_reserved_for_alloc_supplement);

    promotion_reserve += consumed_by_advance_promotion;
    heap->set_promoted_reserve(promotion_reserve);

    size_t promotion_regions = (promotion_reserve + region_size_bytes - 1) / region_size_bytes;
    assert(regions_available_to_loan >= promotion_regions, "Promoting more regions than memory is available");
    regions_available_to_loan -= promotion_regions;
    heap->reset_promoted_expended();
    if (collection_set->get_old_bytes_reserved_for_evacuation() == 0) {
      // Setting old evacuation reserve to zero denotes that there is no old-gen evacuation in this pass.
      heap->set_old_evac_reserve(0);
    }

    size_t old_gen_usage_base = old_generation->used() - collection_set->get_old_garbage();
    heap->capture_old_usage(old_gen_usage_base);

    // Compute the evacuation supplement, which is extra memory borrowed from old-gen that can be allocated
    // by mutators while GC is working on evacuation and update-refs.  This memory can be temporarily borrowed
    // from old-gen allotment, then repaid at the end of update-refs from the recycled collection set.  After
    // we have computed the collection set based on the parameters established above, we can make additional
    // loans based on our knowledge of the collection set to determine how much allocation we can allow
    // during the evacuation and update-refs phases of execution.  The total available supplement is the smaller of:
    //
    //   1. old_gen->available() -
    //        (promotion_reserve + old_evacuated_committed + old_bytes_loaned_for_young_evac)
    //   2. The replenishment budget (number of regions in collection set - the number of regions already
    //         under lien for the young_evacuation_reserve)
    //

    // Regardless of how many regions may be available to be loaned, we can loan no more regions than
    // the total number of young regions to be evacuated.  Call this the regions_for_runway.
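    // For illustration only (hypothetical counts): if the collection set evacuates 10 young regions and
    // 3 old regions are already on loan, at most 7 additional regions can be loaned as runway.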

    size_t young_regions_evacuated = collection_set->get_young_region_count();
    size_t regions_for_runway = 0;
    size_t already_loaned_regions = old_regions_loaned_for_young_evac + old_regions_reserved_for_alloc_supplement;
    if (already_loaned_regions == 0) {
      regions_for_runway = young_regions_evacuated;
    } else if (young_regions_evacuated > already_loaned_regions) {
      regions_for_runway = young_regions_evacuated - already_loaned_regions;
    } else {
      regions_for_runway = 0;
    }

    if (regions_available_to_loan > regions_for_runway) {
      regions_available_to_loan -= regions_for_runway;
    } else {
      regions_for_runway = regions_available_to_loan;
      regions_available_to_loan = 0;
    }

    size_t allocation_supplement = regions_for_runway * region_size_bytes + old_bytes_reserved_for_alloc_supplement;
    heap->set_alloc_supplement_reserve(allocation_supplement);

    // TODO: young_available, which feeds into alloc_budget_evac_and_update, is lacking memory available within
    // existing young-gen regions that were not selected for the collection set.  Add this in and adjust the
    // log message (where it says "empty-region allocation budget").

    log_debug(gc)("Memory reserved for young evacuation: " SIZE_FORMAT "%s for evacuating " SIZE_FORMAT
                  "%s out of young available: " SIZE_FORMAT "%s",
                  byte_size_in_proper_unit(young_evacuated_reserve_used),
                  proper_unit_for_byte_size(young_evacuated_reserve_used),
                  byte_size_in_proper_unit(young_evacuated), proper_unit_for_byte_size(young_evacuated),
                  byte_size_in_proper_unit(young_available), proper_unit_for_byte_size(young_available));

    log_debug(gc)("Memory reserved for old evacuation: " SIZE_FORMAT "%s for evacuating " SIZE_FORMAT
                  "%s out of old available: " SIZE_FORMAT "%s",
                  byte_size_in_proper_unit(old_evacuated_committed), proper_unit_for_byte_size(old_evacuated_committed),
                  byte_size_in_proper_unit(old_evacuated), proper_unit_for_byte_size(old_evacuated),
                  byte_size_in_proper_unit(old_available), proper_unit_for_byte_size(old_available));

    assert(old_available >= old_evacuation_reserve + promotion_reserve + old_bytes_loaned_for_young_evac + allocation_supplement,
           "old_available must be larger than accumulated reserves");

    size_t regular_promotion = promotion_reserve - consumed_by_advance_promotion;
    size_t excess =
      old_available - (old_evacuation_reserve + promotion_reserve + old_bytes_loaned_for_young_evac + allocation_supplement);
    log_info(gc, ergo)("Old available: " SIZE_FORMAT "%s is partitioned into old evacuation budget: " SIZE_FORMAT
                       "%s, aged region promotion budget: " SIZE_FORMAT
                       "%s, regular region promotion budget: " SIZE_FORMAT
                       "%s, loaned for young evacuation: " SIZE_FORMAT
                       "%s, loaned for young allocations: " SIZE_FORMAT
                       "%s, excess: " SIZE_FORMAT "%s",
                       byte_size_in_proper_unit(old_available), proper_unit_for_byte_size(old_available),
                       byte_size_in_proper_unit(old_evacuation_reserve), proper_unit_for_byte_size(old_evacuation_reserve),
                       byte_size_in_proper_unit(consumed_by_advance_promotion),
                       proper_unit_for_byte_size(consumed_by_advance_promotion),
                       byte_size_in_proper_unit(regular_promotion), proper_unit_for_byte_size(regular_promotion),
                       byte_size_in_proper_unit(old_bytes_loaned_for_young_evac),
                       proper_unit_for_byte_size(old_bytes_loaned_for_young_evac),
                       byte_size_in_proper_unit(allocation_supplement), proper_unit_for_byte_size(allocation_supplement),
                       byte_size_in_proper_unit(excess), proper_unit_for_byte_size(excess));
  }
  // else, not generational: no evacuation budget adjustments required
}

void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahCollectionSet* collection_set = heap->collection_set();
  size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  assert(!heap->is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
  assert(generation_mode() != OLD, "Only YOUNG and GLOBAL GC perform evacuations");
  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
                            ShenandoahPhaseTimings::degen_gc_final_update_region_states);
    ShenandoahFinalMarkUpdateRegionStateClosure cl(complete_marking_context());
    parallel_heap_region_iterate(&cl);

    if (generation_mode() == YOUNG) {
      // We always need to update the watermark for old regions. If there
      // are mixed collections pending, we also need to synchronize the
      // pinned status for old regions. Since we are already visiting every
      // old region here, go ahead and sync the pin status too.
      ShenandoahFinalMarkUpdateRegionStateClosure old_cl(nullptr);
      heap->old_generation()->parallel_heap_region_iterate(&old_cl);
    }

    heap->assert_pinned_region_status();
  }

  {
    size_t old_regions_loaned_for_young_evac, regions_available_to_loan, minimum_evacuation_reserve, consumed_by_advance_promotion;
    bool* preselected_regions = nullptr;
    if (heap->mode()->is_generational()) {
      preselected_regions = (bool*) alloca(heap->num_regions() * sizeof(bool));
      for (unsigned int i = 0; i < heap->num_regions(); i++) {
        preselected_regions[i] = false;
      }
    }

    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
                            ShenandoahPhaseTimings::degen_gc_choose_cset);

    ShenandoahHeapLocker locker(heap->lock());
    collection_set->clear();
    // TODO: young_available can include available (between top() and end()) within each young region that is not
    // part of the collection set.  Making this memory available to the young_evacuation_reserve allows a larger
    // young collection set to be chosen when available memory is under extreme pressure.  Implementing this "improvement"
    // is tricky, because the incremental construction of the collection set actually changes the amount of memory
    // available to hold evacuated young-gen objects.  As currently implemented, the memory that is available within
    // non-empty regions that are not selected as part of the collection set can be allocated by the mutator while
    // GC is evacuating and updating references.

    // Budgeting parameters to compute_evacuation_budgets are passed by reference.
    compute_evacuation_budgets(heap, preselected_regions, collection_set, old_regions_loaned_for_young_evac,
                               regions_available_to_loan, minimum_evacuation_reserve, consumed_by_advance_promotion);
    _heuristics->choose_collection_set(collection_set, heap->old_heuristics());
    if (!collection_set->is_empty()) {
      adjust_evacuation_budgets(heap, collection_set, old_regions_loaned_for_young_evac, regions_available_to_loan,
                                minimum_evacuation_reserve, consumed_by_advance_promotion);
    }
    // otherwise, this is an abbreviated cycle and we make no use of evacuation budgets.
  }

  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
                            ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
    ShenandoahHeapLocker locker(heap->lock());
    heap->free_set()->rebuild();
  }
}

bool ShenandoahGeneration::is_bitmap_clear() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahMarkingContext* context = heap->marking_context();
  size_t num_regions = heap->num_regions();
  for (size_t idx = 0; idx < num_regions; idx++) {
    ShenandoahHeapRegion* r = heap->get_region(idx);
    if (contains(r) && (r->affiliation() != FREE)) {
      if (heap->is_bitmap_slice_committed(r) && (context->top_at_mark_start(r) > r->bottom()) &&
          !context->is_bitmap_clear_range(r->bottom(), r->end())) {
        return false;
      }
    }
  }
  return true;
}

bool ShenandoahGeneration::is_mark_complete() {
  return _is_marking_complete.is_set();
}

void ShenandoahGeneration::set_mark_complete() {
  _is_marking_complete.set();
}

void ShenandoahGeneration::set_mark_incomplete() {
  _is_marking_complete.unset();
}

ShenandoahMarkingContext* ShenandoahGeneration::complete_marking_context() {
  assert(is_mark_complete(), "Marking must be completed.");
  return ShenandoahHeap::heap()->marking_context();
}

void ShenandoahGeneration::cancel_marking() {
  log_info(gc)("Cancel marking: %s", name());
  if (is_concurrent_mark_in_progress()) {
    set_mark_incomplete();
  }
  _task_queues->clear();
  ref_processor()->abandon_partial_discovery();
  set_concurrent_mark_in_progress(false);
}

ShenandoahGeneration::ShenandoahGeneration(GenerationMode generation_mode,
                                           uint max_workers,
                                           size_t max_capacity,
                                           size_t soft_max_capacity) :
  _generation_mode(generation_mode),
  _task_queues(new ShenandoahObjToScanQueueSet(max_workers)),
  _ref_processor(new ShenandoahReferenceProcessor(MAX2(max_workers, 1U))),
  _affiliated_region_count(0), _used(0), _bytes_allocated_since_gc_start(0),
  _max_capacity(max_capacity), _soft_max_capacity(soft_max_capacity),
  _adjusted_capacity(soft_max_capacity), _heuristics(nullptr) {
  _is_marking_complete.set();
  assert(max_workers > 0, "At least one queue");
  for (uint i = 0; i < max_workers; ++i) {
    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
    _task_queues->register_queue(i, task_queue);
  }
}

ShenandoahGeneration::~ShenandoahGeneration() {
  for (uint i = 0; i < _task_queues->size(); ++i) {
    ShenandoahObjToScanQueue* q = _task_queues->queue(i);
    delete q;
  }
  delete _task_queues;
}

void ShenandoahGeneration::reserve_task_queues(uint workers) {
  _task_queues->reserve(workers);
}

ShenandoahObjToScanQueueSet* ShenandoahGeneration::old_gen_task_queues() const {
  return nullptr;
}

void ShenandoahGeneration::scan_remembered_set(bool is_concurrent) {
  assert(generation_mode() == YOUNG, "Should only scan remembered set for young generation.");

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  uint nworkers = heap->workers()->active_workers();
  reserve_task_queues(nworkers);

  ShenandoahReferenceProcessor* rp = ref_processor();
  ShenandoahRegionChunkIterator work_list(nworkers);
  ShenandoahScanRememberedTask task(task_queues(), old_gen_task_queues(), rp, &work_list, is_concurrent);
  heap->assert_gc_workers(nworkers);
  heap->workers()->run_task(&task);
}

void ShenandoahGeneration::increment_affiliated_region_count() {
  _affiliated_region_count++;
}

void ShenandoahGeneration::decrement_affiliated_region_count() {
  _affiliated_region_count--;
}

void ShenandoahGeneration::clear_used() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
  // Do this atomically to assure visibility to other threads, even though these other threads may be idle "right now".
  Atomic::store(&_used, (size_t)0);
}

void ShenandoahGeneration::increase_used(size_t bytes) {
  Atomic::add(&_used, bytes);
}

void ShenandoahGeneration::decrease_used(size_t bytes) {
  assert(_used >= bytes, "cannot reduce bytes used by generation below zero");
  Atomic::sub(&_used, bytes);
}

size_t ShenandoahGeneration::used_regions() const {
  return _affiliated_region_count;
}

size_t ShenandoahGeneration::free_unaffiliated_regions() const {
  size_t result = soft_max_capacity() / ShenandoahHeapRegion::region_size_bytes();
  if (_affiliated_region_count > result) {
    result = 0;                 // If old-gen is loaning regions to young-gen, affiliated regions may exceed capacity temporarily.
  } else {
    result -= _affiliated_region_count;
  }
  return result;
}

size_t ShenandoahGeneration::used_regions_size() const {
  return _affiliated_region_count * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahGeneration::available() const {
  size_t in_use = used();
  size_t soft_capacity = soft_max_capacity();
  return in_use > soft_capacity ? 0 : soft_capacity - in_use;
}

size_t ShenandoahGeneration::adjust_available(intptr_t adjustment) {
  _adjusted_capacity = soft_max_capacity() + adjustment;
  return _adjusted_capacity;
}

size_t ShenandoahGeneration::unadjust_available() {
  _adjusted_capacity = soft_max_capacity();
  return _adjusted_capacity;
}

size_t ShenandoahGeneration::adjusted_available() const {
  size_t in_use = used();
  size_t capacity = _adjusted_capacity;
  return in_use > capacity ? 0 : capacity - in_use;
}

void ShenandoahGeneration::record_success_concurrent(bool abbreviated) {
  heuristics()->record_success_concurrent(abbreviated);
  ShenandoahHeap::heap()->shenandoah_policy()->record_success_concurrent();
}

void ShenandoahGeneration::record_success_degenerated() {
  heuristics()->record_success_degenerated();
  ShenandoahHeap::heap()->shenandoah_policy()->record_success_degenerated();
}