/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahMarkClosures.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"

class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
 private:
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _ctx;
 public:
  ShenandoahResetUpdateRegionStateClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(_heap->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) override {
    if (_heap->is_bitmap_slice_committed(r)) {
      _ctx->clear_bitmap(r);
    }

    if (r->is_active()) {
      // Reset live data and set TAMS optimistically. We would recheck these under the pause
      // anyway to capture any updates that happen between now and the pause.
      _ctx->capture_top_at_mark_start(r);
      r->clear_live_data();
    }
  }

  bool is_thread_safe() override { return true; }
};

class ShenandoahResetBitmapTask : public ShenandoahHeapRegionClosure {
 private:
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _ctx;
 public:
  ShenandoahResetBitmapTask() :
    _heap(ShenandoahHeap::heap()),
    _ctx(_heap->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* region) override {
    if (_heap->is_bitmap_slice_committed(region)) {
      _ctx->clear_bitmap(region);
    }
  }

  bool is_thread_safe() override { return true; }
};

class ShenandoahMergeWriteTable: public ShenandoahHeapRegionClosure {
 private:
  ShenandoahHeap* _heap;
  RememberedScanner* _scanner;
 public:
  ShenandoahMergeWriteTable() : _heap(ShenandoahHeap::heap()), _scanner(_heap->card_scan()) {}

  virtual void heap_region_do(ShenandoahHeapRegion* r) override {
    if (r->is_old()) {
      _scanner->merge_write_table(r->bottom(), ShenandoahHeapRegion::region_size_words());
    }
  }

  virtual bool is_thread_safe() override {
    return true;
  }
};

class ShenandoahSquirrelAwayCardTable: public ShenandoahHeapRegionClosure {
 private:
  ShenandoahHeap* _heap;
  RememberedScanner* _scanner;
 public:
  ShenandoahSquirrelAwayCardTable() :
    _heap(ShenandoahHeap::heap()),
    _scanner(_heap->card_scan()) {}

  void heap_region_do(ShenandoahHeapRegion* region) override {
    if (region->is_old()) {
      _scanner->reset_remset(region->bottom(), ShenandoahHeapRegion::region_size_words());
    }
  }

  bool is_thread_safe() override { return true; }
};

void ShenandoahGeneration::confirm_heuristics_mode() {
  if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _heuristics->name()));
  }
  if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _heuristics->name()));
  }
}

ShenandoahHeuristics* ShenandoahGeneration::initialize_heuristics(ShenandoahMode* gc_mode) {
  _heuristics = gc_mode->initialize_heuristics(this);
  _heuristics->set_guaranteed_gc_interval(ShenandoahGuaranteedGCInterval);
  confirm_heuristics_mode();
  return _heuristics;
}

size_t ShenandoahGeneration::bytes_allocated_since_gc_start() {
  return Atomic::load(&_bytes_allocated_since_gc_start);
}

void ShenandoahGeneration::reset_bytes_allocated_since_gc_start() {
  Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
}

void ShenandoahGeneration::increase_allocated(size_t bytes) {
  Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
}

void ShenandoahGeneration::log_status(const char *msg) const {
  typedef LogTarget(Info, gc, ergo) LogGcInfo;

  if (!LogGcInfo::is_enabled()) {
    return;
  }

  // Not under a lock here, so read each value once to make sure the
  // byte size and the unit it is reported in stay consistent.
  size_t v_used = used();
  size_t v_used_regions = used_regions_size();
  size_t v_soft_max_capacity = soft_max_capacity();
  size_t v_max_capacity = max_capacity();
  size_t v_available = available();
  size_t v_humongous_waste = get_humongous_waste();
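  // The resulting message looks like, e.g. (illustrative values):
  //   "<msg>: Young generation used: 120M, used regions: 150M, humongous waste: 0B,
  //    soft capacity: 512M, max capacity: 512M, available: 390M"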
  LogGcInfo::print("%s: %s generation used: " SIZE_FORMAT "%s, used regions: " SIZE_FORMAT "%s, "
                   "humongous waste: " SIZE_FORMAT "%s, soft capacity: " SIZE_FORMAT "%s, max capacity: " SIZE_FORMAT "%s, "
                   "available: " SIZE_FORMAT "%s", msg, name(),
                   byte_size_in_proper_unit(v_used),              proper_unit_for_byte_size(v_used),
                   byte_size_in_proper_unit(v_used_regions),      proper_unit_for_byte_size(v_used_regions),
                   byte_size_in_proper_unit(v_humongous_waste),   proper_unit_for_byte_size(v_humongous_waste),
                   byte_size_in_proper_unit(v_soft_max_capacity), proper_unit_for_byte_size(v_soft_max_capacity),
                   byte_size_in_proper_unit(v_max_capacity),      proper_unit_for_byte_size(v_max_capacity),
                   byte_size_in_proper_unit(v_available),         proper_unit_for_byte_size(v_available));
}

void ShenandoahGeneration::reset_mark_bitmap() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->assert_gc_workers(heap->workers()->active_workers());

  set_mark_incomplete();

  ShenandoahResetBitmapTask task;
  parallel_heap_region_iterate(&task);
}

// The ideal is to swap the remembered set so the safepoint effort is no more than a few pointer manipulations.
// However, limitations in the implementation of the mutator write-barrier make it difficult to simply change the
// location of the card table.  So the interim implementation of swap_remembered_set will copy the write-table
// onto the read-table and will then clear the write-table.
void ShenandoahGeneration::swap_remembered_set() {
  // Must be sure that marking is complete before we swap remembered set.
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->assert_gc_workers(heap->workers()->active_workers());
  shenandoah_assert_safepoint();

  // TODO: Eventually, we want to replace this with a constant-time exchange of pointers.
  ShenandoahSquirrelAwayCardTable task;
  heap->old_generation()->parallel_heap_region_iterate(&task);
}

// If a concurrent cycle fails _after_ the card table has been swapped, we need to update the read card
// table with any writes that have occurred during the transition to the degenerated cycle. Without this,
// newly created objects which are only referenced by old objects could be lost when the remembered set
// is scanned during the degenerated mark.
void ShenandoahGeneration::merge_write_table() {
  // This should only happen for degenerated cycles
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  heap->assert_gc_workers(heap->workers()->active_workers());
  shenandoah_assert_safepoint();

  ShenandoahMergeWriteTable task;
  heap->old_generation()->parallel_heap_region_iterate(&task);
}

void ShenandoahGeneration::prepare_gc() {
  // Invalidate the marking context
  set_mark_incomplete();

  // Capture Top At Mark Start for this generation (typically young) and reset mark bitmap.
  ShenandoahResetUpdateRegionStateClosure cl;
  parallel_heap_region_iterate(&cl);
}

void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* heap, bool* preselected_regions,
                                                      ShenandoahCollectionSet* collection_set,
                                                      size_t &consumed_by_advance_promotion) {
  size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
  size_t regions_available_to_loan = 0;
  size_t minimum_evacuation_reserve = ShenandoahOldCompactionReserve * region_size_bytes;
  size_t old_regions_loaned_for_young_evac = 0;
  consumed_by_advance_promotion = 0;

  ShenandoahGeneration* old_generation = heap->old_generation();
  ShenandoahYoungGeneration* young_generation = heap->young_generation();
  size_t old_evacuation_reserve = 0;
  size_t num_regions = heap->num_regions();

  // During initialization and phase changes, it is more likely that fewer objects die young and old-gen
  // memory is not yet full (or is in the process of being replaced).  During these times especially, it
  // is beneficial to loan memory from old-gen to young-gen during the evacuation and update-refs phases
  // of execution.

  // Calculate EvacuationReserve before PromotionReserve.  Evacuation is more critical than promotion.
  // If we cannot evacuate old-gen, we will not be able to reclaim old-gen memory.  Promotions are less
  // critical.  If we cannot promote, there may be degradation of young-gen memory because old objects
  // accumulate there until they can be promoted.  This increases the young-gen marking and evacuation work.

  // Do not fill up old-gen memory with promotions.  Reserve some amount of memory for compaction purposes.
  size_t young_evac_reserve_max = 0;

  // First priority is to reclaim the easy garbage out of young-gen.

  // maximum_young_evacuation_reserve is an upper bound on memory to be evacuated out of young
  size_t maximum_young_evacuation_reserve = (young_generation->max_capacity() * ShenandoahEvacReserve) / 100;
  size_t young_evacuation_reserve = maximum_young_evacuation_reserve;
  size_t excess_young;
  if (young_generation->available() > young_evacuation_reserve) {
    excess_young = young_generation->available() - young_evacuation_reserve;
  } else {
    young_evacuation_reserve = young_generation->available();
    excess_young = 0;
  }
  size_t unaffiliated_young = young_generation->free_unaffiliated_regions() * region_size_bytes;
  if (excess_young > unaffiliated_young) {
    excess_young = unaffiliated_young;
  } else {
    // round down to multiple of region size
    excess_young /= region_size_bytes;
    excess_young *= region_size_bytes;
  }
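  // For example (illustrative, assuming 2 MB regions): if 5 MB of excess fits within unaffiliated young
  // memory, it is rounded down to 4 MB so that only whole regions are considered for transfer to OLD.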
  // excess_young is available to be transferred to OLD.  Assume that OLD will not request any more than had
  // already been set aside for its promotion and evacuation needs at the end of previous GC.  No need to
  // hold back memory for allocation runway.

  ShenandoahOldHeuristics* old_heuristics = heap->old_heuristics();

  // maximum_old_evacuation_reserve is an upper bound on memory evacuated from old and evacuated to old (promoted).
  size_t maximum_old_evacuation_reserve =
    maximum_young_evacuation_reserve * ShenandoahOldEvacRatioPercent / (100 - ShenandoahOldEvacRatioPercent);
  // Here's the algebra:
  //  TotalEvacuation = OldEvacuation + YoungEvacuation
  //  OldEvacuation = TotalEvacuation * (ShenandoahOldEvacRatioPercent/100)
  //  OldEvacuation = YoungEvacuation * (ShenandoahOldEvacRatioPercent/100)/(1 - ShenandoahOldEvacRatioPercent/100)
  //  OldEvacuation = YoungEvacuation * ShenandoahOldEvacRatioPercent/(100 - ShenandoahOldEvacRatioPercent)
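  //  For illustration: with ShenandoahOldEvacRatioPercent == 50 the old reserve equals the young reserve,
  //  with 75 it is three times the young reserve (75/(100-75) == 3), and with 25 it is one third of it.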

  if (maximum_old_evacuation_reserve > old_generation->available()) {
    maximum_old_evacuation_reserve = old_generation->available();
  }

  // Second priority is to reclaim garbage out of old-gen if there are old-gen collection candidates.  Third priority
  // is to promote as much as we have room to promote.  However, if old-gen memory is in short supply, this means young
  // GC is operating under "duress" and was unable to transfer the memory that we would normally expect.  In this case,
  // old-gen will refrain from compacting itself in order to allow a quicker young-gen cycle (by avoiding the update-refs
  // through ALL of old-gen).  If there is some memory available in old-gen, we will use this for promotions as promotions
  // do not add to the update-refs burden of GC.

  size_t old_promo_reserve;
  if (old_heuristics->unprocessed_old_collection_candidates() > 0) {
    // We reserved all old-gen memory at end of previous GC to hold anticipated evacuations to old-gen.  If this is
    // a mixed evacuation, reserve all of this memory for compaction of old-gen and do not promote.  Prioritize compaction
    // over promotion in order to defragment OLD so that it will be better prepared to efficiently receive promoted memory.
    old_evacuation_reserve = maximum_old_evacuation_reserve;
    old_promo_reserve = 0;
  } else {
    // Make all old-evacuation memory available for promotion; if we can't use it all for promotion, we'll allow some evacuation.
    old_evacuation_reserve = 0;
    old_promo_reserve = maximum_old_evacuation_reserve;
  }
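  // Either way, the entire old reserve starts out dedicated to one purpose: compaction when mixed-evacuation
  // candidates are pending, promotion otherwise.  Any promotion budget that select_aged_regions() below cannot
  // consume is handed back to old evacuation.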

  // We see too many old-evacuation failures if we force ourselves to evacuate into regions that are not initially empty.
  // So we limit the old-evacuation reserve to unfragmented memory.  Even so, old-evacuation is free to fill in nooks and
  // crannies within existing partially used regions and it generally tries to do so.
  size_t old_free_regions = old_generation->free_unaffiliated_regions();
  size_t old_free_unfragmented = old_free_regions * region_size_bytes;
  if (old_evacuation_reserve > old_free_unfragmented) {
    size_t delta = old_evacuation_reserve - old_free_unfragmented;
    old_evacuation_reserve -= delta;

    // Let promo consume fragments of old-gen memory.
    old_promo_reserve += delta;
  }
  collection_set->establish_preselected(preselected_regions);
  consumed_by_advance_promotion = _heuristics->select_aged_regions(old_promo_reserve, num_regions, preselected_regions);
  assert(consumed_by_advance_promotion <= maximum_old_evacuation_reserve, "Cannot promote more than available old-gen memory");
  if (consumed_by_advance_promotion < old_promo_reserve) {
    // If we're in a global collection, this memory can be used for old evacuations
    old_evacuation_reserve += old_promo_reserve - consumed_by_advance_promotion;
  }
  heap->set_young_evac_reserve(young_evacuation_reserve);
  heap->set_old_evac_reserve(old_evacuation_reserve);
  heap->set_promoted_reserve(consumed_by_advance_promotion);

  // There is no need to expand OLD because all memory used here was set aside at end of previous GC
}

// Having chosen the collection set, adjust the budgets for generational mode based on its composition.  Note
// that young_generation->available() now knows about recently discovered immediate garbage.

void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* heap, ShenandoahCollectionSet* collection_set,
                                                     size_t consumed_by_advance_promotion) {
  // We may find that old_evacuation_reserve and/or loaned_for_young_evacuation are not fully consumed, in which case we may
  //  be able to increase regions_available_to_loan

  // The role of adjust_evacuation_budgets() is to compute the correct value of regions_available_to_loan and to make
  // effective use of this memory, including the remnant memory within these regions that may result from rounding loan to
  // integral number of regions.  Excess memory that is available to be loaned is applied to an allocation supplement,
  // which allows mutators to allocate memory beyond the current capacity of young-gen on the promise that the loan
  // will be repaid as soon as we finish updating references for the recently evacuated collection set.

  // We cannot recalculate regions_available_to_loan by simply dividing old_generation->available() by region_size_bytes
  // because the available memory may be distributed between many partially occupied regions that are already holding old-gen
  // objects.  Memory in partially occupied regions is not "available" to be loaned.  Note that an increase in old-gen
  // available that results from a decrease in memory consumed by old evacuation is not necessarily available to be loaned
  // to young-gen.

  size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
  ShenandoahOldGeneration* old_generation = heap->old_generation();
  ShenandoahYoungGeneration* young_generation = heap->young_generation();

  // Preselected regions have been inserted into the collection set, so we no longer need the preselected array.
  collection_set->abandon_preselected();

  size_t old_evacuated = collection_set->get_old_bytes_reserved_for_evacuation();
  size_t old_evacuated_committed = (size_t) (ShenandoahOldEvacWaste * old_evacuated);
  size_t old_evacuation_reserve = heap->get_old_evac_reserve();

  if (old_evacuated_committed > old_evacuation_reserve) {
    // This should only happen due to round-off errors when enforcing ShenandoahOldEvacWaste
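    // (33/32 in the assert below allows a 1/32 == 3.125% tolerance for such rounding.)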
    assert(old_evacuated_committed <= (33 * old_evacuation_reserve) / 32,
           "Round-off errors should be less than 3.125%%, committed: " SIZE_FORMAT ", reserved: " SIZE_FORMAT,
           old_evacuated_committed, old_evacuation_reserve);
    old_evacuated_committed = old_evacuation_reserve;
    // Leave old_evac_reserve as previously configured
  } else if (old_evacuated_committed < old_evacuation_reserve) {
    // This happens if the old-gen collection consumes less than full budget.
    old_evacuation_reserve = old_evacuated_committed;
    heap->set_old_evac_reserve(old_evacuation_reserve);
  }

  size_t young_advance_promoted = collection_set->get_young_bytes_to_be_promoted();
  size_t young_advance_promoted_reserve_used = (size_t) (ShenandoahPromoEvacWaste * young_advance_promoted);

  size_t young_evacuated = collection_set->get_young_bytes_reserved_for_evacuation();
  size_t young_evacuated_reserve_used = (size_t) (ShenandoahEvacWaste * young_evacuated);

  assert(young_evacuated_reserve_used <= young_generation->available(), "Cannot evacuate more than is available in young");
  heap->set_young_evac_reserve(young_evacuated_reserve_used);

  size_t old_available = old_generation->available();
  // Now that we've established the collection set, we know how much memory is really required by old-gen for evacuation
  // and promotion reserves.  Try shrinking OLD now in case that gives us a bit more runway for mutator allocations during
  // evac and update phases.
  size_t old_consumed = old_evacuated_committed + young_advance_promoted_reserve_used;
  assert(old_available >= old_consumed, "Cannot consume more than is available");
  size_t excess_old = old_available - old_consumed;
  size_t unaffiliated_old_regions = old_generation->free_unaffiliated_regions();
  size_t unaffiliated_old = unaffiliated_old_regions * region_size_bytes;
  assert(old_available >= unaffiliated_old, "Unaffiliated old is a subset of old available");

  // Make sure old_evac_committed is unaffiliated
  if (old_evacuated_committed > 0) {
    if (unaffiliated_old > old_evacuated_committed) {
      size_t giveaway = unaffiliated_old - old_evacuated_committed;
      size_t giveaway_regions = giveaway / region_size_bytes;  // round down
      if (giveaway_regions > 0) {
        excess_old = MIN2(excess_old, giveaway_regions * region_size_bytes);
      } else {
        excess_old = 0;
      }
    } else {
      excess_old = 0;
    }
  }

  // If we find that OLD has excess regions, give them back to YOUNG now to reduce likelihood we run out of allocation
  // runway during evacuation and update-refs.
  size_t regions_to_xfer = 0;
  if (excess_old > unaffiliated_old) {
    // we can give back unaffiliated_old (all of unaffiliated is excess)
    if (unaffiliated_old_regions > 0) {
      regions_to_xfer = unaffiliated_old_regions;
    }
  } else if (unaffiliated_old_regions > 0) {
    // excess_old <= unaffiliated_old: we can give back MIN(excess_old/region_size_bytes, unaffiliated_old_regions)
    size_t excess_regions = excess_old / region_size_bytes;
    regions_to_xfer = MIN2(excess_regions, unaffiliated_old_regions);
  }

  if (regions_to_xfer > 0) {
    bool result = heap->generation_sizer()->transfer_to_young(regions_to_xfer);
    assert(excess_old >= regions_to_xfer * region_size_bytes, "Cannot xfer more than excess old");
    excess_old -= regions_to_xfer * region_size_bytes;
    log_info(gc, ergo)("%s transferred " SIZE_FORMAT " excess regions to young before start of evacuation",
                       result ? "Successfully" : "Unsuccessfully", regions_to_xfer);
  }

  // Add in the excess_old memory to hold unanticipated promotions, if any.  If there are more unanticipated
  // promotions than fit in reserved memory, they will be deferred until a future GC pass.
  size_t total_promotion_reserve = young_advance_promoted_reserve_used + excess_old;
  heap->set_promoted_reserve(total_promotion_reserve);
  heap->reset_promoted_expended();
}

void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahCollectionSet* collection_set = heap->collection_set();

  assert(!heap->is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
  assert(!is_old(), "Only YOUNG and GLOBAL GC perform evacuations");
  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
                            ShenandoahPhaseTimings::degen_gc_final_update_region_states);
    ShenandoahFinalMarkUpdateRegionStateClosure cl(complete_marking_context());
    parallel_heap_region_iterate(&cl);

    if (is_young()) {
      // We always need to update the watermark for old regions. If there
      // are mixed collections pending, we also need to synchronize the
      // pinned status for old regions. Since we are already visiting every
      // old region here, go ahead and sync the pin status too.
      ShenandoahFinalMarkUpdateRegionStateClosure old_cl(nullptr);
      heap->old_generation()->parallel_heap_region_iterate(&old_cl);
    }
  }

  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
                            ShenandoahPhaseTimings::degen_gc_choose_cset);

    collection_set->clear();
    ShenandoahHeapLocker locker(heap->lock());
    if (heap->mode()->is_generational()) {
      size_t consumed_by_advance_promotion;
      bool* preselected_regions = (bool*) alloca(heap->num_regions() * sizeof(bool));
      for (unsigned int i = 0; i < heap->num_regions(); i++) {
        preselected_regions[i] = false;
      }

      // TODO: young_available can include available (between top() and end()) within each young region that is not
      // part of the collection set.  Making this memory available to the young_evacuation_reserve allows a larger
      // young collection set to be chosen when available memory is under extreme pressure.  Implementing this "improvement"
      // is tricky, because the incremental construction of the collection set actually changes the amount of memory
      // available to hold evacuated young-gen objects.  As currently implemented, the memory that is available within
      // non-empty regions that are not selected as part of the collection set can be allocated by the mutator while
      // GC is evacuating and updating references.

      // Budgeting parameters to compute_evacuation_budgets are passed by reference.
      compute_evacuation_budgets(heap, preselected_regions, collection_set, consumed_by_advance_promotion);
      _heuristics->choose_collection_set(collection_set, heap->old_heuristics());
      if (!collection_set->is_empty()) {
        // only make use of evacuation budgets when we are evacuating
        adjust_evacuation_budgets(heap, collection_set, consumed_by_advance_promotion);
      }
    } else {
      _heuristics->choose_collection_set(collection_set, heap->old_heuristics());
    }
  }

  // Freeset construction uses reserve quantities if they are valid
  heap->set_evacuation_reserve_quantities(true);
  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
                            ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
    ShenandoahHeapLocker locker(heap->lock());
    size_t young_cset_regions, old_cset_regions;

    // We are preparing for evacuation.  At this time, we ignore cset region tallies.
    heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions);
    heap->free_set()->rebuild(young_cset_regions, old_cset_regions);
  }
  heap->set_evacuation_reserve_quantities(false);
}

bool ShenandoahGeneration::is_bitmap_clear() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahMarkingContext* context = heap->marking_context();
  size_t num_regions = heap->num_regions();
  for (size_t idx = 0; idx < num_regions; idx++) {
    ShenandoahHeapRegion* r = heap->get_region(idx);
    if (contains(r) && r->is_affiliated()) {
      if (heap->is_bitmap_slice_committed(r) && (context->top_at_mark_start(r) > r->bottom()) &&
          !context->is_bitmap_clear_range(r->bottom(), r->end())) {
        return false;
      }
    }
  }
  return true;
}

bool ShenandoahGeneration::is_mark_complete() {
  return _is_marking_complete.is_set();
}

void ShenandoahGeneration::set_mark_complete() {
  _is_marking_complete.set();
}

void ShenandoahGeneration::set_mark_incomplete() {
  _is_marking_complete.unset();
}

ShenandoahMarkingContext* ShenandoahGeneration::complete_marking_context() {
  assert(is_mark_complete(), "Marking must be completed.");
  return ShenandoahHeap::heap()->marking_context();
}

void ShenandoahGeneration::cancel_marking() {
  log_info(gc)("Cancel marking: %s", name());
  if (is_concurrent_mark_in_progress()) {
    set_mark_incomplete();
  }
  _task_queues->clear();
  ref_processor()->abandon_partial_discovery();
  set_concurrent_mark_in_progress(false);
}

ShenandoahGeneration::ShenandoahGeneration(ShenandoahGenerationType type,
                                           uint max_workers,
                                           size_t max_capacity,
                                           size_t soft_max_capacity) :
  _type(type),
  _task_queues(new ShenandoahObjToScanQueueSet(max_workers)),
  _ref_processor(new ShenandoahReferenceProcessor(MAX2(max_workers, 1U))),
  _collection_thread_time_s(0.0),
  _affiliated_region_count(0), _humongous_waste(0), _used(0), _bytes_allocated_since_gc_start(0),
  _max_capacity(max_capacity), _soft_max_capacity(soft_max_capacity),
  _heuristics(nullptr) {
  _is_marking_complete.set();
  assert(max_workers > 0, "At least one queue");
  for (uint i = 0; i < max_workers; ++i) {
    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
    _task_queues->register_queue(i, task_queue);
  }
}

ShenandoahGeneration::~ShenandoahGeneration() {
  for (uint i = 0; i < _task_queues->size(); ++i) {
    ShenandoahObjToScanQueue* q = _task_queues->queue(i);
    delete q;
  }
  delete _task_queues;
}

void ShenandoahGeneration::reserve_task_queues(uint workers) {
  _task_queues->reserve(workers);
}

ShenandoahObjToScanQueueSet* ShenandoahGeneration::old_gen_task_queues() const {
  return nullptr;
}

void ShenandoahGeneration::scan_remembered_set(bool is_concurrent) {
  assert(is_young(), "Should only scan remembered set for young generation.");

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  uint nworkers = heap->workers()->active_workers();
  reserve_task_queues(nworkers);

  ShenandoahReferenceProcessor* rp = ref_processor();
  ShenandoahRegionChunkIterator work_list(nworkers);
  ShenandoahScanRememberedTask task(task_queues(), old_gen_task_queues(), rp, &work_list, is_concurrent);
  heap->assert_gc_workers(nworkers);
  heap->workers()->run_task(&task);
  if (ShenandoahEnableCardStats) {
    assert(heap->card_scan() != nullptr, "Not generational");
    heap->card_scan()->log_card_stats(nworkers, CARD_STAT_SCAN_RS);
  }
}

size_t ShenandoahGeneration::increment_affiliated_region_count() {
  shenandoah_assert_heaplocked_or_fullgc_safepoint();
  // During full gc, multiple GC worker threads may change region affiliations without a lock.  No lock is enforced
  // on read and write of _affiliated_region_count.  At the end of full gc, a single thread overwrites the count with
  // a coherent value.
  _affiliated_region_count++;
  return _affiliated_region_count;
}

size_t ShenandoahGeneration::decrement_affiliated_region_count() {
  shenandoah_assert_heaplocked_or_fullgc_safepoint();
  // During full gc, multiple GC worker threads may change region affiliations without a lock.  No lock is enforced
  // on read and write of _affiliated_region_count.  At the end of full gc, a single thread overwrites the count with
  // a coherent value.
  _affiliated_region_count--;
  // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
  assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (_used + _humongous_waste <= _affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()),
         "used + humongous cannot exceed regions");
  return _affiliated_region_count;
}

size_t ShenandoahGeneration::increase_affiliated_region_count(size_t delta) {
  shenandoah_assert_heaplocked_or_fullgc_safepoint();
  _affiliated_region_count += delta;
  return _affiliated_region_count;
}

size_t ShenandoahGeneration::decrease_affiliated_region_count(size_t delta) {
  shenandoah_assert_heaplocked_or_fullgc_safepoint();
  assert(_affiliated_region_count > delta, "Affiliated region count cannot be negative");

  _affiliated_region_count -= delta;
  // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
  assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (_used + _humongous_waste <= _affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()),
         "used + humongous cannot exceed regions");
  return _affiliated_region_count;
}

void ShenandoahGeneration::establish_usage(size_t num_regions, size_t num_bytes, size_t humongous_waste) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
  _affiliated_region_count = num_regions;
  _used = num_bytes;
  _humongous_waste = humongous_waste;
}

void ShenandoahGeneration::increase_used(size_t bytes) {
  Atomic::add(&_used, bytes);
  // This detects arithmetic wraparound on _used.  Non-generational mode does not keep track of _affiliated_region_count
  // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
  assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (_used + _humongous_waste <= _affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()),
         "used cannot exceed regions");
}

void ShenandoahGeneration::increase_humongous_waste(size_t bytes) {
  if (bytes > 0) {
    Atomic::add(&_humongous_waste, bytes);
  }
}

void ShenandoahGeneration::decrease_humongous_waste(size_t bytes) {
  if (bytes > 0) {
    assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || (_humongous_waste >= bytes),
           "Waste (" SIZE_FORMAT ") cannot be negative (after subtracting " SIZE_FORMAT ")", _humongous_waste, bytes);
    Atomic::sub(&_humongous_waste, bytes);
  }
}

void ShenandoahGeneration::decrease_used(size_t bytes) {
  // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
  assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (_used >= bytes), "cannot reduce bytes used by generation below zero");
  Atomic::sub(&_used, bytes);

  // Non-generational mode does not maintain affiliated region counts
  // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
  assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (_affiliated_region_count * ShenandoahHeapRegion::region_size_bytes() >= _used),
         "Affiliated regions must hold more than what is currently used");
}

size_t ShenandoahGeneration::used_regions() const {
  return _affiliated_region_count;
}

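// Number of whole regions within this generation's max capacity that hold no affiliated data.
// For illustration (assuming 2 MB regions): a 512 MB generation spans 256 regions; with 100 of them
// affiliated, 156 remain free and unaffiliated.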
size_t ShenandoahGeneration::free_unaffiliated_regions() const {
  size_t result = max_capacity() / ShenandoahHeapRegion::region_size_bytes();
  if (_affiliated_region_count > result) {
    result = 0;
  } else {
    result -= _affiliated_region_count;
  }
  return result;
}

size_t ShenandoahGeneration::used_regions_size() const {
  return _affiliated_region_count * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahGeneration::available() const {
  size_t in_use = used() + get_humongous_waste();
  size_t capacity = max_capacity();
  return in_use > capacity ? 0 : capacity - in_use;
}

size_t ShenandoahGeneration::soft_available() const {
  size_t in_use = used() + get_humongous_waste();
  size_t soft_capacity = soft_max_capacity();
  return in_use > soft_capacity ? 0 : soft_capacity - in_use;
}

void ShenandoahGeneration::increase_capacity(size_t increment) {
  shenandoah_assert_heaplocked_or_safepoint();

  // We do not enforce that new capacity >= heap->max_size_for(this).  The maximum generation size is treated as a rule of thumb
  // which may be violated during certain transitions, such as when we are forcing transfers for the purpose of promoting regions
  // in place.
  // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
  assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (_max_capacity + increment <= ShenandoahHeap::heap()->max_capacity()), "Generation cannot be larger than heap size");
  assert(increment % ShenandoahHeapRegion::region_size_bytes() == 0, "Generation capacity must be multiple of region size");
  _max_capacity += increment;

  // This detects arithmetic wraparound on _used
  // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
  assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (_affiliated_region_count * ShenandoahHeapRegion::region_size_bytes() >= _used),
         "Affiliated regions must hold more than what is currently used");
}

void ShenandoahGeneration::decrease_capacity(size_t decrement) {
  shenandoah_assert_heaplocked_or_safepoint();

  // We do not enforce that new capacity >= heap->min_size_for(this).  The minimum generation size is treated as a rule of thumb
  // which may be violated during certain transitions, such as when we are forcing transfers for the purpose of promoting regions
  // in place.
  assert(decrement % ShenandoahHeapRegion::region_size_bytes() == 0, "Generation capacity must be multiple of region size");
  assert(_max_capacity >= decrement, "Generation capacity cannot be negative");

  _max_capacity -= decrement;

  // This detects arithmetic wraparound on _used
  // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
  assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (_affiliated_region_count * ShenandoahHeapRegion::region_size_bytes() >= _used),
         "Affiliated regions must hold more than what is currently used");
  // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
  assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (_used <= _max_capacity), "Cannot use more than capacity");
  // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
  assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (_affiliated_region_count * ShenandoahHeapRegion::region_size_bytes() <= _max_capacity),
         "Cannot use more than capacity");
}

void ShenandoahGeneration::record_success_concurrent(bool abbreviated) {
  heuristics()->record_success_concurrent(abbreviated);
  ShenandoahHeap::heap()->shenandoah_policy()->record_success_concurrent();
}

void ShenandoahGeneration::record_success_degenerated() {
  heuristics()->record_success_degenerated();
  ShenandoahHeap::heap()->shenandoah_policy()->record_success_degenerated();
}

void ShenandoahGeneration::add_collection_time(double time_seconds) {
  shenandoah_assert_control_or_vm_thread();
  _collection_thread_time_s += time_seconds;
}

double ShenandoahGeneration::reset_collection_time() {
  double t = _collection_thread_time_s;
  _collection_thread_time_s = 0.0;
  return t;
}