/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahCardTable.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "runtime/threads.hpp"
#include "utilities/events.hpp"

class ShenandoahFlushAllSATB : public ThreadClosure {
private:
  SATBMarkQueueSet& _satb_qset;

public:
  explicit ShenandoahFlushAllSATB(SATBMarkQueueSet& satb_qset) :
    _satb_qset(satb_qset) {}

  void do_thread(Thread* thread) override {
    // Transfer any partial buffer to the qset for completed buffer processing.
    _satb_qset.flush_queue(ShenandoahThreadLocalData::satb_mark_queue(thread));
  }
};

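// A buffer closure that transfers oops from completed SATB buffers onto the
// given old generation mark queue. Only oops that point into old, active
// regions are kept; oops in other regions are dropped and counted as 'trashed'.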
class ShenandoahProcessOldSATB : public SATBBufferClosure {
private:
  ShenandoahObjToScanQueue*       _queue;
  ShenandoahHeap*                 _heap;
  ShenandoahMarkingContext* const _mark_context;
  size_t                          _trashed_oops;

public:
  explicit ShenandoahProcessOldSATB(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()),
    _trashed_oops(0) {}

  void do_buffer(void** buffer, size_t size) override {
    assert(size == 0 || !_heap->has_forwarded_objects() || _heap->is_concurrent_old_mark_in_progress(), "Forwarded objects are not expected here");
    for (size_t i = 0; i < size; ++i) {
      oop *p = (oop *) &buffer[i];
      ShenandoahHeapRegion* region = _heap->heap_region_containing(*p);
      if (region->is_old() && region->is_active()) {
        ShenandoahMark::mark_through_ref<oop, OLD>(p, _queue, nullptr, _mark_context, false);
      } else {
        _trashed_oops++;
      }
    }
  }

  size_t trashed_oops() const {
    return _trashed_oops;
  }
};

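// Flushes each thread's local SATB buffer into the global queue set, then
// drains all completed buffers into the old generation mark queues. The
// constructor changes the thread claim token so that workers can claim
// threads for the flush in parallel.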
class ShenandoahPurgeSATBTask : public WorkerTask {
private:
  ShenandoahObjToScanQueueSet* _mark_queues;
  // Keep track of the number of oops that are not transferred to mark queues.
  // This is volatile because workers update it, but the vm thread reads it.
  volatile size_t             _trashed_oops;

public:
  explicit ShenandoahPurgeSATBTask(ShenandoahObjToScanQueueSet* queues) :
    WorkerTask("Purge SATB"),
    _mark_queues(queues),
    _trashed_oops(0) {
    Threads::change_thread_claim_token();
  }

  ~ShenandoahPurgeSATBTask() {
    if (_trashed_oops > 0) {
      log_debug(gc)("Purged %zu oops from old generation SATB buffers", _trashed_oops);
    }
  }

  void work(uint worker_id) override {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahSATBMarkQueueSet &satb_queues = ShenandoahBarrierSet::satb_mark_queue_set();
    ShenandoahFlushAllSATB flusher(satb_queues);
    Threads::possibly_parallel_threads_do(true /* is_par */, &flusher);

    ShenandoahObjToScanQueue* mark_queue = _mark_queues->queue(worker_id);
    ShenandoahProcessOldSATB processor(mark_queue);
    while (satb_queues.apply_closure_to_completed_buffer(&processor)) {}

    Atomic::add(&_trashed_oops, processor.trashed_oops());
  }
};

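// Drains already-completed SATB buffers into the old generation mark queues.
// Unlike ShenandoahPurgeSATBTask, this does not flush the threads' local
// buffers; callers must have flushed them first (see the handshake described
// in concurrent_transfer_pointers_from_satb).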
class ShenandoahTransferOldSATBTask : public WorkerTask {
  ShenandoahSATBMarkQueueSet&  _satb_queues;
  ShenandoahObjToScanQueueSet* _mark_queues;
  // Keep track of the number of oops that are not transferred to mark queues.
  // This is volatile because workers update it, but the control thread reads it.
  volatile size_t              _trashed_oops;

public:
  explicit ShenandoahTransferOldSATBTask(ShenandoahSATBMarkQueueSet& satb_queues, ShenandoahObjToScanQueueSet* mark_queues) :
    WorkerTask("Transfer SATB"),
    _satb_queues(satb_queues),
    _mark_queues(mark_queues),
    _trashed_oops(0) {}

  ~ShenandoahTransferOldSATBTask() {
    if (_trashed_oops > 0) {
      log_debug(gc)("Purged %zu oops from old generation SATB buffers", _trashed_oops);
    }
  }

  void work(uint worker_id) override {
    ShenandoahObjToScanQueue* mark_queue = _mark_queues->queue(worker_id);
    ShenandoahProcessOldSATB processor(mark_queue);
    while (_satb_queues.apply_closure_to_completed_buffer(&processor)) {}

    Atomic::add(&_trashed_oops, processor.trashed_oops());
  }
};

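// Coalesces and fills dead objects in old regions so the regions become
// parsable for the remembered set scan. Worker i strides through the region
// array, taking regions i, i + nworkers, i + 2 * nworkers, and so on. The
// task records whether any worker was preempted before finishing its share.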
class ShenandoahConcurrentCoalesceAndFillTask : public WorkerTask {
private:
  uint                    _nworkers;
  ShenandoahHeapRegion**  _coalesce_and_fill_region_array;
  uint                    _coalesce_and_fill_region_count;
  volatile bool           _is_preempted;

public:
  ShenandoahConcurrentCoalesceAndFillTask(uint nworkers,
                                          ShenandoahHeapRegion** coalesce_and_fill_region_array,
                                          uint region_count) :
    WorkerTask("Shenandoah Concurrent Coalesce and Fill"),
    _nworkers(nworkers),
    _coalesce_and_fill_region_array(coalesce_and_fill_region_array),
    _coalesce_and_fill_region_count(region_count),
    _is_preempted(false) {
  }

  void work(uint worker_id) override {
    ShenandoahWorkerTimingsTracker timer(ShenandoahPhaseTimings::conc_coalesce_and_fill, ShenandoahPhaseTimings::ScanClusters, worker_id);
    for (uint region_idx = worker_id; region_idx < _coalesce_and_fill_region_count; region_idx += _nworkers) {
      ShenandoahHeapRegion* r = _coalesce_and_fill_region_array[region_idx];
      if (r->is_humongous()) {
        // There is only one object in this region and it is not garbage,
        // so no need to coalesce or fill.
        continue;
      }

      if (!r->oop_coalesce_and_fill(true)) {
        // Coalesce and fill has been preempted
        Atomic::store(&_is_preempted, true);
        return;
      }
    }
  }

  // Value returned from is_completed() is only valid after all worker threads have terminated.
  bool is_completed() {
    return !Atomic::load(&_is_preempted);
  }
};

ShenandoahOldGeneration::ShenandoahOldGeneration(uint max_queues, size_t max_capacity, size_t soft_max_capacity)
  : ShenandoahGeneration(OLD, max_queues, max_capacity, soft_max_capacity),
    _coalesce_and_fill_region_array(NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, ShenandoahHeap::heap()->num_regions(), mtGC)),
    _old_heuristics(nullptr),
    _region_balance(0),
    _promoted_reserve(0),
    _promoted_expended(0),
    _promotion_potential(0),
    _pad_for_promote_in_place(0),
    _promotable_humongous_regions(0),
    _promotable_regular_regions(0),
    _is_parsable(true),
    _card_scan(nullptr),
    _state(WAITING_FOR_BOOTSTRAP),
    _growth_before_compaction(INITIAL_GROWTH_BEFORE_COMPACTION),
    _min_growth_before_compaction((ShenandoahMinOldGenGrowthPercent * FRACTIONAL_DENOMINATOR) / 100)
{
  _live_bytes_after_last_mark = ShenandoahHeap::heap()->capacity() * INITIAL_LIVE_FRACTION / FRACTIONAL_DENOMINATOR;
  // Always clear references for old generation
  ref_processor()->set_soft_reference_policy(true);

  if (ShenandoahCardBarrier) {
    ShenandoahCardTable* card_table = ShenandoahBarrierSet::barrier_set()->card_table();
    size_t card_count = card_table->cards_required(ShenandoahHeap::heap()->reserved_region().word_size());
    auto rs = new ShenandoahDirectCardMarkRememberedSet(card_table, card_count);
    _card_scan = new ShenandoahScanRemembered(rs);
  }
}

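// The promotion reserve is the budget for promoting objects from young to old
// during evacuation. It is established while holding the heap lock or at a
// safepoint; expenditure against it is tracked atomically. PLABs expend their
// full size up front and unexpend whatever remains unused when they retire
// (see configure_plab_for_current_thread).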
void ShenandoahOldGeneration::set_promoted_reserve(size_t new_val) {
  shenandoah_assert_heaplocked_or_safepoint();
  _promoted_reserve = new_val;
}

size_t ShenandoahOldGeneration::get_promoted_reserve() const {
  return _promoted_reserve;
}

void ShenandoahOldGeneration::augment_promoted_reserve(size_t increment) {
  shenandoah_assert_heaplocked_or_safepoint();
  _promoted_reserve += increment;
}

void ShenandoahOldGeneration::reset_promoted_expended() {
  shenandoah_assert_heaplocked_or_safepoint();
  Atomic::store(&_promoted_expended, (size_t) 0);
}

size_t ShenandoahOldGeneration::expend_promoted(size_t increment) {
  shenandoah_assert_heaplocked_or_safepoint();
  assert(get_promoted_expended() + increment <= get_promoted_reserve(), "Do not expend more promotion than budgeted");
  return Atomic::add(&_promoted_expended, increment);
}

size_t ShenandoahOldGeneration::unexpend_promoted(size_t decrement) {
  return Atomic::sub(&_promoted_expended, decrement);
}

size_t ShenandoahOldGeneration::get_promoted_expended() const {
  return Atomic::load(&_promoted_expended);
}

bool ShenandoahOldGeneration::can_allocate(const ShenandoahAllocRequest &req) const {
  assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "GCLAB pertains only to young-gen memory");

  const size_t requested_bytes = req.size() * HeapWordSize;
  // The promotion reserve may also be used for evacuations. If we can promote this object,
  // then we can also evacuate it.
  if (can_promote(requested_bytes)) {
    // The promotion reserve should be able to accommodate this request. The request
    // might still fail if alignment with the card table increases the size. The request
    // may also fail if the heap is badly fragmented and the free set cannot find room for it.
    return true;
  }

  if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
    // The promotion reserve cannot accommodate this plab request. Check if we still have room for
    // evacuations. Note that we cannot really know how much of the plab will be used for evacuations,
    // so here we only check that some evacuation reserve still exists.
    return get_evacuation_reserve() > 0;
  }

  // This is a shared allocation request. We've already checked that it can't be promoted, so if
  // it is a promotion, we return false. Otherwise, it is a shared evacuation request, and we allow
  // the allocation to proceed.
  return !req.is_promotion();
}

void ShenandoahOldGeneration::configure_plab_for_current_thread(const ShenandoahAllocRequest &req) {
  // Note: Even when a mutator is performing a promotion outside a LAB, we use a 'shared_gc' request.
  if (req.is_gc_alloc()) {
    const size_t actual_size = req.actual_size() * HeapWordSize;
    if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
      // We've created a new plab. Now we configure whether it will be used for promotions
      // and evacuations, or just evacuations.
      Thread* thread = Thread::current();
      ShenandoahThreadLocalData::reset_plab_promoted(thread);

      // The actual size of the allocation may be larger than the requested bytes (due to alignment on card boundaries).
      // If this puts us over our promotion budget, we need to disable future PLAB promotions for this thread.
      if (can_promote(actual_size)) {
        // Assume the entirety of this PLAB will be used for promotion.  This prevents promotions from
        // overreaching the budget.  When we retire this plab, we'll unexpend what we don't really use.
        expend_promoted(actual_size);
        ShenandoahThreadLocalData::enable_plab_promotions(thread);
        ShenandoahThreadLocalData::set_plab_actual_size(thread, actual_size);
      } else {
        // Disable promotions in this thread because the entirety of this PLAB must be available to hold old-gen evacuations.
        ShenandoahThreadLocalData::disable_plab_promotions(thread);
        ShenandoahThreadLocalData::set_plab_actual_size(thread, 0);
      }
    } else if (req.is_promotion()) {
      // Shared promotion.
      expend_promoted(actual_size);
    }
  }
}

size_t ShenandoahOldGeneration::get_live_bytes_after_last_mark() const {
  return _live_bytes_after_last_mark;
}

void ShenandoahOldGeneration::set_live_bytes_after_last_mark(size_t bytes) {
  if (bytes == 0) {
    // Restart search for best old-gen size to the initial state
    _live_bytes_after_last_mark = ShenandoahHeap::heap()->capacity() * INITIAL_LIVE_FRACTION / FRACTIONAL_DENOMINATOR;
    _growth_before_compaction = INITIAL_GROWTH_BEFORE_COMPACTION;
  } else {
    _live_bytes_after_last_mark = bytes;
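    // Each completed old mark provides a fresh estimate of live memory, so we
    // require proportionally less growth before triggering the next old
    // collection, but never less than the configured minimum.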
    _growth_before_compaction /= 2;
    if (_growth_before_compaction < _min_growth_before_compaction) {
      _growth_before_compaction = _min_growth_before_compaction;
    }
  }
}

void ShenandoahOldGeneration::handle_failed_transfer() {
  _old_heuristics->trigger_cannot_expand();
}

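// Returns the amount of live old-gen memory above which the heuristics will
// trigger an old collection. _growth_before_compaction is a fraction whose
// denominator is FRACTIONAL_DENOMINATOR: for example, if the fraction
// represents 25% and live memory after the last mark was 1G, the threshold
// would be 1.25G.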
size_t ShenandoahOldGeneration::usage_trigger_threshold() const {
  size_t result = _live_bytes_after_last_mark + (_live_bytes_after_last_mark * _growth_before_compaction) / FRACTIONAL_DENOMINATOR;
  return result;
}

bool ShenandoahOldGeneration::contains(ShenandoahAffiliation affiliation) const {
  return affiliation == OLD_GENERATION;
}

bool ShenandoahOldGeneration::contains(ShenandoahHeapRegion* region) const {
  return region->is_old();
}

void ShenandoahOldGeneration::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* cl) {
  ShenandoahIncludeRegionClosure<OLD_GENERATION> old_regions_cl(cl);
  ShenandoahHeap::heap()->parallel_heap_region_iterate(&old_regions_cl);
}

void ShenandoahOldGeneration::heap_region_iterate(ShenandoahHeapRegionClosure* cl) {
  ShenandoahIncludeRegionClosure<OLD_GENERATION> old_regions_cl(cl);
  ShenandoahHeap::heap()->heap_region_iterate(&old_regions_cl);
}

void ShenandoahOldGeneration::set_concurrent_mark_in_progress(bool in_progress) {
  ShenandoahHeap::heap()->set_concurrent_old_mark_in_progress(in_progress);
}

bool ShenandoahOldGeneration::is_concurrent_mark_in_progress() {
  return ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress();
}

void ShenandoahOldGeneration::cancel_marking() {
  if (is_concurrent_mark_in_progress()) {
    log_debug(gc)("Abandon SATB buffers");
    ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
  }

  ShenandoahGeneration::cancel_marking();
}

void ShenandoahOldGeneration::cancel_gc() {
  shenandoah_assert_safepoint();
  if (is_idle()) {
#ifdef ASSERT
    validate_waiting_for_bootstrap();
#endif
  } else {
    log_info(gc)("Terminating old gc cycle.");
    // Stop marking
    cancel_marking();
    // Stop tracking old regions
    abandon_collection_candidates();
    // Remove old generation access to young generation mark queues
    ShenandoahHeap::heap()->young_generation()->set_old_gen_task_queues(nullptr);
    // Transition to IDLE now.
    transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
  }
}

void ShenandoahOldGeneration::prepare_gc() {
  // Now that we have made the old generation parsable, it is safe to reset the mark bitmap.
  assert(state() != FILLING, "Cannot reset old without making it parsable");

  ShenandoahGeneration::prepare_gc();
}

bool ShenandoahOldGeneration::entry_coalesce_and_fill() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  static const char* msg = "Coalescing and filling (Old)";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_coalesce_and_fill);

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  EventMark em("%s", msg);
  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              msg);

  return coalesce_and_fill();
}

// Make the old generation regions parsable, so they can be safely
// scanned when looking for objects in memory indicated by dirty cards.
bool ShenandoahOldGeneration::coalesce_and_fill() {
  transition_to(FILLING);

  // This code will see the same set of regions to fill on each resumption as it did
  // on the initial run. That's okay because each region keeps track of its own coalesce
  // and fill state. Regions that were filled on a prior attempt will not try to fill again.
  uint coalesce_and_fill_regions_count = _old_heuristics->get_coalesce_and_fill_candidates(_coalesce_and_fill_region_array);
  assert(coalesce_and_fill_regions_count <= ShenandoahHeap::heap()->num_regions(), "Sanity");
  if (coalesce_and_fill_regions_count == 0) {
    // No regions need to be filled.
    abandon_collection_candidates();
    return true;
  }

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  WorkerThreads* workers = heap->workers();
  uint nworkers = workers->active_workers();
  ShenandoahConcurrentCoalesceAndFillTask task(nworkers, _coalesce_and_fill_region_array, coalesce_and_fill_regions_count);

  log_debug(gc)("Starting (or resuming) coalesce-and-fill of " UINT32_FORMAT " old heap regions", coalesce_and_fill_regions_count);
  workers->run_task(&task);
  if (task.is_completed()) {
    // We no longer need to track regions that need to be coalesced and filled.
    abandon_collection_candidates();
    return true;
  } else {
    // Coalesce-and-fill has been preempted. We'll finish that effort in the future.  Do not invoke
    // ShenandoahGeneration::prepare_gc() until coalesce-and-fill is done because it resets the mark bitmap
    // and invokes set_mark_incomplete().  Coalesce-and-fill depends on the mark bitmap.
    log_debug(gc)("Suspending coalesce-and-fill of old heap regions");
    return false;
  }
}

void ShenandoahOldGeneration::concurrent_transfer_pointers_from_satb() const {
  const ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_old_mark_in_progress(), "Only necessary during old marking.");
  log_debug(gc)("Transfer SATB buffers");

  // Step 1. All threads need to 'complete' partially filled, thread local SATB buffers. This
  // is accomplished in ShenandoahConcurrentGC::complete_abbreviated_cycle using a Handshake
  // operation.
  // Step 2. Use worker threads to transfer oops from old, active regions in the completed
  // SATB buffers to old generation mark queues.
  ShenandoahSATBMarkQueueSet& satb_queues = ShenandoahBarrierSet::satb_mark_queue_set();
  ShenandoahTransferOldSATBTask transfer_task(satb_queues, task_queues());
  heap->workers()->run_task(&transfer_task);
}

void ShenandoahOldGeneration::transfer_pointers_from_satb() const {
  const ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(heap->is_concurrent_old_mark_in_progress(), "Only necessary during old marking.");
  log_debug(gc)("Transfer SATB buffers");
  ShenandoahPurgeSATBTask purge_satb_task(task_queues());
  heap->workers()->run_task(&purge_satb_task);
}

bool ShenandoahOldGeneration::contains(oop obj) const {
  return ShenandoahHeap::heap()->is_in_old(obj);
}

void ShenandoahOldGeneration::prepare_regions_and_collection_set(bool concurrent) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(!heap->is_full_gc_in_progress(), "Only for concurrent and degenerated GC");

  {
    ShenandoahGCPhase phase(concurrent ?
        ShenandoahPhaseTimings::final_update_region_states :
        ShenandoahPhaseTimings::degen_gc_final_update_region_states);
    ShenandoahFinalMarkUpdateRegionStateClosure cl(complete_marking_context());

    parallel_heap_region_iterate(&cl);
    heap->assert_pinned_region_status();
  }

  {
    // This doesn't actually choose a collection set, but prepares a list of
    // regions as 'candidates' for inclusion in a mixed collection.
    ShenandoahGCPhase phase(concurrent ?
        ShenandoahPhaseTimings::choose_cset :
        ShenandoahPhaseTimings::degen_gc_choose_cset);
    ShenandoahHeapLocker locker(heap->lock());
    _old_heuristics->prepare_for_old_collections();
  }

  {
    // Though we did not choose a collection set above, we still may have
    // freed up immediate garbage regions so proceed with rebuilding the free set.
    ShenandoahGCPhase phase(concurrent ?
        ShenandoahPhaseTimings::final_rebuild_freeset :
        ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
    ShenandoahHeapLocker locker(heap->lock());
    size_t cset_young_regions, cset_old_regions;
    size_t first_old, last_old, num_old;
    heap->free_set()->prepare_to_rebuild(cset_young_regions, cset_old_regions, first_old, last_old, num_old);
    // This is just old-gen completion.  No future budgeting required here.  The only reason to rebuild the freeset here
    // is in case there was any immediate old garbage identified.
    heap->free_set()->finish_rebuild(cset_young_regions, cset_old_regions, num_old);
  }
}

const char* ShenandoahOldGeneration::state_name(State state) {
  switch (state) {
    case WAITING_FOR_BOOTSTRAP:   return "Waiting for Bootstrap";
    case FILLING:                 return "Coalescing";
    case BOOTSTRAPPING:           return "Bootstrapping";
    case MARKING:                 return "Marking";
    case EVACUATING:              return "Evacuating";
    case EVACUATING_AFTER_GLOBAL: return "Evacuating (G)";
    default:
      ShouldNotReachHere();
      return "Unknown";
  }
}

void ShenandoahOldGeneration::transition_to(State new_state) {
  if (_state != new_state) {
    log_debug(gc, thread)("Old generation transition from %s to %s", state_name(_state), state_name(new_state));
    EventMark event("Old was %s, now is %s", state_name(_state), state_name(new_state));
    validate_transition(new_state);
    _state = new_state;
  }
}

#ifdef ASSERT
// This diagram depicts the expected state transitions for marking the old generation
// and preparing for old collections. When a young generation cycle executes, the
// remembered set scan must visit objects in old regions. Visiting an object which
// has become dead on previous old cycles will result in crashes. To avoid visiting
// such objects, the remembered set scan will use the old generation mark bitmap when
// possible. It is _not_ possible to use the old generation bitmap when old marking
// is active (bitmap is not complete). For this reason, the old regions are made
// parsable _before_ the old generation bitmap is reset. The diagram does not depict
// cancellation of old collections by global or full collections.
//
// When a global collection supersedes an old collection, the global mark still
// "completes" the old mark bitmap. Subsequent remembered set scans may use the
// old generation mark bitmap, but any uncollected old regions must still be made parsable
// before the next old generation cycle begins. For this reason, a global collection may
// create mixed collection candidates and coalesce and fill candidates and will put
// the old generation in the respective states (EVACUATING or FILLING). After a Full GC,
// the mark bitmaps are all reset, all regions are parsable and the mark context will
// not be "complete". After a Full GC, remembered set scans will _not_ use the mark bitmap
// and we expect the old generation to be waiting for bootstrap.
//
//                              +-----------------+
//               +------------> |     FILLING     | <---+
//               |   +--------> |                 |     |
//               |   |          +-----------------+     |
//               |   |            |                     |
//               |   |            | Filling Complete    | <-> A global collection may
//               |   |            v                     |     move the old generation
//               |   |          +-----------------+     |     directly from waiting for
//           +-- |-- |--------> |     WAITING     |     |     bootstrap to filling or
//           |   |   |    +---- |  FOR BOOTSTRAP  | ----+     evacuating. It may also
//           |   |   |    |     +-----------------+           move from filling to waiting
//           |   |   |    |       |                           for bootstrap.
//           |   |   |    |       | Reset Bitmap
//           |   |   |    |       v
//           |   |   |    |     +-----------------+     +----------------------+
//           |   |   |    |     |    BOOTSTRAP    | <-> |       YOUNG GC       |
//           |   |   |    |     |                 |     | (RSet Parses Region) |
//           |   |   |    |     +-----------------+     +----------------------+
//           |   |   |    |       |
//           |   |   |    |       | Old Marking
//           |   |   |    |       v
//           |   |   |    |     +-----------------+     +----------------------+
//           |   |   |    |     |     MARKING     | <-> |       YOUNG GC       |
//           |   |   +--------- |                 |     | (RSet Parses Region) |
//           |   |        |     +-----------------+     +----------------------+
//           |   |        |       |
//           |   |        |       | Has Evacuation Candidates
//           |   |        |       v
//           |   |        |     +-----------------+     +--------------------+
//           |   |        +---> |    EVACUATING   | <-> |      YOUNG GC      |
//           |   +------------- |                 |     | (RSet Uses Bitmap) |
//           |                  +-----------------+     +--------------------+
//           |                    |
//           |                    | Global Cycle Coalesces and Fills Old Regions
//           |                    v
//           |                  +-----------------+     +--------------------+
//           +----------------- |    EVACUATING   | <-> |      YOUNG GC      |
//                              |   AFTER GLOBAL  |     | (RSet Uses Bitmap) |
//                              +-----------------+     +--------------------+
//
//
void ShenandoahOldGeneration::validate_transition(State new_state) {
  ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
  switch (new_state) {
    case FILLING:
      assert(_state != BOOTSTRAPPING, "Cannot begin making old regions parsable after bootstrapping");
      assert(is_mark_complete(), "Cannot begin filling without first completing marking, state is '%s'", state_name(_state));
      assert(_old_heuristics->has_coalesce_and_fill_candidates(), "Cannot begin filling without something to fill.");
      break;
    case WAITING_FOR_BOOTSTRAP:
      // GC cancellation can send us back here from any state.
      validate_waiting_for_bootstrap();
      break;
    case BOOTSTRAPPING:
      assert(_state == WAITING_FOR_BOOTSTRAP, "Cannot reset bitmap without making old regions parsable, state is '%s'", state_name(_state));
      assert(_old_heuristics->unprocessed_old_collection_candidates() == 0, "Cannot bootstrap with mixed collection candidates");
      assert(!heap->is_prepare_for_old_mark_in_progress(), "Cannot still be making old regions parsable.");
      break;
    case MARKING:
      assert(_state == BOOTSTRAPPING, "Must have finished bootstrapping before marking, state is '%s'", state_name(_state));
      assert(heap->young_generation()->old_gen_task_queues() != nullptr, "Young generation needs old mark queues.");
      assert(heap->is_concurrent_old_mark_in_progress(), "Should be marking old now.");
      break;
    case EVACUATING_AFTER_GLOBAL:
      assert(_state == EVACUATING, "Must have been evacuating, state is '%s'", state_name(_state));
      break;
    case EVACUATING:
      assert(_state == WAITING_FOR_BOOTSTRAP || _state == MARKING, "Cannot have old collection candidates without first marking, state is '%s'", state_name(_state));
      assert(_old_heuristics->unprocessed_old_collection_candidates() > 0, "Must have collection candidates here.");
      break;
    default:
      fatal("Unknown new state");
  }
}

bool ShenandoahOldGeneration::validate_waiting_for_bootstrap() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(!heap->is_concurrent_old_mark_in_progress(), "Cannot become ready for bootstrap during old mark.");
  assert(heap->young_generation()->old_gen_task_queues() == nullptr, "Cannot become ready for bootstrap when still setup for bootstrapping.");
  assert(!is_concurrent_mark_in_progress(), "Cannot be marking in IDLE");
  assert(!heap->young_generation()->is_bootstrap_cycle(), "Cannot have old mark queues if IDLE");
  assert(!_old_heuristics->has_coalesce_and_fill_candidates(), "Cannot have coalesce and fill candidates in IDLE");
  assert(_old_heuristics->unprocessed_old_collection_candidates() == 0, "Cannot have mixed collection candidates in IDLE");
  return true;
}
#endif

ShenandoahHeuristics* ShenandoahOldGeneration::initialize_heuristics(ShenandoahMode* gc_mode) {
  _old_heuristics = new ShenandoahOldHeuristics(this, ShenandoahGenerationalHeap::heap());
  _old_heuristics->set_guaranteed_gc_interval(ShenandoahGuaranteedOldGCInterval);
  _heuristics = _old_heuristics;
  return _heuristics;
}

void ShenandoahOldGeneration::record_success_concurrent(bool abbreviated) {
  heuristics()->record_success_concurrent();
  ShenandoahHeap::heap()->shenandoah_policy()->record_success_old();
}

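// Record an old generation evacuation failure. try_set succeeds only for the
// first failure while the flag remains set, so repeated failures do not flood
// the log.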
void ShenandoahOldGeneration::handle_failed_evacuation() {
  if (_failed_evacuation.try_set()) {
    log_debug(gc)("Old gen evac failure.");
  }
}

void ShenandoahOldGeneration::handle_failed_promotion(Thread* thread, size_t size) {
  // We squelch excessive reports to reduce noise in logs.
  const size_t MaxReportsPerEpoch = 4;
  static size_t last_report_epoch = 0;
  static size_t epoch_report_count = 0;
  auto heap = ShenandoahGenerationalHeap::heap();

  size_t promotion_reserve;
  size_t promotion_expended;

  const size_t gc_id = heap->control_thread()->get_gc_id();

  if ((gc_id != last_report_epoch) || (epoch_report_count++ < MaxReportsPerEpoch)) {
    {
      // Promotion failures should be very rare.  Invest in providing useful diagnostic info.
      ShenandoahHeapLocker locker(heap->lock());
      promotion_reserve = get_promoted_reserve();
      promotion_expended = get_promoted_expended();
    }
    PLAB* const plab = ShenandoahThreadLocalData::plab(thread);
    const size_t words_remaining = (plab == nullptr) ? 0 : plab->words_remaining();
    const char* promote_enabled = ShenandoahThreadLocalData::allow_plab_promotions(thread) ? "enabled" : "disabled";

    log_info(gc, ergo)("Promotion failed, size %zu, has plab? %s, PLAB remaining: %zu"
                       ", plab promotions %s, promotion reserve: %zu, promotion expended: %zu"
                       ", old capacity: %zu, old_used: %zu, old unaffiliated regions: %zu",
                       size * HeapWordSize, plab == nullptr ? "no" : "yes",
                       words_remaining * HeapWordSize, promote_enabled, promotion_reserve, promotion_expended,
                       max_capacity(), used(), free_unaffiliated_regions());

    if ((gc_id == last_report_epoch) && (epoch_report_count >= MaxReportsPerEpoch)) {
      log_debug(gc, ergo)("Squelching additional promotion failure reports for current epoch");
    } else if (gc_id != last_report_epoch) {
      last_report_epoch = gc_id;
      epoch_report_count = 1;
    }
  }
}

void ShenandoahOldGeneration::handle_evacuation(HeapWord* obj, size_t words, bool promotion) {
  // Only register the copy of the object that won the evacuation race.
  _card_scan->register_object_without_lock(obj);

  // Mark the entire range of the evacuated object as dirty.  At next remembered set scan,
  // we will clear dirty bits that do not hold interesting pointers.  It's more efficient to
  // do this in batch, in a background GC thread than to try to carefully dirty only cards
  // that hold interesting pointers right now.
  _card_scan->mark_range_as_dirty(obj, words);

  if (promotion) {
    // This evacuation was a promotion, track this as allocation against old gen
    increase_allocated(words * HeapWordSize);
  }
}

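// Old collection candidates are regions selected by the heuristics at the end
// of an old (or global) mark for inclusion in future mixed collections.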
bool ShenandoahOldGeneration::has_unprocessed_collection_candidates() {
  return _old_heuristics->unprocessed_old_collection_candidates() > 0;
}

size_t ShenandoahOldGeneration::unprocessed_collection_candidates_live_memory() {
  return _old_heuristics->unprocessed_old_collection_candidates_live_memory();
}

void ShenandoahOldGeneration::abandon_collection_candidates() {
  _old_heuristics->abandon_collection_candidates();
}

void ShenandoahOldGeneration::prepare_for_mixed_collections_after_global_gc() {
  assert(is_mark_complete(), "Expected old generation mark to be complete after global cycle.");
  _old_heuristics->prepare_for_old_collections();
  log_info(gc, ergo)("After choosing global collection set, mixed candidates: " UINT32_FORMAT ", coalescing candidates: %zu",
               _old_heuristics->unprocessed_old_collection_candidates(),
               _old_heuristics->coalesce_and_fill_candidates_count());
}

void ShenandoahOldGeneration::parallel_heap_region_iterate_free(ShenandoahHeapRegionClosure* cl) {
  // Iterate over old and free regions (exclude young).
  ShenandoahExcludeRegionClosure<YOUNG_GENERATION> exclude_cl(cl);
  ShenandoahGeneration::parallel_heap_region_iterate_free(&exclude_cl);
}

void ShenandoahOldGeneration::set_parsable(bool parsable) {
  _is_parsable = parsable;
  if (_is_parsable) {
    // The current state would have been chosen during final mark of the global
    // collection, _before_ any decisions about class unloading have been made.
    //
    // After unloading classes, we have made the old generation regions parsable.
    // We can skip filling or transition to a state that knows everything has
    // already been filled.
    switch (state()) {
      case ShenandoahOldGeneration::EVACUATING:
        transition_to(ShenandoahOldGeneration::EVACUATING_AFTER_GLOBAL);
        break;
      case ShenandoahOldGeneration::FILLING:
        assert(_old_heuristics->unprocessed_old_collection_candidates() == 0, "Expected no mixed collection candidates");
        assert(_old_heuristics->coalesce_and_fill_candidates_count() > 0, "Expected coalesce and fill candidates");
        // When the heuristic put the old generation in this state, it didn't know
        // that we would unload classes and make everything parsable. But, we know
        // that now so we can override this state.
        abandon_collection_candidates();
        transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
        break;
      default:
        // We can get here during a full GC. The full GC will cancel anything
        // happening in the old generation and return it to the waiting for bootstrap
        // state. The full GC will then record that the old regions are parsable
        // after rebuilding the remembered set.
        assert(is_idle(), "Unexpected state %s at end of global GC", state_name());
        break;
    }
  }
}

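// Called when the last of the mixed evacuations has completed. Whether old
// regions still need coalescing and filling depends on how the candidates
// were prepared (old marking vs. a global cycle), as described in the state
// diagram above.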
void ShenandoahOldGeneration::complete_mixed_evacuations() {
  assert(is_doing_mixed_evacuations(), "Mixed evacuations should be in progress");
  if (!_old_heuristics->has_coalesce_and_fill_candidates()) {
    // No candidate regions to coalesce and fill
    transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
    return;
  }

  if (state() == ShenandoahOldGeneration::EVACUATING) {
    transition_to(ShenandoahOldGeneration::FILLING);
    return;
  }

  // Here, we have no more candidates for mixed collections. The candidates for coalescing
  // and filling have already been processed during the global cycle, so there is nothing
  // more to do.
  assert(state() == ShenandoahOldGeneration::EVACUATING_AFTER_GLOBAL, "Should be evacuating after a global cycle");
  abandon_collection_candidates();
  transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
}

void ShenandoahOldGeneration::abandon_mixed_evacuations() {
  switch (state()) {
    case ShenandoahOldGeneration::EVACUATING:
      transition_to(ShenandoahOldGeneration::FILLING);
      break;
    case ShenandoahOldGeneration::EVACUATING_AFTER_GLOBAL:
      abandon_collection_candidates();
      transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
      break;
    default:
      log_warning(gc)("Abandon mixed evacuations in unexpected state: %s", state_name(state()));
      ShouldNotReachHere();
      break;
  }
}

void ShenandoahOldGeneration::clear_cards_for(ShenandoahHeapRegion* region) {
  _card_scan->mark_range_as_empty(region->bottom(), pointer_delta(region->end(), region->bottom()));
}

void ShenandoahOldGeneration::mark_card_as_dirty(void* location) {
  _card_scan->mark_card_as_dirty((HeapWord*)location);
}