/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */


#include "precompiled.hpp"

#include "gc/shared/strongRootsScope.hpp"
#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahCardTable.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
#include "gc/shenandoah/shenandoahMarkClosures.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "runtime/threads.hpp"
#include "utilities/events.hpp"

class ShenandoahFlushAllSATB : public ThreadClosure {
private:
  SATBMarkQueueSet& _satb_qset;

public:
  explicit ShenandoahFlushAllSATB(SATBMarkQueueSet& satb_qset) :
    _satb_qset(satb_qset) {}

  void do_thread(Thread* thread) override {
    // Transfer any partial buffer to the qset for completed buffer processing.
    _satb_qset.flush_queue(ShenandoahThreadLocalData::satb_mark_queue(thread));
  }
};

class ShenandoahProcessOldSATB : public SATBBufferClosure {
private:
  ShenandoahObjToScanQueue*       _queue;
  ShenandoahHeap*                 _heap;
  ShenandoahMarkingContext* const _mark_context;
  size_t                          _trashed_oops;

public:
  explicit ShenandoahProcessOldSATB(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()),
    _trashed_oops(0) {}

  void do_buffer(void** buffer, size_t size) override {
    assert(size == 0 || !_heap->has_forwarded_objects() || _heap->is_concurrent_old_mark_in_progress(), "Forwarded objects are not expected here");
    for (size_t i = 0; i < size; ++i) {
      oop *p = (oop *) &buffer[i];
      ShenandoahHeapRegion* region = _heap->heap_region_containing(*p);
      if (region->is_old() && region->is_active()) {
        ShenandoahMark::mark_through_ref<oop, OLD>(p, _queue, nullptr, _mark_context, false);
      } else {
        _trashed_oops++;
      }
    }
  }

  size_t trashed_oops() const {
    return _trashed_oops;
  }
};

class ShenandoahPurgeSATBTask : public WorkerTask {
private:
  ShenandoahObjToScanQueueSet* _mark_queues;
  volatile size_t             _trashed_oops;

public:
  explicit ShenandoahPurgeSATBTask(ShenandoahObjToScanQueueSet* queues) :
    WorkerTask("Purge SATB"),
    _mark_queues(queues),
    _trashed_oops(0) {
    Threads::change_thread_claim_token();
  }

  ~ShenandoahPurgeSATBTask() {
    if (_trashed_oops > 0) {
      log_debug(gc)("Purged " SIZE_FORMAT " oops from old generation SATB buffers", _trashed_oops);
    }
  }

  void work(uint worker_id) override {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahSATBMarkQueueSet &satb_queues = ShenandoahBarrierSet::satb_mark_queue_set();
    ShenandoahFlushAllSATB flusher(satb_queues);
    Threads::possibly_parallel_threads_do(true /* is_par */, &flusher);

    ShenandoahObjToScanQueue* mark_queue = _mark_queues->queue(worker_id);
    ShenandoahProcessOldSATB processor(mark_queue);
    while (satb_queues.apply_closure_to_completed_buffer(&processor)) {}

    Atomic::add(&_trashed_oops, processor.trashed_oops());
  }
};
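
// A minimal usage sketch (illustrative only; it mirrors transfer_pointers_from_satb() below).
// The task is run at a safepoint: the constructor claims a fresh thread-claim token so each
// Java/VM thread is flushed exactly once, and each worker then drains completed SATB buffers
// into its own old-gen mark queue, discarding pointers that no longer refer to active old regions:
//
//   ShenandoahPurgeSATBTask purge_satb_task(task_queues());
//   heap->workers()->run_task(&purge_satb_task);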

class ShenandoahConcurrentCoalesceAndFillTask : public WorkerTask {
private:
  uint                    _nworkers;
  ShenandoahHeapRegion**  _coalesce_and_fill_region_array;
  uint                    _coalesce_and_fill_region_count;
  volatile bool           _is_preempted;

public:
  ShenandoahConcurrentCoalesceAndFillTask(uint nworkers,
                                          ShenandoahHeapRegion** coalesce_and_fill_region_array,
                                          uint region_count) :
    WorkerTask("Shenandoah Concurrent Coalesce and Fill"),
    _nworkers(nworkers),
    _coalesce_and_fill_region_array(coalesce_and_fill_region_array),
    _coalesce_and_fill_region_count(region_count),
    _is_preempted(false) {
  }

  void work(uint worker_id) override {
    ShenandoahWorkerTimingsTracker timer(ShenandoahPhaseTimings::conc_coalesce_and_fill, ShenandoahPhaseTimings::ScanClusters, worker_id);
    for (uint region_idx = worker_id; region_idx < _coalesce_and_fill_region_count; region_idx += _nworkers) {
      ShenandoahHeapRegion* r = _coalesce_and_fill_region_array[region_idx];
      if (r->is_humongous()) {
        // There is only one object in this region and it is not garbage,
        // so no need to coalesce or fill.
        continue;
      }

      if (!r->oop_coalesce_and_fill(true)) {
        // Coalesce and fill has been preempted
        Atomic::store(&_is_preempted, true);
        return;
      }
    }
  }
  // The value returned from is_completed() is only valid after all worker threads have terminated.
  bool is_completed() {
    return !Atomic::load(&_is_preempted);
  }
};
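
// Work is distributed by striding over the candidate array: worker w visits indices
// w, w + nworkers, w + 2 * nworkers, and so on. For example (illustrative numbers only),
// with 4 workers and 10 candidate regions, worker 1 handles regions 1, 5 and 9. If any
// worker is preempted, is_completed() returns false and the whole pass is resumed later;
// regions that already finished filling remember that and are not filled again.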

ShenandoahOldGeneration::ShenandoahOldGeneration(uint max_queues, size_t max_capacity, size_t soft_max_capacity)
  : ShenandoahGeneration(OLD, max_queues, max_capacity, soft_max_capacity),
    _coalesce_and_fill_region_array(NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, ShenandoahHeap::heap()->num_regions(), mtGC)),
    _old_heuristics(nullptr),
    _region_balance(0),
    _promoted_reserve(0),
    _promoted_expended(0),
    _promotion_potential(0),
    _pad_for_promote_in_place(0),
    _promotable_humongous_regions(0),
    _promotable_regular_regions(0),
    _is_parsable(true),
    _card_scan(nullptr),
    _state(WAITING_FOR_BOOTSTRAP),
    _growth_before_compaction(INITIAL_GROWTH_BEFORE_COMPACTION),
    _min_growth_before_compaction ((ShenandoahMinOldGenGrowthPercent * FRACTIONAL_DENOMINATOR) / 100)
{
  _live_bytes_after_last_mark = ShenandoahHeap::heap()->capacity() * INITIAL_LIVE_FRACTION / FRACTIONAL_DENOMINATOR;
  // Always clear references for old generation
  ref_processor()->set_soft_reference_policy(true);

  if (ShenandoahCardBarrier) {
    ShenandoahCardTable* card_table = ShenandoahBarrierSet::barrier_set()->card_table();
    size_t card_count = card_table->cards_required(ShenandoahHeap::heap()->reserved_region().word_size());
    auto rs = new ShenandoahDirectCardMarkRememberedSet(card_table, card_count);
    _card_scan = new ShenandoahScanRemembered(rs);
  }
}
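
// Illustrative sizing only (the real figures depend on GCCardSizeInBytes and the reserved heap):
// assuming a 512-byte card, a 1 GB reserved region is covered by roughly 2M cards, so the
// direct-card-mark remembered set keeps on the order of 2 MB of card metadata to describe it.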

void ShenandoahOldGeneration::set_promoted_reserve(size_t new_val) {
  shenandoah_assert_heaplocked_or_safepoint();
  _promoted_reserve = new_val;
}

size_t ShenandoahOldGeneration::get_promoted_reserve() const {
  return _promoted_reserve;
}

void ShenandoahOldGeneration::augment_promoted_reserve(size_t increment) {
  shenandoah_assert_heaplocked_or_safepoint();
  _promoted_reserve += increment;
}

void ShenandoahOldGeneration::reset_promoted_expended() {
  shenandoah_assert_heaplocked_or_safepoint();
  Atomic::store(&_promoted_expended, (size_t) 0);
}

size_t ShenandoahOldGeneration::expend_promoted(size_t increment) {
  shenandoah_assert_heaplocked_or_safepoint();
  assert(get_promoted_expended() + increment <= get_promoted_reserve(), "Do not expend more promotion than budgeted");
  return Atomic::add(&_promoted_expended, increment);
}

size_t ShenandoahOldGeneration::unexpend_promoted(size_t decrement) {
  return Atomic::sub(&_promoted_expended, decrement);
}

size_t ShenandoahOldGeneration::get_promoted_expended() const {
  return Atomic::load(&_promoted_expended);
}
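
// A sketch of how these primitives fit together across one cycle (sizes are illustrative only):
//
//   set_promoted_reserve(8 * M);    // evacuation planning sets aside an 8 MB promotion budget
//   expend_promoted(512 * K);       // a thread claims 512 KB of that budget for a new PLAB
//   ...                             // objects are promoted into the PLAB
//   unexpend_promoted(64 * K);      // on PLAB retirement, the unused 64 KB tail is returned
//   reset_promoted_expended();      // accounting is cleared before the next budget is set
//
// Callers are expected to check can_promote() before calling expend_promoted(); the assertion
// above enforces that expenditure never exceeds the reserve.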

bool ShenandoahOldGeneration::can_allocate(const ShenandoahAllocRequest &req) const {
  assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "GCLAB pertains only to young-gen memory");

  const size_t requested_bytes = req.size() * HeapWordSize;
  // The promotion reserve may also be used for evacuations. If we can promote this object,
  // then we can also evacuate it.
  if (can_promote(requested_bytes)) {
    // The promotion reserve should be able to accommodate this request. The request
    // might still fail if alignment with the card table increases the size. The request
    // may also fail if the heap is badly fragmented and the free set cannot find room for it.
    return true;
  }

  if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
    // The promotion reserve cannot accommodate this plab request. Check if we still have room for
    // evacuations. Note that we cannot really know how much of the plab will be used for evacuations,
    // so here we only check that some evacuation reserve still exists.
    return get_evacuation_reserve() > 0;
  }

  // This is a shared allocation request. We've already checked that it can't be promoted, so if
  // it is a promotion, we return false. Otherwise, it is a shared evacuation request, and we allow
  // the allocation to proceed.
  return !req.is_promotion();
}
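
// In summary, the decision above reduces to the following ("shared" means an allocation made
// directly in old gen, outside any LAB):
//
//   request kind        fits in promotion reserve   does not fit
//   PLAB                allow                       allow only if evacuation reserve > 0
//   shared promotion    allow                       refuse
//   shared evacuation   allow                       allow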

void
ShenandoahOldGeneration::configure_plab_for_current_thread(const ShenandoahAllocRequest &req) {
  // Note: Even when a mutator is performing a promotion outside a LAB, we use a 'shared_gc' request.
  if (req.is_gc_alloc()) {
    const size_t actual_size = req.actual_size() * HeapWordSize;
    if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
      // We've created a new plab. Now we configure whether it will be used for both promotions
      // and evacuations, or for evacuations only.
      Thread* thread = Thread::current();
      ShenandoahThreadLocalData::reset_plab_promoted(thread);

      // The actual size of the allocation may be larger than the requested bytes (due to alignment on card boundaries).
      // If this puts us over our promotion budget, we need to disable future PLAB promotions for this thread.
      if (can_promote(actual_size)) {
        // Assume the entirety of this PLAB will be used for promotion. This prevents promotion from overreaching the budget.
        // When we retire this plab, we'll unexpend whatever portion was not actually used.
        expend_promoted(actual_size);
        ShenandoahThreadLocalData::enable_plab_promotions(thread);
        ShenandoahThreadLocalData::set_plab_actual_size(thread, actual_size);
      } else {
        // Disable promotions in this thread because the entirety of this PLAB must be available to hold old-gen evacuations.
        ShenandoahThreadLocalData::disable_plab_promotions(thread);
        ShenandoahThreadLocalData::set_plab_actual_size(thread, 0);
      }
    } else if (req.is_promotion()) {
      // Shared promotion.
      expend_promoted(actual_size);
    }
  }
}
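
// Example of the interplay above (illustrative sizes): a worker asks for a 4 KB PLAB and the
// free set hands back slightly more after card-boundary alignment. If the remaining promotion
// budget covers the actual size, the whole PLAB is pre-expended against the budget and the
// thread may promote into it; otherwise promotions through this PLAB are disabled and the
// entire buffer is reserved for old-gen evacuations until the thread retires it.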

size_t ShenandoahOldGeneration::get_live_bytes_after_last_mark() const {
  return _live_bytes_after_last_mark;
}

void ShenandoahOldGeneration::set_live_bytes_after_last_mark(size_t bytes) {
  if (bytes == 0) {
    // Restart search for best old-gen size to the initial state
    _live_bytes_after_last_mark = ShenandoahHeap::heap()->capacity() * INITIAL_LIVE_FRACTION / FRACTIONAL_DENOMINATOR;
    _growth_before_compaction = INITIAL_GROWTH_BEFORE_COMPACTION;
  } else {
    _live_bytes_after_last_mark = bytes;
    _growth_before_compaction /= 2;
    if (_growth_before_compaction < _min_growth_before_compaction) {
      _growth_before_compaction = _min_growth_before_compaction;
    }
  }
}

void ShenandoahOldGeneration::handle_failed_transfer() {
  _old_heuristics->trigger_cannot_expand();
}

size_t ShenandoahOldGeneration::usage_trigger_threshold() const {
  size_t result = _live_bytes_after_last_mark + (_live_bytes_after_last_mark * _growth_before_compaction) / FRACTIONAL_DENOMINATOR;
  return result;
}
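
// Worked example (illustrative values): if 100 MB of old-gen memory was live at the last old
// mark and _growth_before_compaction currently encodes 25% (FRACTIONAL_DENOMINATOR / 4), the
// growth trigger fires once old usage reaches 100 MB + 25 MB = 125 MB. Each completed old mark
// halves the permitted growth (see set_live_bytes_after_last_mark() above), but never below the
// floor derived from ShenandoahMinOldGenGrowthPercent.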

bool ShenandoahOldGeneration::contains(ShenandoahAffiliation affiliation) const {
  return affiliation == OLD_GENERATION;
}

bool ShenandoahOldGeneration::contains(ShenandoahHeapRegion* region) const {
  return region->is_old();
}

void ShenandoahOldGeneration::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* cl) {
  ShenandoahIncludeRegionClosure<OLD_GENERATION> old_regions_cl(cl);
  ShenandoahHeap::heap()->parallel_heap_region_iterate(&old_regions_cl);
}

void ShenandoahOldGeneration::heap_region_iterate(ShenandoahHeapRegionClosure* cl) {
  ShenandoahIncludeRegionClosure<OLD_GENERATION> old_regions_cl(cl);
  ShenandoahHeap::heap()->heap_region_iterate(&old_regions_cl);
}

void ShenandoahOldGeneration::set_concurrent_mark_in_progress(bool in_progress) {
  ShenandoahHeap::heap()->set_concurrent_old_mark_in_progress(in_progress);
}

bool ShenandoahOldGeneration::is_concurrent_mark_in_progress() {
  return ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress();
}

void ShenandoahOldGeneration::cancel_marking() {
  if (is_concurrent_mark_in_progress()) {
    log_debug(gc)("Abandon SATB buffers");
    ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
  }

  ShenandoahGeneration::cancel_marking();
}

void ShenandoahOldGeneration::cancel_gc() {
  shenandoah_assert_safepoint();
  if (is_idle()) {
#ifdef ASSERT
    validate_waiting_for_bootstrap();
#endif
  } else {
    log_info(gc)("Terminating old gc cycle.");
    // Stop marking
    cancel_marking();
    // Stop tracking old regions
    abandon_collection_candidates();
    // Remove old generation access to young generation mark queues
    ShenandoahHeap::heap()->young_generation()->set_old_gen_task_queues(nullptr);
    // Transition to IDLE now.
    transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
  }
}

void ShenandoahOldGeneration::prepare_gc() {
  // Now that we have made the old generation parsable, it is safe to reset the mark bitmap.
  assert(state() != FILLING, "Cannot reset old without making it parsable");

  ShenandoahGeneration::prepare_gc();
}

bool ShenandoahOldGeneration::entry_coalesce_and_fill() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  static const char* msg = "Coalescing and filling (Old)";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_coalesce_and_fill);

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  EventMark em("%s", msg);
  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              msg);

  return coalesce_and_fill();
}

// Make the old generation regions parsable, so they can be safely
// scanned when looking for objects in memory indicated by dirty cards.
bool ShenandoahOldGeneration::coalesce_and_fill() {
  transition_to(FILLING);

  // This code will see the same set of regions to fill on each resumption as it did
  // on the initial run. That's okay because each region keeps track of its own coalesce
  // and fill state. Regions that were filled on a prior attempt will not try to fill again.
  uint coalesce_and_fill_regions_count = _old_heuristics->get_coalesce_and_fill_candidates(_coalesce_and_fill_region_array);
  assert(coalesce_and_fill_regions_count <= ShenandoahHeap::heap()->num_regions(), "Sanity");
  if (coalesce_and_fill_regions_count == 0) {
    // No regions need to be filled.
    abandon_collection_candidates();
    return true;
  }

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  WorkerThreads* workers = heap->workers();
  uint nworkers = workers->active_workers();
  ShenandoahConcurrentCoalesceAndFillTask task(nworkers, _coalesce_and_fill_region_array, coalesce_and_fill_regions_count);

  log_debug(gc)("Starting (or resuming) coalesce-and-fill of " UINT32_FORMAT " old heap regions", coalesce_and_fill_regions_count);
  workers->run_task(&task);
  if (task.is_completed()) {
    // We no longer need to track regions that need to be coalesced and filled.
    abandon_collection_candidates();
    return true;
  } else {
    // Coalesce-and-fill has been preempted. We'll finish that effort in the future.  Do not invoke
    // ShenandoahGeneration::prepare_gc() until coalesce-and-fill is done because it resets the mark bitmap
    // and invokes set_mark_incomplete().  Coalesce-and-fill depends on the mark bitmap.
    log_debug(gc)("Suspending coalesce-and-fill of old heap regions");
    return false;
  }
}

void ShenandoahOldGeneration::transfer_pointers_from_satb() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  shenandoah_assert_safepoint();
  assert(heap->is_concurrent_old_mark_in_progress(), "Only necessary during old marking.");
  log_debug(gc)("Transfer SATB buffers");
  uint nworkers = heap->workers()->active_workers();
  StrongRootsScope scope(nworkers);

  ShenandoahPurgeSATBTask purge_satb_task(task_queues());
  heap->workers()->run_task(&purge_satb_task);
}

bool ShenandoahOldGeneration::contains(oop obj) const {
  return ShenandoahHeap::heap()->is_in_old(obj);
}

void ShenandoahOldGeneration::prepare_regions_and_collection_set(bool concurrent) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(!heap->is_full_gc_in_progress(), "Only for concurrent and degenerated GC");

  {
    ShenandoahGCPhase phase(concurrent ?
        ShenandoahPhaseTimings::final_update_region_states :
        ShenandoahPhaseTimings::degen_gc_final_update_region_states);
    ShenandoahFinalMarkUpdateRegionStateClosure cl(complete_marking_context());

    parallel_heap_region_iterate(&cl);
    heap->assert_pinned_region_status();
  }

  {
    // This doesn't actually choose a collection set, but prepares a list of
    // regions as 'candidates' for inclusion in a mixed collection.
    ShenandoahGCPhase phase(concurrent ?
        ShenandoahPhaseTimings::choose_cset :
        ShenandoahPhaseTimings::degen_gc_choose_cset);
    ShenandoahHeapLocker locker(heap->lock());
    _old_heuristics->prepare_for_old_collections();
  }

  {
    // Though we did not choose a collection set above, we still may have
    // freed up immediate garbage regions so proceed with rebuilding the free set.
    ShenandoahGCPhase phase(concurrent ?
        ShenandoahPhaseTimings::final_rebuild_freeset :
        ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
    ShenandoahHeapLocker locker(heap->lock());
    size_t cset_young_regions, cset_old_regions;
    size_t first_old, last_old, num_old;
    heap->free_set()->prepare_to_rebuild(cset_young_regions, cset_old_regions, first_old, last_old, num_old);
    // This is just old-gen completion.  No future budgeting required here.  The only reason to rebuild the freeset here
    // is in case there was any immediate old garbage identified.
    heap->free_set()->finish_rebuild(cset_young_regions, cset_old_regions, num_old);
  }
}

const char* ShenandoahOldGeneration::state_name(State state) {
  switch (state) {
    case WAITING_FOR_BOOTSTRAP:   return "Waiting for Bootstrap";
    case FILLING:                 return "Coalescing";
    case BOOTSTRAPPING:           return "Bootstrapping";
    case MARKING:                 return "Marking";
    case EVACUATING:              return "Evacuating";
    case EVACUATING_AFTER_GLOBAL: return "Evacuating (G)";
    default:
      ShouldNotReachHere();
      return "Unknown";
  }
}

void ShenandoahOldGeneration::transition_to(State new_state) {
  if (_state != new_state) {
    log_debug(gc)("Old generation transition from %s to %s", state_name(_state), state_name(new_state));
    EventMark event("Old was %s, now is %s", state_name(_state), state_name(new_state));
    validate_transition(new_state);
    _state = new_state;
  }
}

#ifdef ASSERT
// This diagram depicts the expected state transitions for marking the old generation
// and preparing for old collections. When a young generation cycle executes, the
// remembered set scan must visit objects in old regions. Visiting an object which
// has become dead on previous old cycles will result in crashes. To avoid visiting
// such objects, the remembered set scan will use the old generation mark bitmap when
// possible. It is _not_ possible to use the old generation bitmap when old marking
// is active (bitmap is not complete). For this reason, the old regions are made
// parsable _before_ the old generation bitmap is reset. The diagram does not depict
// cancellation of old collections by global or full collections.
//
// When a global collection supersedes an old collection, the global mark still
// "completes" the old mark bitmap. Subsequent remembered set scans may use the
// old generation mark bitmap, but any uncollected old regions must still be made parsable
// before the next old generation cycle begins. For this reason, a global collection may
// create mixed collection candidates and coalesce and fill candidates and will put
// the old generation in the respective states (EVACUATING or FILLING). After a Full GC,
// the mark bitmaps are all reset, all regions are parsable and the mark context will
// not be "complete". After a Full GC, remembered set scans will _not_ use the mark bitmap
// and we expect the old generation to be waiting for bootstrap.
//
//                              +-----------------+
//               +------------> |     FILLING     | <---+
//               |   +--------> |                 |     |
//               |   |          +-----------------+     |
//               |   |            |                     |
//               |   |            | Filling Complete    | <-> A global collection may
//               |   |            v                     |     move the old generation
//               |   |          +-----------------+     |     directly from waiting for
//           +-- |-- |--------> |     WAITING     |     |     bootstrap to filling or
//           |   |   |    +---- |  FOR BOOTSTRAP  | ----+     evacuating. It may also
//           |   |   |    |     +-----------------+           move from filling to waiting
//           |   |   |    |       |                           for bootstrap.
//           |   |   |    |       | Reset Bitmap
//           |   |   |    |       v
//           |   |   |    |     +-----------------+     +----------------------+
//           |   |   |    |     |    BOOTSTRAP    | <-> |       YOUNG GC       |
//           |   |   |    |     |                 |     | (RSet Parses Region) |
//           |   |   |    |     +-----------------+     +----------------------+
//           |   |   |    |       |
//           |   |   |    |       | Old Marking
//           |   |   |    |       v
//           |   |   |    |     +-----------------+     +----------------------+
//           |   |   |    |     |     MARKING     | <-> |       YOUNG GC       |
//           |   |   +--------- |                 |     | (RSet Parses Region) |
//           |   |        |     +-----------------+     +----------------------+
//           |   |        |       |
//           |   |        |       | Has Evacuation Candidates
//           |   |        |       v
//           |   |        |     +-----------------+     +--------------------+
//           |   |        +---> |    EVACUATING   | <-> |      YOUNG GC      |
//           |   +------------- |                 |     | (RSet Uses Bitmap) |
//           |                  +-----------------+     +--------------------+
//           |                    |
//           |                    | Global Cycle Coalesces and Fills Old Regions
//           |                    v
//           |                  +-----------------+     +--------------------+
//           +----------------- |    EVACUATING   | <-> |      YOUNG GC      |
//                              |   AFTER GLOBAL  |     | (RSet Uses Bitmap) |
//                              +-----------------+     +--------------------+
//
//
void ShenandoahOldGeneration::validate_transition(State new_state) {
  ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
  switch (new_state) {
    case FILLING:
      assert(_state != BOOTSTRAPPING, "Cannot begin making old regions parsable after bootstrapping");
      assert(is_mark_complete(), "Cannot begin filling without first completing marking, state is '%s'", state_name(_state));
      assert(_old_heuristics->has_coalesce_and_fill_candidates(), "Cannot begin filling without something to fill.");
      break;
    case WAITING_FOR_BOOTSTRAP:
      // GC cancellation can send us back here from any state.
      validate_waiting_for_bootstrap();
      break;
    case BOOTSTRAPPING:
      assert(_state == WAITING_FOR_BOOTSTRAP, "Cannot reset bitmap without making old regions parsable, state is '%s'", state_name(_state));
      assert(_old_heuristics->unprocessed_old_collection_candidates() == 0, "Cannot bootstrap with mixed collection candidates");
      assert(!heap->is_prepare_for_old_mark_in_progress(), "Cannot still be making old regions parsable.");
      break;
    case MARKING:
      assert(_state == BOOTSTRAPPING, "Must have finished bootstrapping before marking, state is '%s'", state_name(_state));
      assert(heap->young_generation()->old_gen_task_queues() != nullptr, "Young generation needs old mark queues.");
      assert(heap->is_concurrent_old_mark_in_progress(), "Should be marking old now.");
      break;
    case EVACUATING_AFTER_GLOBAL:
      assert(_state == EVACUATING, "Must have been evacuating, state is '%s'", state_name(_state));
      break;
    case EVACUATING:
      assert(_state == WAITING_FOR_BOOTSTRAP || _state == MARKING, "Cannot have old collection candidates without first marking, state is '%s'", state_name(_state));
      assert(_old_heuristics->unprocessed_old_collection_candidates() > 0, "Must have collection candidates here.");
      break;
    default:
      fatal("Unknown new state");
  }
}

bool ShenandoahOldGeneration::validate_waiting_for_bootstrap() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(!heap->is_concurrent_old_mark_in_progress(), "Cannot become ready for bootstrap during old mark.");
  assert(heap->young_generation()->old_gen_task_queues() == nullptr, "Cannot become ready for bootstrap when still setup for bootstrapping.");
  assert(!is_concurrent_mark_in_progress(), "Cannot be marking in IDLE");
  assert(!heap->young_generation()->is_bootstrap_cycle(), "Cannot have old mark queues if IDLE");
  assert(!_old_heuristics->has_coalesce_and_fill_candidates(), "Cannot have coalesce and fill candidates in IDLE");
  assert(_old_heuristics->unprocessed_old_collection_candidates() == 0, "Cannot have mixed collection candidates in IDLE");
  return true;
}
#endif

ShenandoahHeuristics* ShenandoahOldGeneration::initialize_heuristics(ShenandoahMode* gc_mode) {
  _old_heuristics = new ShenandoahOldHeuristics(this, ShenandoahGenerationalHeap::heap());
  _old_heuristics->set_guaranteed_gc_interval(ShenandoahGuaranteedOldGCInterval);
  _heuristics = _old_heuristics;
  return _heuristics;
}

void ShenandoahOldGeneration::record_success_concurrent(bool abbreviated) {
  heuristics()->record_success_concurrent();
  ShenandoahHeap::heap()->shenandoah_policy()->record_success_old();
}

void ShenandoahOldGeneration::handle_failed_evacuation() {
  if (_failed_evacuation.try_set()) {
    log_debug(gc)("Old gen evac failure.");
  }
}

void ShenandoahOldGeneration::handle_failed_promotion(Thread* thread, size_t size) {
  // We squelch excessive reports to reduce noise in logs.
  const size_t MaxReportsPerEpoch = 4;
  static size_t last_report_epoch = 0;
  static size_t epoch_report_count = 0;
  auto heap = ShenandoahGenerationalHeap::heap();

  size_t promotion_reserve;
  size_t promotion_expended;

  const size_t gc_id = heap->control_thread()->get_gc_id();

  if ((gc_id != last_report_epoch) || (epoch_report_count++ < MaxReportsPerEpoch)) {
    {
      // Promotion failures should be very rare.  Invest in providing useful diagnostic info.
      ShenandoahHeapLocker locker(heap->lock());
      promotion_reserve = get_promoted_reserve();
      promotion_expended = get_promoted_expended();
    }
    PLAB* const plab = ShenandoahThreadLocalData::plab(thread);
    const size_t words_remaining = (plab == nullptr)? 0: plab->words_remaining();
    const char* promote_enabled = ShenandoahThreadLocalData::allow_plab_promotions(thread)? "enabled": "disabled";

    log_info(gc, ergo)("Promotion failed, size " SIZE_FORMAT ", has plab? %s, PLAB remaining: " SIZE_FORMAT
                       ", plab promotions %s, promotion reserve: " SIZE_FORMAT ", promotion expended: " SIZE_FORMAT
                       ", old capacity: " SIZE_FORMAT ", old_used: " SIZE_FORMAT ", old unaffiliated regions: " SIZE_FORMAT,
                       size * HeapWordSize, plab == nullptr? "no": "yes",
                       words_remaining * HeapWordSize, promote_enabled, promotion_reserve, promotion_expended,
                       max_capacity(), used(), free_unaffiliated_regions());

    if ((gc_id == last_report_epoch) && (epoch_report_count >= MaxReportsPerEpoch)) {
      log_debug(gc, ergo)("Squelching additional promotion failure reports for current epoch");
    } else if (gc_id != last_report_epoch) {
      last_report_epoch = gc_id;
      epoch_report_count = 1;
    }
  }
}
void ShenandoahOldGeneration::handle_evacuation(HeapWord* obj, size_t words, bool promotion) {
  // Only register the copy of the object that won the evacuation race.
  _card_scan->register_object_without_lock(obj);

  // Mark the entire range of the evacuated object as dirty.  At the next remembered set scan,
  // we will clear dirty bits that do not hold interesting pointers.  It is more efficient to
  // do this in batch, in a background GC thread, than to try to carefully dirty only the cards
  // that hold interesting pointers right now.
  _card_scan->mark_range_as_dirty(obj, words);

  if (promotion) {
    // This evacuation was a promotion; track it as an allocation against old gen.
    increase_allocated(words * HeapWordSize);
  }
}

bool ShenandoahOldGeneration::has_unprocessed_collection_candidates() {
  return _old_heuristics->unprocessed_old_collection_candidates() > 0;
}

size_t ShenandoahOldGeneration::unprocessed_collection_candidates_live_memory() {
  return _old_heuristics->unprocessed_old_collection_candidates_live_memory();
}

void ShenandoahOldGeneration::abandon_collection_candidates() {
  _old_heuristics->abandon_collection_candidates();
}

void ShenandoahOldGeneration::prepare_for_mixed_collections_after_global_gc() {
  assert(is_mark_complete(), "Expected old generation mark to be complete after global cycle.");
  _old_heuristics->prepare_for_old_collections();
  log_info(gc, ergo)("After choosing global collection set, mixed candidates: " UINT32_FORMAT ", coalescing candidates: " SIZE_FORMAT,
               _old_heuristics->unprocessed_old_collection_candidates(),
               _old_heuristics->coalesce_and_fill_candidates_count());
}

void ShenandoahOldGeneration::parallel_heap_region_iterate_free(ShenandoahHeapRegionClosure* cl) {
  // Iterate over old and free regions (exclude young).
  ShenandoahExcludeRegionClosure<YOUNG_GENERATION> exclude_cl(cl);
  ShenandoahGeneration::parallel_heap_region_iterate_free(&exclude_cl);
}

void ShenandoahOldGeneration::set_parsable(bool parsable) {
  _is_parsable = parsable;
  if (_is_parsable) {
    // The current state would have been chosen during final mark of the global
    // collection, _before_ any decisions about class unloading have been made.
    //
    // After unloading classes, we have made the old generation regions parsable.
    // We can skip filling or transition to a state that knows everything has
    // already been filled.
    switch (state()) {
      case ShenandoahOldGeneration::EVACUATING:
        transition_to(ShenandoahOldGeneration::EVACUATING_AFTER_GLOBAL);
        break;
      case ShenandoahOldGeneration::FILLING:
        assert(_old_heuristics->unprocessed_old_collection_candidates() == 0, "Expected no mixed collection candidates");
        assert(_old_heuristics->coalesce_and_fill_candidates_count() > 0, "Expected coalesce and fill candidates");
        // When the heuristic put the old generation in this state, it did not know
        // that we would unload classes and make everything parsable. But we know
        // that now, so we can override this state.
        abandon_collection_candidates();
        transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
        break;
      default:
        // We can get here during a full GC. The full GC will cancel anything
        // happening in the old generation and return it to the waiting for bootstrap
        // state. The full GC will then record that the old regions are parsable
        // after rebuilding the remembered set.
        assert(is_idle(), "Unexpected state %s at end of global GC", state_name());
        break;
    }
  }
}

void ShenandoahOldGeneration::complete_mixed_evacuations() {
  assert(is_doing_mixed_evacuations(), "Mixed evacuations should be in progress");
  if (!_old_heuristics->has_coalesce_and_fill_candidates()) {
    // No candidate regions to coalesce and fill
    transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
    return;
  }

  if (state() == ShenandoahOldGeneration::EVACUATING) {
    transition_to(ShenandoahOldGeneration::FILLING);
    return;
  }

  // Here, we have no more candidates for mixed collections. The candidates for coalescing
  // and filling have already been processed during the global cycle, so there is nothing
  // more to do.
  assert(state() == ShenandoahOldGeneration::EVACUATING_AFTER_GLOBAL, "Should be evacuating after a global cycle");
  abandon_collection_candidates();
  transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
}

void ShenandoahOldGeneration::abandon_mixed_evacuations() {
  switch (state()) {
    case ShenandoahOldGeneration::EVACUATING:
      transition_to(ShenandoahOldGeneration::FILLING);
      break;
    case ShenandoahOldGeneration::EVACUATING_AFTER_GLOBAL:
      abandon_collection_candidates();
      transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
      break;
    default:
      log_warning(gc)("Abandon mixed evacuations in unexpected state: %s", state_name(state()));
      ShouldNotReachHere();
      break;
  }
}

void ShenandoahOldGeneration::clear_cards_for(ShenandoahHeapRegion* region) {
  _card_scan->mark_range_as_empty(region->bottom(), pointer_delta(region->end(), region->bottom()));
}

void ShenandoahOldGeneration::mark_card_as_dirty(void* location) {
  _card_scan->mark_card_as_dirty((HeapWord*)location);
}