/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */


#include "precompiled.hpp"

#include "gc/shared/strongRootsScope.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
#include "gc/shenandoah/shenandoahMarkClosures.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "runtime/threads.hpp"
#include "utilities/events.hpp"

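// Flushes a thread's local SATB buffer into the global SATB queue set so that
// even partially filled buffers can be processed as completed buffers below.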
class ShenandoahFlushAllSATB : public ThreadClosure {
private:
  SATBMarkQueueSet& _satb_qset;

public:
  explicit ShenandoahFlushAllSATB(SATBMarkQueueSet& satb_qset) :
    _satb_qset(satb_qset) {}

  void do_thread(Thread* thread) override {
    // Transfer any partial buffer to the qset for completed buffer processing.
    _satb_qset.flush_queue(ShenandoahThreadLocalData::satb_mark_queue(thread));
  }
};

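// Closure applied to completed SATB buffers: entries that point into active old
// regions are marked through the old mark queue; all other entries are discarded
// and counted as "trashed".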
class ShenandoahProcessOldSATB : public SATBBufferClosure {
private:
  ShenandoahObjToScanQueue*       _queue;
  ShenandoahHeap*                 _heap;
  ShenandoahMarkingContext* const _mark_context;
  size_t                          _trashed_oops;

public:
  explicit ShenandoahProcessOldSATB(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()),
    _trashed_oops(0) {}

  void do_buffer(void** buffer, size_t size) override {
    assert(size == 0 || !_heap->has_forwarded_objects() || _heap->is_concurrent_old_mark_in_progress(), "Forwarded objects are not expected here");
    for (size_t i = 0; i < size; ++i) {
      oop *p = (oop *) &buffer[i];
      ShenandoahHeapRegion* region = _heap->heap_region_containing(*p);
      if (region->is_old() && region->is_active()) {
        ShenandoahMark::mark_through_ref<oop, OLD>(p, _queue, nullptr, _mark_context, false);
      } else {
        _trashed_oops++;
      }
    }
  }

  size_t trashed_oops() const {
    return _trashed_oops;
  }
};

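// Worker task that flushes every thread's SATB buffer and then drains the
// completed buffers, retaining only entries that refer to active old regions
// on the old generation mark queues.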
class ShenandoahPurgeSATBTask : public WorkerTask {
private:
  ShenandoahObjToScanQueueSet* _mark_queues;
  volatile size_t             _trashed_oops;

public:
  explicit ShenandoahPurgeSATBTask(ShenandoahObjToScanQueueSet* queues) :
    WorkerTask("Purge SATB"),
    _mark_queues(queues),
    _trashed_oops(0) {
    Threads::change_thread_claim_token();
  }

  ~ShenandoahPurgeSATBTask() {
    if (_trashed_oops > 0) {
      log_info(gc)("Purged " SIZE_FORMAT " oops from old generation SATB buffers", _trashed_oops);
    }
  }

  void work(uint worker_id) override {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahSATBMarkQueueSet& satb_queues = ShenandoahBarrierSet::satb_mark_queue_set();
    ShenandoahFlushAllSATB flusher(satb_queues);
    Threads::possibly_parallel_threads_do(true /* is_par */, &flusher);

    ShenandoahObjToScanQueue* mark_queue = _mark_queues->queue(worker_id);
    ShenandoahProcessOldSATB processor(mark_queue);
    while (satb_queues.apply_closure_to_completed_buffer(&processor)) {}

    Atomic::add(&_trashed_oops, processor.trashed_oops());
  }
};

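// Worker task that coalesces contiguous dead objects in old regions and fills the
// resulting gaps with filler objects, making the regions parsable. Workers record
// whether they were preempted before completing their share of the regions.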
class ShenandoahConcurrentCoalesceAndFillTask : public WorkerTask {
private:
  uint                    _nworkers;
  ShenandoahHeapRegion**  _coalesce_and_fill_region_array;
  uint                    _coalesce_and_fill_region_count;
  volatile bool           _is_preempted;

public:
  ShenandoahConcurrentCoalesceAndFillTask(uint nworkers,
                                          ShenandoahHeapRegion** coalesce_and_fill_region_array,
                                          uint region_count) :
    WorkerTask("Shenandoah Concurrent Coalesce and Fill"),
    _nworkers(nworkers),
    _coalesce_and_fill_region_array(coalesce_and_fill_region_array),
    _coalesce_and_fill_region_count(region_count),
    _is_preempted(false) {
  }

  void work(uint worker_id) override {
    ShenandoahWorkerTimingsTracker timer(ShenandoahPhaseTimings::conc_coalesce_and_fill, ShenandoahPhaseTimings::ScanClusters, worker_id);
    for (uint region_idx = worker_id; region_idx < _coalesce_and_fill_region_count; region_idx += _nworkers) {
      ShenandoahHeapRegion* r = _coalesce_and_fill_region_array[region_idx];
      if (r->is_humongous()) {
        // There is only one object in this region and it is not garbage,
        // so no need to coalesce or fill.
        continue;
      }

      if (!r->oop_coalesce_and_fill(true)) {
        // Coalesce and fill has been preempted
        Atomic::store(&_is_preempted, true);
        return;
      }
    }
  }

  // Value returned from is_completed() is only valid after all worker threads have terminated.
  bool is_completed() {
    return !Atomic::load(&_is_preempted);
  }
};

ShenandoahOldGeneration::ShenandoahOldGeneration(uint max_queues, size_t max_capacity, size_t soft_max_capacity)
  : ShenandoahGeneration(OLD, max_queues, max_capacity, soft_max_capacity),
    _coalesce_and_fill_region_array(NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, ShenandoahHeap::heap()->num_regions(), mtGC)),
    _old_heuristics(nullptr),
    _region_balance(0),
    _promoted_reserve(0),
    _promoted_expended(0),
    _promotion_potential(0),
    _pad_for_promote_in_place(0),
    _promotable_humongous_regions(0),
    _promotable_regular_regions(0),
    _is_parseable(true),
    _state(WAITING_FOR_BOOTSTRAP),
    _growth_before_compaction(INITIAL_GROWTH_BEFORE_COMPACTION),
    _min_growth_before_compaction((ShenandoahMinOldGenGrowthPercent * FRACTIONAL_DENOMINATOR) / 100)
{
  _live_bytes_after_last_mark = ShenandoahHeap::heap()->capacity() * INITIAL_LIVE_FRACTION / FRACTIONAL_DENOMINATOR;
  // Always clear references for old generation
  ref_processor()->set_soft_reference_policy(true);
}

void ShenandoahOldGeneration::set_promoted_reserve(size_t new_val) {
  shenandoah_assert_heaplocked_or_safepoint();
  _promoted_reserve = new_val;
}

size_t ShenandoahOldGeneration::get_promoted_reserve() const {
  return _promoted_reserve;
}

void ShenandoahOldGeneration::augment_promoted_reserve(size_t increment) {
  shenandoah_assert_heaplocked_or_safepoint();
  _promoted_reserve += increment;
}

void ShenandoahOldGeneration::reset_promoted_expended() {
  shenandoah_assert_heaplocked_or_safepoint();
  Atomic::store(&_promoted_expended, (size_t) 0);
}

size_t ShenandoahOldGeneration::expend_promoted(size_t increment) {
  shenandoah_assert_heaplocked_or_safepoint();
  assert(get_promoted_expended() + increment <= get_promoted_reserve(), "Do not expend more promotion than budgeted");
  return Atomic::add(&_promoted_expended, increment);
}

size_t ShenandoahOldGeneration::unexpend_promoted(size_t decrement) {
  return Atomic::sub(&_promoted_expended, decrement);
}

size_t ShenandoahOldGeneration::get_promoted_expended() {
  return Atomic::load(&_promoted_expended);
}

size_t ShenandoahOldGeneration::get_live_bytes_after_last_mark() const {
  return _live_bytes_after_last_mark;
}

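// Each time live bytes are recorded after a mark, halve the allowed growth before
// the next compaction trigger, but never let it drop below the configured minimum.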
void ShenandoahOldGeneration::set_live_bytes_after_last_mark(size_t bytes) {
  _live_bytes_after_last_mark = bytes;
  _growth_before_compaction /= 2;
  if (_growth_before_compaction < _min_growth_before_compaction) {
    _growth_before_compaction = _min_growth_before_compaction;
  }
}

void ShenandoahOldGeneration::handle_failed_transfer() {
  _old_heuristics->trigger_cannot_expand();
}

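// Returns the old generation usage, in bytes, above which the heuristics should
// trigger an old collection because old has grown too much since the last mark.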
size_t ShenandoahOldGeneration::usage_trigger_threshold() const {
  size_t result = _live_bytes_after_last_mark + (_live_bytes_after_last_mark * _growth_before_compaction) / FRACTIONAL_DENOMINATOR;
  return result;
}

bool ShenandoahOldGeneration::contains(ShenandoahHeapRegion* region) const {
  // TODO: Should this be region->is_old() instead?
  return !region->is_young();
}

void ShenandoahOldGeneration::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* cl) {
  ShenandoahIncludeRegionClosure<OLD_GENERATION> old_regions_cl(cl);
  ShenandoahHeap::heap()->parallel_heap_region_iterate(&old_regions_cl);
}

void ShenandoahOldGeneration::heap_region_iterate(ShenandoahHeapRegionClosure* cl) {
  ShenandoahIncludeRegionClosure<OLD_GENERATION> old_regions_cl(cl);
  ShenandoahHeap::heap()->heap_region_iterate(&old_regions_cl);
}

void ShenandoahOldGeneration::set_concurrent_mark_in_progress(bool in_progress) {
  ShenandoahHeap::heap()->set_concurrent_old_mark_in_progress(in_progress);
}

bool ShenandoahOldGeneration::is_concurrent_mark_in_progress() {
  return ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress();
}

void ShenandoahOldGeneration::cancel_marking() {
  if (is_concurrent_mark_in_progress()) {
    log_info(gc)("Abandon SATB buffers");
    ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
  }

  ShenandoahGeneration::cancel_marking();
}

void ShenandoahOldGeneration::prepare_gc() {
  // Now that we have made the old generation parsable, it is safe to reset the mark bitmap.
  assert(state() != FILLING, "Cannot reset old without making it parsable");

  ShenandoahGeneration::prepare_gc();
}

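// Concurrent-phase wrapper around coalesce_and_fill(): sets up the timing phase,
// collector counters, event mark, and worker scope before doing the actual work.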
bool ShenandoahOldGeneration::entry_coalesce_and_fill() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  static const char* msg = "Coalescing and filling (OLD)";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_coalesce_and_fill);

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  EventMark em("%s", msg);
  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              msg);

  return coalesce_and_fill();
}

// Make the old generation regions parsable, so they can be safely
// scanned when looking for objects in memory indicated by dirty cards.
bool ShenandoahOldGeneration::coalesce_and_fill() {
  transition_to(FILLING);

  // This code will see the same set of regions to fill on each resumption as it did
  // on the initial run. That's okay because each region keeps track of its own coalesce
  // and fill state. Regions that were filled on a prior attempt will not try to fill again.
  uint coalesce_and_fill_regions_count = heuristics()->get_coalesce_and_fill_candidates(_coalesce_and_fill_region_array);
  assert(coalesce_and_fill_regions_count <= ShenandoahHeap::heap()->num_regions(), "Sanity");
  if (coalesce_and_fill_regions_count == 0) {
    // No regions need to be filled.
    abandon_collection_candidates();
    return true;
  }

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  WorkerThreads* workers = heap->workers();
  uint nworkers = workers->active_workers();
  ShenandoahConcurrentCoalesceAndFillTask task(nworkers, _coalesce_and_fill_region_array, coalesce_and_fill_regions_count);

  log_info(gc)("Starting (or resuming) coalesce-and-fill of " UINT32_FORMAT " old heap regions", coalesce_and_fill_regions_count);
  workers->run_task(&task);
  if (task.is_completed()) {
    // We no longer need to track regions that need to be coalesced and filled.
    abandon_collection_candidates();
    return true;
  } else {
    // Coalesce-and-fill has been preempted. We'll finish that effort in the future.  Do not invoke
    // ShenandoahGeneration::prepare_gc() until coalesce-and-fill is done because it resets the mark bitmap
    // and invokes set_mark_incomplete().  Coalesce-and-fill depends on the mark bitmap.
    log_debug(gc)("Suspending coalesce-and-fill of old heap regions");
    return false;
  }
}

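// At a safepoint during old marking, flush all SATB buffers and retain on the old
// mark queues only the entries that point into active old regions.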
void ShenandoahOldGeneration::transfer_pointers_from_satb() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  shenandoah_assert_safepoint();
  assert(heap->is_concurrent_old_mark_in_progress(), "Only necessary during old marking.");
  log_info(gc)("Transfer SATB buffers");
  uint nworkers = heap->workers()->active_workers();
  StrongRootsScope scope(nworkers);

  ShenandoahPurgeSATBTask purge_satb_task(task_queues());
  heap->workers()->run_task(&purge_satb_task);
}

bool ShenandoahOldGeneration::contains(oop obj) const {
  return ShenandoahHeap::heap()->is_in_old(obj);
}

void ShenandoahOldGeneration::prepare_regions_and_collection_set(bool concurrent) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(!heap->is_full_gc_in_progress(), "Only for concurrent and degenerated GC");

  {
    ShenandoahGCPhase phase(concurrent ?
        ShenandoahPhaseTimings::final_update_region_states :
        ShenandoahPhaseTimings::degen_gc_final_update_region_states);
    ShenandoahFinalMarkUpdateRegionStateClosure cl(complete_marking_context());

    parallel_heap_region_iterate(&cl);
    heap->assert_pinned_region_status();
  }

  {
    // This doesn't actually choose a collection set, but prepares a list of
    // regions as 'candidates' for inclusion in a mixed collection.
    ShenandoahGCPhase phase(concurrent ?
        ShenandoahPhaseTimings::choose_cset :
        ShenandoahPhaseTimings::degen_gc_choose_cset);
    ShenandoahHeapLocker locker(heap->lock());
    _old_heuristics->prepare_for_old_collections();
  }

  {
    // Though we did not choose a collection set above, we still may have
    // freed up immediate garbage regions so proceed with rebuilding the free set.
    ShenandoahGCPhase phase(concurrent ?
        ShenandoahPhaseTimings::final_rebuild_freeset :
        ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
    ShenandoahHeapLocker locker(heap->lock());
    size_t cset_young_regions, cset_old_regions;
    size_t first_old, last_old, num_old;
    heap->free_set()->prepare_to_rebuild(cset_young_regions, cset_old_regions, first_old, last_old, num_old);
    // This is just old-gen completion.  No future budgeting required here.  The only reason to rebuild the freeset here
    // is in case there was any immediate old garbage identified.
    heap->free_set()->rebuild(cset_young_regions, cset_old_regions);
  }
}

const char* ShenandoahOldGeneration::state_name(State state) {
  switch (state) {
    case WAITING_FOR_BOOTSTRAP:   return "Waiting for Bootstrap";
    case FILLING:                 return "Coalescing";
    case BOOTSTRAPPING:           return "Bootstrapping";
    case MARKING:                 return "Marking";
    case EVACUATING:              return "Evacuating";
    case EVACUATING_AFTER_GLOBAL: return "Evacuating (G)";
    default:
      ShouldNotReachHere();
      return "Unknown";
  }
}

void ShenandoahOldGeneration::transition_to(State new_state) {
  if (_state != new_state) {
    log_info(gc)("Old generation transition from %s to %s", state_name(_state), state_name(new_state));
    validate_transition(new_state);
    _state = new_state;
  }
}

#ifdef ASSERT
// This diagram depicts the expected state transitions for marking the old generation
// and preparing for old collections. When a young generation cycle executes, the
// remembered set scan must visit objects in old regions. Visiting an object that
// died during a previous old cycle will result in crashes. To avoid visiting such
// objects, the remembered set scan will use the old generation mark bitmap when
// possible. It is _not_ possible to use the old generation bitmap when old marking
// is active (bitmap is not complete). For this reason, the old regions are made
// parsable _before_ the old generation bitmap is reset. The diagram does not depict
// cancellation of old collections by global or full collections.
//
// When a global collection supersedes an old collection, the global mark still
// "completes" the old mark bitmap. Subsequent remembered set scans may use the
// old generation mark bitmap, but any uncollected old regions must still be made parsable
// before the next old generation cycle begins. For this reason, a global collection may
// create mixed collection candidates and coalesce-and-fill candidates, and will put
// the old generation in the corresponding state (EVACUATING or FILLING). After a Full GC,
// the mark bitmaps are all reset, all regions are parsable and the mark context will
// not be "complete". After a Full GC, remembered set scans will _not_ use the mark bitmap
// and we expect the old generation to be waiting for bootstrap.
//
//                              +-----------------+
//               +------------> |     FILLING     | <---+
//               |   +--------> |                 |     |
//               |   |          +-----------------+     |
//               |   |            |                     |
//               |   |            | Filling Complete    | <-> A global collection may
//               |   |            v                     |     move the old generation
//               |   |          +-----------------+     |     directly from waiting for
//           +-- |-- |--------> |     WAITING     |     |     bootstrap to filling or
//           |   |   |    +---- |  FOR BOOTSTRAP  | ----+     evacuating. It may also
//           |   |   |    |     +-----------------+           move from filling to waiting
//           |   |   |    |       |                           for bootstrap.
//           |   |   |    |       | Reset Bitmap
//           |   |   |    |       v
//           |   |   |    |     +-----------------+     +----------------------+
//           |   |   |    |     |    BOOTSTRAP    | <-> |       YOUNG GC       |
//           |   |   |    |     |                 |     | (RSet Parses Region) |
//           |   |   |    |     +-----------------+     +----------------------+
//           |   |   |    |       |
//           |   |   |    |       | Old Marking
//           |   |   |    |       v
//           |   |   |    |     +-----------------+     +----------------------+
//           |   |   |    |     |     MARKING     | <-> |       YOUNG GC       |
//           |   |   +--------- |                 |     | (RSet Parses Region) |
//           |   |        |     +-----------------+     +----------------------+
//           |   |        |       |
//           |   |        |       | Has Evacuation Candidates
//           |   |        |       v
//           |   |        |     +-----------------+     +--------------------+
//           |   |        +---> |    EVACUATING   | <-> |      YOUNG GC      |
//           |   +------------- |                 |     | (RSet Uses Bitmap) |
//           |                  +-----------------+     +--------------------+
//           |                    |
//           |                    | Global Cycle Coalesces and Fills Old Regions
//           |                    v
//           |                  +-----------------+     +--------------------+
//           +----------------- |    EVACUATING   | <-> |      YOUNG GC      |
//                              |   AFTER GLOBAL  |     | (RSet Uses Bitmap) |
//                              +-----------------+     +--------------------+
//
//
void ShenandoahOldGeneration::validate_transition(State new_state) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  switch (new_state) {
    case FILLING:
      assert(_state != BOOTSTRAPPING, "Cannot begin making old regions parsable after bootstrapping");
      assert(is_mark_complete(), "Cannot begin filling without first completing marking, state is '%s'", state_name(_state));
      assert(_old_heuristics->has_coalesce_and_fill_candidates(), "Cannot begin filling without something to fill.");
      break;
    case WAITING_FOR_BOOTSTRAP:
      // GC cancellation can send us back here from any state.
      validate_waiting_for_bootstrap();
      break;
    case BOOTSTRAPPING:
      assert(_state == WAITING_FOR_BOOTSTRAP, "Cannot reset bitmap without making old regions parsable, state is '%s'", state_name(_state));
      assert(_old_heuristics->unprocessed_old_collection_candidates() == 0, "Cannot bootstrap with mixed collection candidates");
      assert(!heap->is_prepare_for_old_mark_in_progress(), "Cannot still be making old regions parsable.");
      break;
    case MARKING:
      assert(_state == BOOTSTRAPPING, "Must have finished bootstrapping before marking, state is '%s'", state_name(_state));
      assert(heap->young_generation()->old_gen_task_queues() != nullptr, "Young generation needs old mark queues.");
      assert(heap->is_concurrent_old_mark_in_progress(), "Should be marking old now.");
      break;
    case EVACUATING_AFTER_GLOBAL:
      assert(_state == EVACUATING, "Must have been evacuating, state is '%s'", state_name(_state));
      break;
    case EVACUATING:
      assert(_state == WAITING_FOR_BOOTSTRAP || _state == MARKING, "Cannot have old collection candidates without first marking, state is '%s'", state_name(_state));
      assert(_old_heuristics->unprocessed_old_collection_candidates() > 0, "Must have collection candidates here.");
      break;
    default:
      fatal("Unknown new state");
  }
}

bool ShenandoahOldGeneration::validate_waiting_for_bootstrap() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(!heap->is_concurrent_old_mark_in_progress(), "Cannot become ready for bootstrap during old mark.");
  assert(heap->young_generation()->old_gen_task_queues() == nullptr, "Cannot become ready for bootstrap when still setup for bootstrapping.");
  assert(!is_concurrent_mark_in_progress(), "Cannot be marking in IDLE");
  assert(!heap->young_generation()->is_bootstrap_cycle(), "Cannot have old mark queues if IDLE");
  assert(!heuristics()->has_coalesce_and_fill_candidates(), "Cannot have coalesce and fill candidates in IDLE");
  assert(heuristics()->unprocessed_old_collection_candidates() == 0, "Cannot have mixed collection candidates in IDLE");
  return true;
}
#endif

ShenandoahHeuristics* ShenandoahOldGeneration::initialize_heuristics(ShenandoahMode* gc_mode) {
  _old_heuristics = new ShenandoahOldHeuristics(this);
  _old_heuristics->set_guaranteed_gc_interval(ShenandoahGuaranteedOldGCInterval);
  _heuristics = _old_heuristics;
  return _heuristics;
}

void ShenandoahOldGeneration::record_success_concurrent(bool abbreviated) {
  heuristics()->record_success_concurrent(abbreviated);
  ShenandoahHeap::heap()->shenandoah_policy()->record_success_old();
}

void ShenandoahOldGeneration::handle_failed_evacuation() {
  if (_failed_evacuation.try_set()) {
    log_info(gc)("Old gen evac failure.");
  }
}

void ShenandoahOldGeneration::handle_failed_promotion(Thread* thread, size_t size) {
  // We squelch excessive reports to reduce noise in logs.
  const size_t MaxReportsPerEpoch = 4;
  static size_t last_report_epoch = 0;
  static size_t epoch_report_count = 0;
  auto heap = ShenandoahGenerationalHeap::heap();

  size_t promotion_reserve;
  size_t promotion_expended;

  const size_t gc_id = heap->control_thread()->get_gc_id();

  if ((gc_id != last_report_epoch) || (epoch_report_count++ < MaxReportsPerEpoch)) {
    {
      // Promotion failures should be very rare.  Invest in providing useful diagnostic info.
      ShenandoahHeapLocker locker(heap->lock());
      promotion_reserve = get_promoted_reserve();
      promotion_expended = get_promoted_expended();
    }
    PLAB* const plab = ShenandoahThreadLocalData::plab(thread);
    const size_t words_remaining = (plab == nullptr) ? 0 : plab->words_remaining();
    const char* promote_enabled = ShenandoahThreadLocalData::allow_plab_promotions(thread) ? "enabled" : "disabled";

    log_info(gc, ergo)("Promotion failed, size " SIZE_FORMAT ", has plab? %s, PLAB remaining: " SIZE_FORMAT
                       ", plab promotions %s, promotion reserve: " SIZE_FORMAT ", promotion expended: " SIZE_FORMAT
                       ", old capacity: " SIZE_FORMAT ", old_used: " SIZE_FORMAT ", old unaffiliated regions: " SIZE_FORMAT,
                       size * HeapWordSize, plab == nullptr ? "no" : "yes",
                       words_remaining * HeapWordSize, promote_enabled, promotion_reserve, promotion_expended,
                       max_capacity(), used(), free_unaffiliated_regions());

    if ((gc_id == last_report_epoch) && (epoch_report_count >= MaxReportsPerEpoch)) {
      log_info(gc, ergo)("Squelching additional promotion failure reports for current epoch");
    } else if (gc_id != last_report_epoch) {
      last_report_epoch = gc_id;
      epoch_report_count = 1;
    }
  }
}

void ShenandoahOldGeneration::handle_evacuation(HeapWord* obj, size_t words, bool promotion) {
  auto heap = ShenandoahGenerationalHeap::heap();
  auto card_scan = heap->card_scan();

  // Only register the copy of the object that won the evacuation race.
  card_scan->register_object_without_lock(obj);

  // Mark the entire range of the evacuated object as dirty.  At next remembered set scan,
  // we will clear dirty bits that do not hold interesting pointers.  It's more efficient to
  // do this in batch, in a background GC thread than to try to carefully dirty only cards
  // that hold interesting pointers right now.
  card_scan->mark_range_as_dirty(obj, words);

  if (promotion) {
    // This evacuation was a promotion; track it as an allocation against the old generation.
    increase_allocated(words * HeapWordSize);
  }
}

bool ShenandoahOldGeneration::has_unprocessed_collection_candidates() {
  return _old_heuristics->unprocessed_old_collection_candidates() > 0;
}

size_t ShenandoahOldGeneration::unprocessed_collection_candidates_live_memory() {
  return _old_heuristics->unprocessed_old_collection_candidates_live_memory();
}

void ShenandoahOldGeneration::abandon_collection_candidates() {
  _old_heuristics->abandon_collection_candidates();
}

void ShenandoahOldGeneration::prepare_for_mixed_collections_after_global_gc() {
  assert(is_mark_complete(), "Expected old generation mark to be complete after global cycle.");
  _old_heuristics->prepare_for_old_collections();
  log_info(gc)("After choosing global collection set, mixed candidates: " UINT32_FORMAT ", coalescing candidates: " SIZE_FORMAT,
               _old_heuristics->unprocessed_old_collection_candidates(),
               _old_heuristics->coalesce_and_fill_candidates_count());
}

void ShenandoahOldGeneration::maybe_trigger_collection(size_t first_old_region, size_t last_old_region, size_t old_region_count) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  const size_t old_region_span = (first_old_region <= last_old_region) ? (last_old_region + 1 - first_old_region) : 0;
  const size_t allowed_old_gen_span = heap->num_regions() - (ShenandoahGenerationalHumongousReserve * heap->num_regions() / 100);

  // Tolerate lower density if total span is small.  Here's the implementation:
  //   if old_gen spans more than 100% and density < 75%, trigger old-defrag
  //   else if old_gen spans more than 87.5% and density < 62.5%, trigger old-defrag
  //   else if old_gen spans more than 75% and density < 50%, trigger old-defrag
  //   else if old_gen spans more than 62.5% and density < 37.5%, trigger old-defrag
  //   else if old_gen spans more than 50% and density < 25%, trigger old-defrag
  //
  // A previous implementation was more aggressive in triggering, resulting in degraded throughput when
  // humongous allocation was not required.

  const size_t old_available = available();
  const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
  const size_t old_unaffiliated_available = free_unaffiliated_regions() * region_size_bytes;
  assert(old_available >= old_unaffiliated_available, "sanity");
  const size_t old_fragmented_available = old_available - old_unaffiliated_available;

  const size_t old_bytes_consumed = old_region_count * region_size_bytes - old_fragmented_available;
  const size_t old_bytes_spanned = old_region_span * region_size_bytes;
  const double old_density = ((double) old_bytes_consumed) / old_bytes_spanned;

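  // Evaluate the thresholds listed above, from the largest span requirement (100% of the
  // allowed span, density below 75%) down to 50% of the span, in steps of 12.5%.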
  uint eighths = 8;
  for (uint i = 0; i < 5; i++) {
    size_t span_threshold = eighths * allowed_old_gen_span / 8;
    double density_threshold = (eighths - 2) / 8.0;
    if ((old_region_span >= span_threshold) && (old_density < density_threshold)) {
      heuristics()->trigger_old_is_fragmented(old_density, first_old_region, last_old_region);
      break;
    }
    eighths--;
  }

  const size_t old_used = used() + get_humongous_waste();
  const size_t trigger_threshold = usage_trigger_threshold();
  // Detects unsigned arithmetic underflow
  assert(old_used <= heap->free_set()->capacity(),
         "Old used (" SIZE_FORMAT ", " SIZE_FORMAT ") must not be more than heap capacity (" SIZE_FORMAT ")",
         used(), get_humongous_waste(), heap->free_set()->capacity());

  if (old_used > trigger_threshold) {
    heuristics()->trigger_old_has_grown();
  }
}

void ShenandoahOldGeneration::parallel_region_iterate_free(ShenandoahHeapRegionClosure* cl) {
  // Iterate over old and free regions (exclude young).
  ShenandoahExcludeRegionClosure<YOUNG_GENERATION> exclude_cl(cl);
  ShenandoahGeneration::parallel_region_iterate_free(&exclude_cl);
}

void ShenandoahOldGeneration::set_parseable(bool parseable) {
  _is_parseable = parseable;
  if (_is_parseable) {
    // The current state would have been chosen during final mark of the global
    // collection, _before_ any decisions about class unloading have been made.
    //
    // After unloading classes, we have made the old generation regions parseable.
    // We can skip filling or transition to a state that knows everything has
    // already been filled.
    switch (state()) {
      case ShenandoahOldGeneration::EVACUATING:
        transition_to(ShenandoahOldGeneration::EVACUATING_AFTER_GLOBAL);
        break;
      case ShenandoahOldGeneration::FILLING:
        assert(_old_heuristics->unprocessed_old_collection_candidates() == 0, "Expected no mixed collection candidates");
        assert(_old_heuristics->coalesce_and_fill_candidates_count() > 0, "Expected coalesce and fill candidates");
        // When the heuristic put the old generation in this state, it didn't know
        // that we would unload classes and make everything parseable. But we know
        // that now, so we can override this state.
        // TODO: It would be nicer if we didn't have to 'correct' this situation.
        abandon_collection_candidates();
        transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
        break;
      default:
        // We can get here during a full GC. The full GC will cancel anything
        // happening in the old generation and return it to the waiting for bootstrap
        // state. The full GC will then record that the old regions are parseable
        // after rebuilding the remembered set.
        assert(is_idle(), "Unexpected state %s at end of global GC", state_name());
        break;
    }
  }
}

void ShenandoahOldGeneration::complete_mixed_evacuations() {
  assert(is_doing_mixed_evacuations(), "Mixed evacuations should be in progress");
  if (!_old_heuristics->has_coalesce_and_fill_candidates()) {
    // No candidate regions to coalesce and fill
    transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
    return;
  }

  if (state() == ShenandoahOldGeneration::EVACUATING) {
    transition_to(ShenandoahOldGeneration::FILLING);
    return;
  }

  // Here, we have no more candidates for mixed collections. The candidates for coalescing
  // and filling have already been processed during the global cycle, so there is nothing
  // more to do.
  assert(state() == ShenandoahOldGeneration::EVACUATING_AFTER_GLOBAL, "Should be evacuating after a global cycle");
  abandon_collection_candidates();
  transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
}

void ShenandoahOldGeneration::abandon_mixed_evacuations() {
  switch (state()) {
    case ShenandoahOldGeneration::EVACUATING:
      transition_to(ShenandoahOldGeneration::FILLING);
      break;
    case ShenandoahOldGeneration::EVACUATING_AFTER_GLOBAL:
      abandon_collection_candidates();
      transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
      break;
    default:
      ShouldNotReachHere();
      break;
  }
}