/*
 * Copyright (c) 2021, Amazon.com, Inc. or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */


#include "precompiled.hpp"

#include "gc/shared/strongRootsScope.hpp"
#include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMarkClosures.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahStringDedup.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/threads.hpp"
#include "utilities/events.hpp"

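// Thread closure which claims each thread once and flushes its thread-local
// SATB buffer into the shared queue set, making any partially filled buffer
// available for completed-buffer processing.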
class ShenandoahFlushAllSATB : public ThreadClosure {
 private:
  SATBMarkQueueSet& _satb_qset;
  uintx _claim_token;

 public:
  explicit ShenandoahFlushAllSATB(SATBMarkQueueSet& satb_qset) :
    _satb_qset(satb_qset),
    _claim_token(Threads::thread_claim_token()) { }

  void do_thread(Thread* thread) {
    if (thread->claim_threads_do(true, _claim_token)) {
      // Transfer any partial buffer to the qset for completed buffer processing.
      _satb_qset.flush_queue(ShenandoahThreadLocalData::satb_mark_queue(thread));
    }
  }
};

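// SATB buffer closure which marks (via ShenandoahMark::mark_through_ref) each
// buffered pointer that refers into an active old region, pushing work onto
// the given old-generation mark queue. Pointers into other regions are
// discarded and counted in _trashed_oops.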
class ShenandoahProcessOldSATB : public SATBBufferClosure {
 private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

 public:
  size_t _trashed_oops;

  explicit ShenandoahProcessOldSATB(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()),
    _trashed_oops(0) {}

  void do_buffer(void** buffer, size_t size) {
    assert(size == 0 || !_heap->has_forwarded_objects() || _heap->is_concurrent_old_mark_in_progress(), "Forwarded objects are not expected here");
    for (size_t i = 0; i < size; ++i) {
      oop* p = (oop*) &buffer[i];
      ShenandoahHeapRegion* region = _heap->heap_region_containing(*p);
      if (region->is_old() && region->is_active()) {
        ShenandoahMark::mark_through_ref<oop, OLD>(p, _queue, nullptr, _mark_context, false);
      } else {
        ++_trashed_oops;
      }
    }
  }
};

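// Worker task which flushes every thread's SATB buffer into the shared queue
// set and then drains the completed buffers into the old-generation mark
// queues, discarding entries which no longer refer into active old regions.
// The total number of discarded entries is accumulated in _trashed_oops and
// logged when the task is destroyed.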
class ShenandoahPurgeSATBTask : public WorkerTask {
 private:
  ShenandoahObjToScanQueueSet* _mark_queues;

 public:
  volatile size_t _trashed_oops;

  explicit ShenandoahPurgeSATBTask(ShenandoahObjToScanQueueSet* queues) :
    WorkerTask("Purge SATB"),
    _mark_queues(queues),
    _trashed_oops(0) {
    Threads::change_thread_claim_token();
  }

  ~ShenandoahPurgeSATBTask() {
    if (_trashed_oops > 0) {
      log_info(gc)("Purged " SIZE_FORMAT " oops from old generation SATB buffers.", _trashed_oops);
    }
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahSATBMarkQueueSet& satb_queues = ShenandoahBarrierSet::satb_mark_queue_set();
    ShenandoahFlushAllSATB flusher(satb_queues);
    Threads::threads_do(&flusher);

    ShenandoahObjToScanQueue* mark_queue = _mark_queues->queue(worker_id);
    ShenandoahProcessOldSATB processor(mark_queue);
    while (satb_queues.apply_closure_to_completed_buffer(&processor)) {}

    Atomic::add(&_trashed_oops, processor._trashed_oops);
  }
};

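// Worker task which makes old regions parseable by coalescing runs of dead
// objects and overwriting them with filler objects. The work is striped over
// the candidate regions by worker id. Humongous regions are skipped since
// their single live object needs no filling. If region work is preempted, the
// task records that fact and returns early so the work can be resumed later.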
class ShenandoahConcurrentCoalesceAndFillTask : public WorkerTask {
 private:
  uint _nworkers;
  ShenandoahHeapRegion** _coalesce_and_fill_region_array;
  uint _coalesce_and_fill_region_count;
  volatile bool _is_preempted;

 public:
  ShenandoahConcurrentCoalesceAndFillTask(uint nworkers, ShenandoahHeapRegion** coalesce_and_fill_region_array,
                                          uint region_count) :
    WorkerTask("Shenandoah Concurrent Coalesce and Fill"),
    _nworkers(nworkers),
    _coalesce_and_fill_region_array(coalesce_and_fill_region_array),
    _coalesce_and_fill_region_count(region_count),
    _is_preempted(false) {
  }

  void work(uint worker_id) {
    for (uint region_idx = worker_id; region_idx < _coalesce_and_fill_region_count; region_idx += _nworkers) {
      ShenandoahHeapRegion* r = _coalesce_and_fill_region_array[region_idx];
      if (r->is_humongous()) {
        // There is only one object in this region and it is not garbage, so no need to coalesce or fill.
        continue;
      }

      if (!r->oop_fill_and_coalesce()) {
        // Coalesce and fill has been preempted.
        Atomic::store(&_is_preempted, true);
        return;
      }
    }
  }

  // The value returned from is_completed() is only valid after all worker threads have terminated.
  bool is_completed() {
    return !Atomic::load(&_is_preempted);
  }
};

ShenandoahOldGeneration::ShenandoahOldGeneration(uint max_queues, size_t max_capacity, size_t soft_max_capacity)
  : ShenandoahGeneration(OLD, max_queues, max_capacity, soft_max_capacity),
    _coalesce_and_fill_region_array(NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, ShenandoahHeap::heap()->num_regions(), mtGC)),
    _state(IDLE)
{
  // Always clear soft references for the old generation.
  ref_processor()->set_soft_reference_policy(true);
}

const char* ShenandoahOldGeneration::name() const {
  return "OLD";
}

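// Note that this treats every region which is not affiliated with the young
// generation (including FREE regions) as belonging to the old generation.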
bool ShenandoahOldGeneration::contains(ShenandoahHeapRegion* region) const {
  return region->affiliation() != YOUNG_GENERATION;
}

void ShenandoahOldGeneration::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* cl) {
  ShenandoahGenerationRegionClosure<OLD> old_regions(cl);
  ShenandoahHeap::heap()->parallel_heap_region_iterate(&old_regions);
}

void ShenandoahOldGeneration::heap_region_iterate(ShenandoahHeapRegionClosure* cl) {
  ShenandoahGenerationRegionClosure<OLD> old_regions(cl);
  ShenandoahHeap::heap()->heap_region_iterate(&old_regions);
}

void ShenandoahOldGeneration::set_concurrent_mark_in_progress(bool in_progress) {
  ShenandoahHeap::heap()->set_concurrent_old_mark_in_progress(in_progress);
}

bool ShenandoahOldGeneration::is_concurrent_mark_in_progress() {
  return ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress();
}

void ShenandoahOldGeneration::cancel_marking() {
  if (is_concurrent_mark_in_progress()) {
    log_info(gc)("Abandon SATB buffers.");
    ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
  }

  ShenandoahGeneration::cancel_marking();
}

void ShenandoahOldGeneration::prepare_gc() {
  // Make the old generation regions parseable, so they can be safely
  // scanned when looking for objects in memory indicated by dirty cards.
  entry_coalesce_and_fill();

  // Now that we have made the old generation parseable, it is safe to reset the mark bitmap.
  {
    static const char* msg = "Concurrent reset (OLD)";
    ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset_old);
    ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                                msg);
    ShenandoahGeneration::prepare_gc();
  }
}

bool ShenandoahOldGeneration::entry_coalesce_and_fill() {
  // Use a static message so it remains valid for the lifetime of the event mark.
  static const char* msg = "Coalescing and filling (OLD)";
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::coalesce_and_fill);

  // TODO: I don't think we're using these concurrent collection counters correctly.
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  EventMark em("%s", msg);
  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent coalesce and fill");

  return coalesce_and_fill();
}

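// Fills and coalesces the candidate regions supplied by the old heuristics.
// Returns true and transitions to BOOTSTRAPPING if the work ran to
// completion; returns false if the workers were preempted, in which case the
// generation stays in the FILLING state and the work will be resumed later.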
bool ShenandoahOldGeneration::coalesce_and_fill() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->set_prepare_for_old_mark_in_progress(true);
  transition_to(FILLING);

  ShenandoahOldHeuristics* old_heuristics = heap->old_heuristics();
  WorkerThreads* workers = heap->workers();
  uint nworkers = workers->active_workers();

  log_debug(gc)("Starting (or resuming) coalesce-and-fill of old heap regions");
  uint coalesce_and_fill_regions_count = old_heuristics->get_coalesce_and_fill_candidates(_coalesce_and_fill_region_array);
  assert(coalesce_and_fill_regions_count <= heap->num_regions(), "Sanity");
  ShenandoahConcurrentCoalesceAndFillTask task(nworkers, _coalesce_and_fill_region_array, coalesce_and_fill_regions_count);

  workers->run_task(&task);
  if (task.is_completed()) {
    // Remember that we're done with coalesce-and-fill.
    heap->set_prepare_for_old_mark_in_progress(false);
    transition_to(BOOTSTRAPPING);
    return true;
  } else {
    // We were preempted before the work was done.
    log_debug(gc)("Suspending coalesce-and-fill of old heap regions");
    return false;
  }
}

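// Flushes and drains all thread SATB buffers, transferring entries which
// refer into active old regions onto the old generation mark queues. Must be
// called at a safepoint, and only while old marking is in progress.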
void ShenandoahOldGeneration::transfer_pointers_from_satb() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  shenandoah_assert_safepoint();
  assert(heap->is_concurrent_old_mark_in_progress(), "Only necessary during old marking.");
  log_info(gc)("Transfer SATB buffers.");
  uint nworkers = heap->workers()->active_workers();
  StrongRootsScope scope(nworkers);

  ShenandoahPurgeSATBTask purge_satb_task(task_queues());
  heap->workers()->run_task(&purge_satb_task);
}

bool ShenandoahOldGeneration::contains(oop obj) const {
  return ShenandoahHeap::heap()->is_in_old(obj);
}

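// Invoked after old marking completes. Updates region states, asks the old
// heuristics to prepare its list of old region candidates for future mixed
// collections, and rebuilds the free set. Note that no collection set is
// actually chosen here.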
void ShenandoahOldGeneration::prepare_regions_and_collection_set(bool concurrent) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(!heap->is_full_gc_in_progress(), "Only for concurrent and degenerated GC");

  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states : ShenandoahPhaseTimings::degen_gc_final_update_region_states);
    ShenandoahFinalMarkUpdateRegionStateClosure cl(complete_marking_context());

    parallel_heap_region_iterate(&cl);
    heap->assert_pinned_region_status();
  }

  {
    // This doesn't actually choose a collection set, but prepares a list of
    // regions as 'candidates' for inclusion in a mixed collection.
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset : ShenandoahPhaseTimings::degen_gc_choose_cset);
    ShenandoahHeapLocker locker(heap->lock());
    heuristics()->choose_collection_set(nullptr, nullptr);
  }

  {
    // Though we did not choose a collection set above, we still may have
    // freed up immediate garbage regions, so proceed with rebuilding the free set.
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset : ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
    ShenandoahHeapLocker locker(heap->lock());
    heap->free_set()->rebuild();
  }
}

const char* ShenandoahOldGeneration::state_name(State state) {
  switch (state) {
    case IDLE:          return "Idle";
    case FILLING:       return "Coalescing";
    case BOOTSTRAPPING: return "Bootstrapping";
    case MARKING:       return "Marking";
    case WAITING:       return "Waiting";
    default:
      ShouldNotReachHere();
      return "Unknown";
  }
}

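// State changes are logged and, in debug builds, validated against the
// diagram below. A transition to the current state is a no-op.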
void ShenandoahOldGeneration::transition_to(State new_state) {
  if (_state != new_state) {
    log_info(gc)("Old generation transition from %s to %s", state_name(_state), state_name(new_state));
    assert(validate_transition(new_state), "Invalid state transition.");
    _state = new_state;
  }
}

#ifdef ASSERT
// This diagram depicts the expected state transitions for marking the old generation
// and preparing for old collections. When a young generation cycle executes, the
// remembered set scan must visit objects in old regions. Visiting an object which
// has become dead during a previous old cycle would result in crashes. To avoid
// visiting such objects, the remembered set scan will use the old generation mark
// bitmap when possible. It is _not_ possible to use the old generation bitmap when
// old marking is active (the bitmap is not complete). For this reason, the old
// regions are made parseable _before_ the old generation bitmap is reset. The
// diagram does not depict global and full collections, both of which cancel any
// old generation activity.
//
//                              +-----------------+
//               +------------> |      IDLE       |
//               |   +--------> |                 |
//               |   |          +-----------------+
//               |   |            |
//               |   |            | Begin Old Mark
//               |   |            v
//               |   |          +-----------------+     +--------------------+
//               |   |          |     FILLING     | <-> |      YOUNG GC      |
//               |   |          |                 |     | (RSet Uses Bitmap) |
//               |   |          +-----------------+     +--------------------+
//               |   |            |
//               |   |            | Reset Bitmap
//               |   |            v
//               |   |          +-----------------+
//               |   |          |  BOOTSTRAPPING  |
//               |   |          |                 |
//               |   |          +-----------------+
//               |   |            |
//               |   |            | Continue Marking
//               |   |            v
//               |   |          +-----------------+     +----------------------+
//               |   |          |     MARKING     | <-> |       YOUNG GC       |
//               |   +----------|                 |     | (RSet Parses Region) |
//               |              +-----------------+     +----------------------+
//               |                |
//               |                | Has Candidates
//               |                v
//               |              +-----------------+
//               |              |     WAITING     |
//               +------------- |                 |
//                              +-----------------+
//
bool ShenandoahOldGeneration::validate_transition(State new_state) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  switch (new_state) {
    case IDLE:
      assert(!heap->is_concurrent_old_mark_in_progress(), "Cannot become idle during old mark.");
      assert(_old_heuristics->unprocessed_old_collection_candidates() == 0, "Cannot become idle with collection candidates");
      assert(!heap->is_prepare_for_old_mark_in_progress(), "Cannot become idle while making old generation parseable.");
      assert(heap->young_generation()->old_gen_task_queues() == nullptr, "Cannot become idle when setup for bootstrapping.");
      return true;
    case FILLING:
      assert(_state == IDLE, "Cannot begin filling without first being idle.");
      assert(heap->is_prepare_for_old_mark_in_progress(), "Should be preparing for old mark now.");
      return true;
    case BOOTSTRAPPING:
      assert(_state == FILLING, "Cannot reset bitmap without making old regions parseable.");
      // assert(heap->young_generation()->old_gen_task_queues() != nullptr, "Cannot bootstrap without old mark queues.");
      assert(!heap->is_prepare_for_old_mark_in_progress(), "Cannot still be making old regions parseable.");
      return true;
    case MARKING:
      assert(_state == BOOTSTRAPPING, "Must have finished bootstrapping before marking.");
      assert(heap->young_generation()->old_gen_task_queues() != nullptr, "Young generation needs old mark queues.");
      assert(heap->is_concurrent_old_mark_in_progress(), "Should be marking old now.");
      return true;
    case WAITING:
      assert(_state == MARKING, "Cannot have old collection candidates without first marking.");
      assert(_old_heuristics->unprocessed_old_collection_candidates() > 0, "Must have collection candidates here.");
      return true;
    default:
      ShouldNotReachHere();
      return false;
  }
}
#endif

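// The old generation's heuristics are composed of two parts: the heuristic
// named by -XX:ShenandoahOldGCHeuristics serves as the trigger, and it is
// wrapped by a ShenandoahOldHeuristics, which also manages the old region
// collection candidates.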
ShenandoahHeuristics* ShenandoahOldGeneration::initialize_heuristics(ShenandoahMode* gc_mode) {
  assert(ShenandoahOldGCHeuristics != nullptr, "ShenandoahOldGCHeuristics should not be null");
  ShenandoahHeuristics* trigger;
  if (strcmp(ShenandoahOldGCHeuristics, "static") == 0) {
    trigger = new ShenandoahStaticHeuristics(this);
  } else if (strcmp(ShenandoahOldGCHeuristics, "adaptive") == 0) {
    trigger = new ShenandoahAdaptiveHeuristics(this);
  } else if (strcmp(ShenandoahOldGCHeuristics, "compact") == 0) {
    trigger = new ShenandoahCompactHeuristics(this);
  } else {
    vm_exit_during_initialization("Unknown -XX:ShenandoahOldGCHeuristics option (must be one of: static, adaptive, compact)");
    ShouldNotReachHere();
    return nullptr;
  }
  trigger->set_guaranteed_gc_interval(ShenandoahGuaranteedOldGCInterval);
  _old_heuristics = new ShenandoahOldHeuristics(this, trigger);
  _heuristics = _old_heuristics;
  return _heuristics;
}

void ShenandoahOldGeneration::record_success_concurrent(bool abbreviated) {
  heuristics()->record_success_concurrent(abbreviated);
  ShenandoahHeap::heap()->shenandoah_policy()->record_success_old();
}