/*
 * Copyright (c) 2021, Amazon.com, Inc. or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/strongRootsScope.hpp"
#include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMarkClosures.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahStringDedup.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/threads.hpp"
#include "utilities/events.hpp"

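// Closure for flushing each thread's local SATB buffer into the shared queue
// set, so that all pending entries can be processed as completed buffers.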
class ShenandoahFlushAllSATB : public ThreadClosure {
 private:
  SATBMarkQueueSet& _satb_qset;

 public:
  explicit ShenandoahFlushAllSATB(SATBMarkQueueSet& satb_qset) :
    _satb_qset(satb_qset) {}

  void do_thread(Thread* thread) {
    // Transfer any partial buffer to the qset for completed buffer processing.
    _satb_qset.flush_queue(ShenandoahThreadLocalData::satb_mark_queue(thread));
  }
};

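// SATB buffer closure that pushes pointers into active old regions onto the
// given mark queue and discards the rest, counting the discarded ("trashed")
// oops for logging.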
class ShenandoahProcessOldSATB : public SATBBufferClosure {
 private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

 public:
  size_t _trashed_oops;

  explicit ShenandoahProcessOldSATB(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()),
    _trashed_oops(0) {}

  void do_buffer(void** buffer, size_t size) {
    assert(size == 0 || !_heap->has_forwarded_objects() || _heap->is_concurrent_old_mark_in_progress(), "Forwarded objects are not expected here");
    for (size_t i = 0; i < size; ++i) {
      oop* p = (oop*) &buffer[i];
      ShenandoahHeapRegion* region = _heap->heap_region_containing(*p);
      if (region->is_old() && region->is_active()) {
        ShenandoahMark::mark_through_ref<oop, OLD>(p, _queue, nullptr, _mark_context, false);
      } else {
        ++_trashed_oops;
      }
    }
  }
};

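// Worker task that flushes all SATB buffers and drains the completed buffers,
// retaining only pointers into the old generation.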
class ShenandoahPurgeSATBTask : public WorkerTask {
 private:
  ShenandoahObjToScanQueueSet* _mark_queues;

 public:
  volatile size_t _trashed_oops;

  explicit ShenandoahPurgeSATBTask(ShenandoahObjToScanQueueSet* queues) :
    WorkerTask("Purge SATB"),
    _mark_queues(queues),
    _trashed_oops(0) {
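    // Advance the global thread claim token so that possibly_parallel_threads_do()
    // in work() visits each thread exactly once across all workers.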
    Threads::change_thread_claim_token();
  }

  ~ShenandoahPurgeSATBTask() {
    if (_trashed_oops > 0) {
      log_info(gc)("Purged " SIZE_FORMAT " oops from old generation SATB buffers.", _trashed_oops);
    }
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahSATBMarkQueueSet& satb_queues = ShenandoahBarrierSet::satb_mark_queue_set();
    ShenandoahFlushAllSATB flusher(satb_queues);
    Threads::possibly_parallel_threads_do(true /* is_par */, &flusher);

    ShenandoahObjToScanQueue* mark_queue = _mark_queues->queue(worker_id);
    ShenandoahProcessOldSATB processor(mark_queue);
    while (satb_queues.apply_closure_to_completed_buffer(&processor)) {}

    Atomic::add(&_trashed_oops, processor._trashed_oops);
  }
};

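// Worker task that makes old regions parseable by coalescing runs of dead
// objects and overwriting them with filler objects. The work may be preempted
// by a more urgent collection, in which case the task records that it did not
// complete.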
class ShenandoahConcurrentCoalesceAndFillTask : public WorkerTask {
 private:
  uint _nworkers;
  ShenandoahHeapRegion** _coalesce_and_fill_region_array;
  uint _coalesce_and_fill_region_count;
  volatile bool _is_preempted;

 public:
  ShenandoahConcurrentCoalesceAndFillTask(uint nworkers, ShenandoahHeapRegion** coalesce_and_fill_region_array,
                                          uint region_count) :
    WorkerTask("Shenandoah Concurrent Coalesce and Fill"),
    _nworkers(nworkers),
    _coalesce_and_fill_region_array(coalesce_and_fill_region_array),
    _coalesce_and_fill_region_count(region_count),
    _is_preempted(false) {
  }

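  // Candidate regions are striped across the workers: worker N handles regions
  // N, N + _nworkers, N + 2 * _nworkers, and so on.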
  void work(uint worker_id) {
    for (uint region_idx = worker_id; region_idx < _coalesce_and_fill_region_count; region_idx += _nworkers) {
      ShenandoahHeapRegion* r = _coalesce_and_fill_region_array[region_idx];
      if (r->is_humongous()) {
        // There is only one object in this region and it is not garbage, so there is nothing to coalesce or fill.
        continue;
      }

      if (!r->oop_fill_and_coalesce()) {
        // Coalesce-and-fill has been preempted.
        Atomic::store(&_is_preempted, true);
        return;
      }
    }
  }

  // The value returned by is_completed() is only valid after all worker threads have terminated.
  bool is_completed() {
    return !Atomic::load(&_is_preempted);
  }
};

ShenandoahOldGeneration::ShenandoahOldGeneration(uint max_queues, size_t max_capacity, size_t soft_max_capacity)
  : ShenandoahGeneration(OLD, max_queues, max_capacity, soft_max_capacity),
    _coalesce_and_fill_region_array(NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, ShenandoahHeap::heap()->num_regions(), mtGC)),
    _state(IDLE)
{
  // Always clear soft references for the old generation.
  ref_processor()->set_soft_reference_policy(true);
}

const char* ShenandoahOldGeneration::name() const {
  return "OLD";
}

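// Note that this counts any region not affiliated with the young generation
// (that is, OLD as well as FREE/unaffiliated regions) as part of old.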
bool ShenandoahOldGeneration::contains(ShenandoahHeapRegion* region) const {
  return region->affiliation() != YOUNG_GENERATION;
}

void ShenandoahOldGeneration::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* cl) {
  ShenandoahGenerationRegionClosure<OLD> old_regions(cl);
  ShenandoahHeap::heap()->parallel_heap_region_iterate(&old_regions);
}

void ShenandoahOldGeneration::heap_region_iterate(ShenandoahHeapRegionClosure* cl) {
  ShenandoahGenerationRegionClosure<OLD> old_regions(cl);
  ShenandoahHeap::heap()->heap_region_iterate(&old_regions);
}

void ShenandoahOldGeneration::set_concurrent_mark_in_progress(bool in_progress) {
  ShenandoahHeap::heap()->set_concurrent_old_mark_in_progress(in_progress);
}

bool ShenandoahOldGeneration::is_concurrent_mark_in_progress() {
  return ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress();
}

void ShenandoahOldGeneration::cancel_marking() {
  if (is_concurrent_mark_in_progress()) {
    log_info(gc)("Abandon SATB buffers");
    ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
  }

  ShenandoahGeneration::cancel_marking();
}

void ShenandoahOldGeneration::prepare_gc() {
  // Make the old generation regions parseable, so they can be safely
  // scanned when looking for objects in memory indicated by dirty cards.
  if (entry_coalesce_and_fill()) {
    // Now that we have made the old generation parseable, it is safe to reset the mark bitmap.
    static const char* msg = "Concurrent reset (OLD)";
    ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset_old);
    ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                                msg);
    ShenandoahGeneration::prepare_gc();
  }
  // Else, coalesce-and-fill has been preempted and we'll finish that effort in the future.  Do not invoke
  // ShenandoahGeneration::prepare_gc() until coalesce-and-fill is done, because it resets the mark bitmap
  // and invokes set_mark_incomplete().  Coalesce-and-fill depends on the mark bitmap.
}

bool ShenandoahOldGeneration::entry_coalesce_and_fill() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  // Share one message between the phase timing and the event log entry below.
  static const char* msg = "Coalescing and filling (OLD)";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::coalesce_and_fill);

  // TODO: I don't think we're using these concurrent collection counters correctly.
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  EventMark em("%s", msg);
  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent coalesce and fill");

  return coalesce_and_fill();
}

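// Makes old regions parseable by coalescing and filling dead objects. Returns
// true if all candidate regions were processed, or false if the work was
// preempted before it completed.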
bool ShenandoahOldGeneration::coalesce_and_fill() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->set_prepare_for_old_mark_in_progress(true);
  transition_to(FILLING);

  ShenandoahOldHeuristics* old_heuristics = heap->old_heuristics();
  WorkerThreads* workers = heap->workers();
  uint nworkers = workers->active_workers();

  log_debug(gc)("Starting (or resuming) coalesce-and-fill of old heap regions");
  uint coalesce_and_fill_regions_count = old_heuristics->get_coalesce_and_fill_candidates(_coalesce_and_fill_region_array);
  assert(coalesce_and_fill_regions_count <= heap->num_regions(), "Sanity");
  ShenandoahConcurrentCoalesceAndFillTask task(nworkers, _coalesce_and_fill_region_array, coalesce_and_fill_regions_count);

  workers->run_task(&task);
  if (task.is_completed()) {
    // Remember that we're done with coalesce-and-fill.
    heap->set_prepare_for_old_mark_in_progress(false);
    transition_to(BOOTSTRAPPING);
    return true;
  } else {
    // Otherwise, we were preempted before the work was done.
    log_debug(gc)("Suspending coalesce-and-fill of old heap regions");
    return false;
  }
}

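// Flushes the thread-local SATB buffers of all threads and drains the
// completed buffers into the old generation mark queues. Must run at a
// safepoint because it iterates over all threads.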
void ShenandoahOldGeneration::transfer_pointers_from_satb() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  shenandoah_assert_safepoint();
  assert(heap->is_concurrent_old_mark_in_progress(), "Only necessary during old marking.");
  log_info(gc)("Transfer SATB buffers");
  uint nworkers = heap->workers()->active_workers();
  StrongRootsScope scope(nworkers);

  ShenandoahPurgeSATBTask purge_satb_task(task_queues());
  heap->workers()->run_task(&purge_satb_task);
}

bool ShenandoahOldGeneration::contains(oop obj) const {
  return ShenandoahHeap::heap()->is_in_old(obj);
}

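// After old marking completes: update region states, have the heuristics build
// the list of mixed-collection candidates, and rebuild the free set. Despite
// the phase names, no collection set is chosen here; old regions are only
// evacuated later, as part of mixed collections.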
void ShenandoahOldGeneration::prepare_regions_and_collection_set(bool concurrent) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(!heap->is_full_gc_in_progress(), "Only for concurrent and degenerated GC");

  {
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states : ShenandoahPhaseTimings::degen_gc_final_update_region_states);
    ShenandoahFinalMarkUpdateRegionStateClosure cl(complete_marking_context());

    parallel_heap_region_iterate(&cl);
    heap->assert_pinned_region_status();
  }

  {
    // This doesn't actually choose a collection set, but prepares a list of
    // regions as 'candidates' for inclusion in a mixed collection.
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset : ShenandoahPhaseTimings::degen_gc_choose_cset);
    ShenandoahHeapLocker locker(heap->lock());
    heuristics()->choose_collection_set(nullptr, nullptr);
  }

  {
    // Though we did not choose a collection set above, we still may have
    // freed up immediate garbage regions, so proceed with rebuilding the free set.
    ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset : ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
    ShenandoahHeapLocker locker(heap->lock());
    heap->free_set()->rebuild();
  }
}

const char* ShenandoahOldGeneration::state_name(State state) {
  switch (state) {
    case IDLE:          return "Idle";
    case FILLING:       return "Coalescing";
    case BOOTSTRAPPING: return "Bootstrapping";
    case MARKING:       return "Marking";
    case WAITING:       return "Waiting";
    default:
      ShouldNotReachHere();
      return "Unknown";
  }
}

void ShenandoahOldGeneration::transition_to(State new_state) {
  if (_state != new_state) {
    log_info(gc)("Old generation transition from %s to %s", state_name(_state), state_name(new_state));
    assert(validate_transition(new_state), "Invalid state transition.");
    _state = new_state;
  }
}

#ifdef ASSERT
// This diagram depicts the expected state transitions for marking the old generation
// and preparing for old collections. When a young generation cycle executes, the
// remembered set scan must visit objects in old regions. Visiting an object which
// has become dead on previous old cycles will result in crashes. To avoid visiting
// such objects, the remembered set scan will use the old generation mark bitmap when
// possible. It is _not_ possible to use the old generation bitmap when old marking
// is active (bitmap is not complete). For this reason, the old regions are made
// parseable _before_ the old generation bitmap is reset. The diagram does not depict
// global and full collections, both of which cancel any old generation activity.
//
//                              +-----------------+
//               +------------> |      IDLE       |
//               |   +--------> |                 |
//               |   |          +-----------------+
//               |   |            |
//               |   |            | Begin Old Mark
//               |   |            v
//               |   |          +-----------------+     +--------------------+
//               |   |          |     FILLING     | <-> |      YOUNG GC      |
//               |   |          |                 |     | (RSet Uses Bitmap) |
//               |   |          +-----------------+     +--------------------+
//               |   |            |
//               |   |            | Reset Bitmap
//               |   |            v
//               |   |          +-----------------+
//               |   |          |    BOOTSTRAP    |
//               |   |          |                 |
//               |   |          +-----------------+
//               |   |            |
//               |   |            | Continue Marking
//               |   |            v
//               |   |          +-----------------+     +----------------------+
//               |   |          |    MARKING      | <-> |       YOUNG GC       |
//               |   +----------|                 |     | (RSet Parses Region) |
//               |              +-----------------+     +----------------------+
//               |                |
//               |                | Has Candidates
//               |                v
//               |              +-----------------+
//               |              |     WAITING     |
//               +------------- |                 |
//                              +-----------------+
//
bool ShenandoahOldGeneration::validate_transition(State new_state) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  switch (new_state) {
    case IDLE:
      assert(!heap->is_concurrent_old_mark_in_progress(), "Cannot become idle during old mark.");
      assert(_old_heuristics->unprocessed_old_collection_candidates() == 0, "Cannot become idle with collection candidates");
      assert(!heap->is_prepare_for_old_mark_in_progress(), "Cannot become idle while making old generation parseable.");
      assert(heap->young_generation()->old_gen_task_queues() == nullptr, "Cannot become idle when setup for bootstrapping.");
      return true;
    case FILLING:
      assert(_state == IDLE, "Cannot begin filling without first being idle.");
      assert(heap->is_prepare_for_old_mark_in_progress(), "Should be preparing for old mark now.");
      return true;
    case BOOTSTRAPPING:
      assert(_state == FILLING, "Cannot reset bitmap without making old regions parseable.");
      // assert(heap->young_generation()->old_gen_task_queues() != nullptr, "Cannot bootstrap without old mark queues.");
      assert(!heap->is_prepare_for_old_mark_in_progress(), "Cannot still be making old regions parseable.");
      return true;
    case MARKING:
      assert(_state == BOOTSTRAPPING, "Must have finished bootstrapping before marking.");
      assert(heap->young_generation()->old_gen_task_queues() != nullptr, "Young generation needs old mark queues.");
      assert(heap->is_concurrent_old_mark_in_progress(), "Should be marking old now.");
      return true;
    case WAITING:
      assert(_state == MARKING, "Cannot have old collection candidates without first marking.");
      assert(_old_heuristics->unprocessed_old_collection_candidates() > 0, "Must have collection candidates here.");
      return true;
    default:
      ShouldNotReachHere();
      return false;
  }
}
#endif

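// The old generation layers two heuristics: the trigger selected by
// -XX:ShenandoahOldGCHeuristics (static, adaptive, or compact) decides when to
// start old marking, while ShenandoahOldHeuristics manages the candidate
// regions for mixed collections.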
ShenandoahHeuristics* ShenandoahOldGeneration::initialize_heuristics(ShenandoahMode* gc_mode) {
  assert(ShenandoahOldGCHeuristics != nullptr, "ShenandoahOldGCHeuristics should not be unset");
  ShenandoahHeuristics* trigger;
  if (strcmp(ShenandoahOldGCHeuristics, "static") == 0) {
    trigger = new ShenandoahStaticHeuristics(this);
  } else if (strcmp(ShenandoahOldGCHeuristics, "adaptive") == 0) {
    trigger = new ShenandoahAdaptiveHeuristics(this);
  } else if (strcmp(ShenandoahOldGCHeuristics, "compact") == 0) {
    trigger = new ShenandoahCompactHeuristics(this);
  } else {
    vm_exit_during_initialization("Unknown -XX:ShenandoahOldGCHeuristics option (must be one of: static, adaptive, compact)");
    ShouldNotReachHere();
    return nullptr;
  }
  trigger->set_guaranteed_gc_interval(ShenandoahGuaranteedOldGCInterval);
  _old_heuristics = new ShenandoahOldHeuristics(this, trigger);
  _heuristics = _old_heuristics;
  return _heuristics;
}

void ShenandoahOldGeneration::record_success_concurrent(bool abbreviated) {
  heuristics()->record_success_concurrent(abbreviated);
  ShenandoahHeap::heap()->shenandoah_policy()->record_success_old();
}