/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */


#include "precompiled.hpp"

#include "gc/shared/strongRootsScope.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahMarkClosures.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahStringDedup.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/threads.hpp"
#include "utilities/events.hpp"

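// Closure over all Java and GC threads: flushes each thread's local SATB
// buffer into the shared queue set so its contents become visible as
// completed buffers for processing.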
class ShenandoahFlushAllSATB : public ThreadClosure {
private:
  SATBMarkQueueSet& _satb_qset;

public:
  explicit ShenandoahFlushAllSATB(SATBMarkQueueSet& satb_qset) :
    _satb_qset(satb_qset) {}

  void do_thread(Thread* thread) {
    // Transfer any partial buffer to the qset for completed buffer processing.
    _satb_qset.flush_queue(ShenandoahThreadLocalData::satb_mark_queue(thread));
  }
};

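// Applied to each completed SATB buffer: oops pointing into active old
// regions are marked through the old mark queue; everything else is
// discarded and counted as trashed.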
class ShenandoahProcessOldSATB : public SATBBufferClosure {
private:
  ShenandoahObjToScanQueue*       _queue;
  ShenandoahHeap*                 _heap;
  ShenandoahMarkingContext* const _mark_context;
  size_t                          _trashed_oops;

public:
  explicit ShenandoahProcessOldSATB(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()),
    _trashed_oops(0) {}

  void do_buffer(void** buffer, size_t size) {
    assert(size == 0 || !_heap->has_forwarded_objects() || _heap->is_concurrent_old_mark_in_progress(), "Forwarded objects are not expected here");
    for (size_t i = 0; i < size; ++i) {
      oop* p = (oop*) &buffer[i];
      ShenandoahHeapRegion* region = _heap->heap_region_containing(*p);
      if (region->is_old() && region->is_active()) {
        ShenandoahMark::mark_through_ref<oop, OLD>(p, _queue, nullptr, _mark_context, false);
      } else {
        _trashed_oops++;
      }
    }
  }

  size_t trashed_oops() const {
    return _trashed_oops;
  }
};

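// Worker task: flush the thread-local SATB buffers on all threads, then drain
// every completed buffer into the old generation mark queues, logging how
// many oops were discarded along the way.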
class ShenandoahPurgeSATBTask : public WorkerTask {
private:
  ShenandoahObjToScanQueueSet* _mark_queues;
  volatile size_t             _trashed_oops;

public:
  explicit ShenandoahPurgeSATBTask(ShenandoahObjToScanQueueSet* queues) :
    WorkerTask("Purge SATB"),
    _mark_queues(queues),
    _trashed_oops(0) {
    Threads::change_thread_claim_token();
  }

  ~ShenandoahPurgeSATBTask() {
    if (_trashed_oops > 0) {
      log_info(gc)("Purged " SIZE_FORMAT " oops from old generation SATB buffers", _trashed_oops);
    }
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahSATBMarkQueueSet& satb_queues = ShenandoahBarrierSet::satb_mark_queue_set();
    ShenandoahFlushAllSATB flusher(satb_queues);
    Threads::possibly_parallel_threads_do(true /* is_par */, &flusher);

    ShenandoahObjToScanQueue* mark_queue = _mark_queues->queue(worker_id);
    ShenandoahProcessOldSATB processor(mark_queue);
    while (satb_queues.apply_closure_to_completed_buffer(&processor)) {}

    Atomic::add(&_trashed_oops, processor.trashed_oops());
  }
};

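// Worker task: coalesce-and-fill the dead space in a set of old regions so
// they become parseable. Regions are striped across workers by index; the
// task records whether any worker was preempted before finishing its share.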
class ShenandoahConcurrentCoalesceAndFillTask : public WorkerTask {
private:
  uint                    _nworkers;
  ShenandoahHeapRegion**  _coalesce_and_fill_region_array;
  uint                    _coalesce_and_fill_region_count;
  volatile bool           _is_preempted;

public:
  ShenandoahConcurrentCoalesceAndFillTask(uint nworkers,
                                          ShenandoahHeapRegion** coalesce_and_fill_region_array,
                                          uint region_count) :
    WorkerTask("Shenandoah Concurrent Coalesce and Fill"),
    _nworkers(nworkers),
    _coalesce_and_fill_region_array(coalesce_and_fill_region_array),
    _coalesce_and_fill_region_count(region_count),
    _is_preempted(false) {
  }

  void work(uint worker_id) {
    for (uint region_idx = worker_id; region_idx < _coalesce_and_fill_region_count; region_idx += _nworkers) {
      ShenandoahHeapRegion* r = _coalesce_and_fill_region_array[region_idx];
      if (r->is_humongous()) {
        // There is only one object in this region and it is not garbage,
        // so there is no need to coalesce or fill.
        continue;
      }

      if (!r->oop_fill_and_coalesce()) {
        // Coalesce-and-fill has been preempted.
        Atomic::store(&_is_preempted, true);
        return;
      }
    }
  }

  // The value returned by is_completed() is only valid after all worker threads have terminated.
  bool is_completed() {
    return !Atomic::load(&_is_preempted);
  }
};

173 
174 ShenandoahOldGeneration::ShenandoahOldGeneration(uint max_queues, size_t max_capacity, size_t soft_max_capacity)
175   : ShenandoahGeneration(OLD, max_queues, max_capacity, soft_max_capacity),
176     _coalesce_and_fill_region_array(NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, ShenandoahHeap::heap()->num_regions(), mtGC)),
177     _state(IDLE),
178     _growth_before_compaction(INITIAL_GROWTH_BEFORE_COMPACTION)
179 {
180   _live_bytes_after_last_mark = ShenandoahHeap::heap()->capacity() * INITIAL_LIVE_FRACTION / FRACTIONAL_DENOMINATOR;
181   // Always clear references for old generation
182   ref_processor()->set_soft_reference_policy(true);
183 }
184 
185 size_t ShenandoahOldGeneration::get_live_bytes_after_last_mark() const {
186   return _live_bytes_after_last_mark;
187 }
188 
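// Record the live bytes measured by the most recent old mark and tighten the
// growth allowance: each completed old mark halves the growth permitted
// before the next trigger, down to MINIMUM_GROWTH_BEFORE_COMPACTION.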
void ShenandoahOldGeneration::set_live_bytes_after_last_mark(size_t bytes) {
  _live_bytes_after_last_mark = bytes;
  if (_growth_before_compaction > MINIMUM_GROWTH_BEFORE_COMPACTION) {
    _growth_before_compaction /= 2;
  }
}

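// The old generation usage (in bytes) at which heuristics should trigger the
// next old collection: the live bytes from the last completed mark, inflated
// by the current growth allowance (a fraction over FRACTIONAL_DENOMINATOR).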
size_t ShenandoahOldGeneration::usage_trigger_threshold() const {
  size_t result = _live_bytes_after_last_mark + (_live_bytes_after_last_mark * _growth_before_compaction) / FRACTIONAL_DENOMINATOR;
  return result;
}

bool ShenandoahOldGeneration::contains(ShenandoahHeapRegion* region) const {
  // TODO: Should this be region->is_old() instead?
  return !region->is_young();
}

void ShenandoahOldGeneration::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* cl) {
  ShenandoahGenerationRegionClosure<OLD> old_regions(cl);
  ShenandoahHeap::heap()->parallel_heap_region_iterate(&old_regions);
}

void ShenandoahOldGeneration::heap_region_iterate(ShenandoahHeapRegionClosure* cl) {
  ShenandoahGenerationRegionClosure<OLD> old_regions(cl);
  ShenandoahHeap::heap()->heap_region_iterate(&old_regions);
}

void ShenandoahOldGeneration::set_concurrent_mark_in_progress(bool in_progress) {
  ShenandoahHeap::heap()->set_concurrent_old_mark_in_progress(in_progress);
}

bool ShenandoahOldGeneration::is_concurrent_mark_in_progress() {
  return ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress();
}

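// If old marking is in progress, the SATB buffers hold pointers destined for
// the old mark queues; abandon them before delegating to the generic
// cancellation path.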
void ShenandoahOldGeneration::cancel_marking() {
  if (is_concurrent_mark_in_progress()) {
    log_info(gc)("Abandon SATB buffers");
    ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
  }

  ShenandoahGeneration::cancel_marking();
}

void ShenandoahOldGeneration::prepare_gc() {
  // Make the old generation regions parseable, so they can be safely
  // scanned when looking for objects in memory indicated by dirty cards.
  if (entry_coalesce_and_fill()) {
    // Now that we have made the old generation parseable, it is safe to reset the mark bitmap.
    static const char* msg = "Concurrent reset (OLD)";
    ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_reset_old);
    ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                                ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
                                msg);
    ShenandoahGeneration::prepare_gc();
  }
  // Else, coalesce-and-fill has been preempted and we'll finish that effort in the future.  Do not invoke
  // ShenandoahGeneration::prepare_gc() until coalesce-and-fill is done, because it resets the mark bitmap
  // and invokes set_mark_incomplete().  Coalesce-and-fill depends on the mark bitmap.
}

bool ShenandoahOldGeneration::entry_coalesce_and_fill() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  static const char* msg = "Coalescing and filling (OLD)";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::coalesce_and_fill);

  // TODO: I don't think we're using these concurrent collection counters correctly.
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  EventMark em("%s", msg);
  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              msg);

  return coalesce_and_fill();
}

bool ShenandoahOldGeneration::coalesce_and_fill() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  heap->set_prepare_for_old_mark_in_progress(true);
  transition_to(FILLING);

  ShenandoahOldHeuristics* old_heuristics = heap->old_heuristics();
  WorkerThreads* workers = heap->workers();
  uint nworkers = workers->active_workers();

  log_debug(gc)("Starting (or resuming) coalesce-and-fill of old heap regions");

  // This code will see the same set of regions to fill on each resumption as it did
  // on the initial run. That's okay because each region keeps track of its own coalesce
  // and fill state. Regions that were filled on a prior attempt will not try to fill again.
  uint coalesce_and_fill_regions_count = old_heuristics->get_coalesce_and_fill_candidates(_coalesce_and_fill_region_array);
  assert(coalesce_and_fill_regions_count <= heap->num_regions(), "Sanity");
  ShenandoahConcurrentCoalesceAndFillTask task(nworkers, _coalesce_and_fill_region_array, coalesce_and_fill_regions_count);

  workers->run_task(&task);
  if (task.is_completed()) {
    // Remember that we're done with coalesce-and-fill.
    heap->set_prepare_for_old_mark_in_progress(false);
    old_heuristics->abandon_collection_candidates();
    return true;
  } else {
    // Otherwise, we were preempted before the work was done.
    log_debug(gc)("Suspending coalesce-and-fill of old heap regions");
    return false;
  }
}

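// At a safepoint during old marking, move the contents of every thread-local
// SATB buffer into the old generation's mark queues so concurrent old marking
// can continue from them.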
void ShenandoahOldGeneration::transfer_pointers_from_satb() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  shenandoah_assert_safepoint();
  assert(heap->is_concurrent_old_mark_in_progress(), "Only necessary during old marking.");
  log_info(gc)("Transfer SATB buffers");
  uint nworkers = heap->workers()->active_workers();
  StrongRootsScope scope(nworkers);

  ShenandoahPurgeSATBTask purge_satb_task(task_queues());
  heap->workers()->run_task(&purge_satb_task);
}

bool ShenandoahOldGeneration::contains(oop obj) const {
  return ShenandoahHeap::heap()->is_in_old(obj);
}

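// After old marking completes: update region states, have the old heuristics
// select candidates for future mixed collections, and rebuild the free set to
// reclaim any immediate garbage.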
void ShenandoahOldGeneration::prepare_regions_and_collection_set(bool concurrent) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(!heap->is_full_gc_in_progress(), "Only for concurrent and degenerated GC");

  {
    ShenandoahGCPhase phase(concurrent ?
        ShenandoahPhaseTimings::final_update_region_states :
        ShenandoahPhaseTimings::degen_gc_final_update_region_states);
    ShenandoahFinalMarkUpdateRegionStateClosure cl(complete_marking_context());

    parallel_heap_region_iterate(&cl);
    heap->assert_pinned_region_status();
  }

  {
    // This doesn't actually choose a collection set, but prepares a list of
    // regions as 'candidates' for inclusion in a mixed collection.
    ShenandoahGCPhase phase(concurrent ?
        ShenandoahPhaseTimings::choose_cset :
        ShenandoahPhaseTimings::degen_gc_choose_cset);
    ShenandoahHeapLocker locker(heap->lock());
    _old_heuristics->prepare_for_old_collections();
  }

  {
    // Though we did not choose a collection set above, we still may have
    // freed up immediate garbage regions, so proceed with rebuilding the free set.
    ShenandoahGCPhase phase(concurrent ?
        ShenandoahPhaseTimings::final_rebuild_freeset :
        ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
    ShenandoahHeapLocker locker(heap->lock());
    size_t cset_young_regions, cset_old_regions;
    heap->free_set()->prepare_to_rebuild(cset_young_regions, cset_old_regions);
    // This is just old-gen completion.  No future budgeting required here.  The only reason to rebuild the free set
    // here is in case any immediate old garbage was identified.
    heap->free_set()->rebuild(cset_young_regions, cset_old_regions);
  }
}

const char* ShenandoahOldGeneration::state_name(State state) {
  switch (state) {
    case IDLE:              return "Idle";
    case FILLING:           return "Coalescing";
    case BOOTSTRAPPING:     return "Bootstrapping";
    case MARKING:           return "Marking";
    case WAITING_FOR_EVAC:  return "Waiting for evacuation";
    case WAITING_FOR_FILL:  return "Waiting for fill";
    default:
      ShouldNotReachHere();
      return "Unknown";
  }
}


void ShenandoahOldGeneration::transition_to(State new_state) {
  if (_state != new_state) {
    log_info(gc)("Old generation transition from %s to %s", state_name(_state), state_name(new_state));
    validate_transition(new_state);
    _state = new_state;
  }
}

#ifdef ASSERT
// This diagram depicts the expected state transitions for marking the old generation
// and preparing for old collections. When a young generation cycle executes, the
// remembered set scan must visit objects in old regions. Visiting an object that
// became dead during a previous old cycle will result in crashes. To avoid visiting
// such objects, the remembered set scan will use the old generation mark bitmap when
// possible. It is _not_ possible to use the old generation bitmap when old marking
// is active (the bitmap is not complete). For this reason, the old regions are made
// parseable _before_ the old generation bitmap is reset. The diagram does not depict
// cancellation of old collections by global or full collections. However, it does
// depict a transition from IDLE to WAITING_FOR_FILL, which is allowed after a global
// cycle ends. Also note that a global collection will cause any evacuation or fill
// candidates to be abandoned, returning the old generation to the idle state.
//
//           +----------------> +-----------------+
//           |   +------------> |      IDLE       |
//           |   |   +--------> |                 |
//           |   |   |          +-----------------+
//           |   |   |            |
//           |   |   |            | Begin Old Mark
//           |   |   |            v
//           |   |   |          +-----------------+     +--------------------+
//           |   |   |          |     FILLING     | <-> |      YOUNG GC      |
//           |   |   |    +---> |                 |     | (RSet Uses Bitmap) |
//           |   |   |    |     +-----------------+     +--------------------+
//           |   |   |    |       |
//           |   |   |    |       | Reset Bitmap
//           |   |   |    |       v
//           |   |   |    |     +-----------------+
//           |   |   |    |     |    BOOTSTRAP    |
//           |   |   |    |     |                 |
//           |   |   |    |     +-----------------+
//           |   |   |    |       |
//           |   |   |    |       | Continue Marking
//           |   |   |    |       v
//           |   |   |    |     +-----------------+     +----------------------+
//           |   |   |    |     |    MARKING      | <-> |       YOUNG GC       |
//           |   |   +----|-----|                 |     | (RSet Parses Region) |
//           |   |        |     +-----------------+     +----------------------+
//           |   |        |       |
//           |   |        |       | Has Candidates
//           |   |        |       v
//           |   |        |     +-----------------+
//           |   |        |     |    WAITING FOR  |
//           |   +--------|---> |    EVACUATIONS  |
//           |            |     +-----------------+
//           |            |       |
//           |            |       | All Candidates are Pinned
//           |            |       v
//           |            |     +-----------------+
//           |            +---- |    WAITING FOR  |
//           +----------------> |    FILLING      |
//                              +-----------------+
//
void ShenandoahOldGeneration::validate_transition(State new_state) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  switch (new_state) {
    case IDLE:
      // GC cancellation can send us back to IDLE from any state.
      assert(!heap->is_concurrent_old_mark_in_progress(), "Cannot become idle during old mark.");
      assert(_old_heuristics->unprocessed_old_collection_candidates() == 0, "Cannot become idle with collection candidates");
      assert(!heap->is_prepare_for_old_mark_in_progress(), "Cannot become idle while making old generation parseable.");
      assert(heap->young_generation()->old_gen_task_queues() == nullptr, "Cannot become idle when setup for bootstrapping.");
      break;
    case FILLING:
      assert(_state == IDLE || _state == WAITING_FOR_FILL, "Cannot begin filling without first completing evacuations, state is '%s'", state_name(_state));
      assert(heap->is_prepare_for_old_mark_in_progress(), "Should be preparing for old mark now.");
      break;
    case BOOTSTRAPPING:
      assert(_state == FILLING, "Cannot reset bitmap without making old regions parseable, state is '%s'", state_name(_state));
      assert(_old_heuristics->unprocessed_old_collection_candidates() == 0, "Cannot bootstrap with mixed collection candidates");
      assert(!heap->is_prepare_for_old_mark_in_progress(), "Cannot still be making old regions parseable.");
      break;
    case MARKING:
      assert(_state == BOOTSTRAPPING, "Must have finished bootstrapping before marking, state is '%s'", state_name(_state));
      assert(heap->young_generation()->old_gen_task_queues() != nullptr, "Young generation needs old mark queues.");
      assert(heap->is_concurrent_old_mark_in_progress(), "Should be marking old now.");
      break;
    case WAITING_FOR_EVAC:
      assert(_state == IDLE || _state == MARKING, "Cannot have old collection candidates without first marking, state is '%s'", state_name(_state));
      assert(_old_heuristics->unprocessed_old_collection_candidates() > 0, "Must have collection candidates here.");
      break;
    case WAITING_FOR_FILL:
      assert(_state == IDLE || _state == MARKING || _state == WAITING_FOR_EVAC, "Cannot begin filling without first marking or evacuating, state is '%s'", state_name(_state));
      assert(_old_heuristics->has_coalesce_and_fill_candidates(), "Cannot wait for fill without something to fill.");
      break;
    default:
      fatal("Unknown new state");
  }
}
#endif

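// The gc_mode parameter is unused here: the old generation always runs with
// ShenandoahOldHeuristics.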
ShenandoahHeuristics* ShenandoahOldGeneration::initialize_heuristics(ShenandoahMode* gc_mode) {
  _old_heuristics = new ShenandoahOldHeuristics(this);
  _old_heuristics->set_guaranteed_gc_interval(ShenandoahGuaranteedOldGCInterval);
  _heuristics = _old_heuristics;
  return _heuristics;
}

void ShenandoahOldGeneration::record_success_concurrent(bool abbreviated) {
  heuristics()->record_success_concurrent(abbreviated);
  ShenandoahHeap::heap()->shenandoah_policy()->record_success_old();
}