/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shared/strongRootsScope.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahMarkClosures.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahStringDedup.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/threads.hpp"
#include "utilities/events.hpp"

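// Closure applied to every mutator and GC thread to flush its thread-local
// SATB buffer into the shared queue set, so the contents can be processed
// as completed buffers.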
class ShenandoahFlushAllSATB : public ThreadClosure {
private:
  SATBMarkQueueSet& _satb_qset;

public:
  explicit ShenandoahFlushAllSATB(SATBMarkQueueSet& satb_qset) :
    _satb_qset(satb_qset) {}

  void do_thread(Thread* thread) {
    // Transfer any partial buffer to the qset for completed buffer processing.
    _satb_qset.flush_queue(ShenandoahThreadLocalData::satb_mark_queue(thread));
  }
};

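// SATB buffer closure that keeps only pointers into active old regions:
// those are pushed onto the old mark queue, while pointers into young or
// inactive regions are dropped and counted as "trashed".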
class ShenandoahProcessOldSATB : public SATBBufferClosure {
private:
  ShenandoahObjToScanQueue*       _queue;
  ShenandoahHeap*                 _heap;
  ShenandoahMarkingContext* const _mark_context;
  size_t                          _trashed_oops;

public:
  explicit ShenandoahProcessOldSATB(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()),
    _trashed_oops(0) {}

  void do_buffer(void** buffer, size_t size) {
    assert(size == 0 || !_heap->has_forwarded_objects() || _heap->is_concurrent_old_mark_in_progress(), "Forwarded objects are not expected here");
    for (size_t i = 0; i < size; ++i) {
      oop* p = (oop*) &buffer[i];
      ShenandoahHeapRegion* region = _heap->heap_region_containing(*p);
      if (region->is_old() && region->is_active()) {
        ShenandoahMark::mark_through_ref<oop, OLD>(p, _queue, nullptr, _mark_context, false);
      } else {
        _trashed_oops++;
      }
    }
  }

  size_t trashed_oops() {
    return _trashed_oops;
  }
};

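// Worker task that flushes all thread-local SATB buffers and then drains the
// completed buffers, retaining only pointers into active old regions. The
// total number of discarded oops is accumulated for logging.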
class ShenandoahPurgeSATBTask : public WorkerTask {
private:
  ShenandoahObjToScanQueueSet* _mark_queues;
  volatile size_t              _trashed_oops;

public:
  explicit ShenandoahPurgeSATBTask(ShenandoahObjToScanQueueSet* queues) :
    WorkerTask("Purge SATB"),
    _mark_queues(queues),
    _trashed_oops(0) {
    Threads::change_thread_claim_token();
  }

  ~ShenandoahPurgeSATBTask() {
    if (_trashed_oops > 0) {
      log_info(gc)("Purged " SIZE_FORMAT " oops from old generation SATB buffers", _trashed_oops);
    }
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahSATBMarkQueueSet& satb_queues = ShenandoahBarrierSet::satb_mark_queue_set();
    ShenandoahFlushAllSATB flusher(satb_queues);
    Threads::possibly_parallel_threads_do(true /* is_par */, &flusher);

    ShenandoahObjToScanQueue* mark_queue = _mark_queues->queue(worker_id);
    ShenandoahProcessOldSATB processor(mark_queue);
    while (satb_queues.apply_closure_to_completed_buffer(&processor)) {}

    Atomic::add(&_trashed_oops, processor.trashed_oops());
  }
};

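// Worker task that makes old regions parsable by coalescing and filling dead
// objects in the regions selected by the old heuristics. The work is
// preemptible: a worker that is asked to yield records the preemption so the
// task can be resumed later.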
class ShenandoahConcurrentCoalesceAndFillTask : public WorkerTask {
private:
  uint                    _nworkers;
  ShenandoahHeapRegion**  _coalesce_and_fill_region_array;
  uint                    _coalesce_and_fill_region_count;
  volatile bool           _is_preempted;

public:
  ShenandoahConcurrentCoalesceAndFillTask(uint nworkers,
                                          ShenandoahHeapRegion** coalesce_and_fill_region_array,
                                          uint region_count) :
    WorkerTask("Shenandoah Concurrent Coalesce and Fill"),
    _nworkers(nworkers),
    _coalesce_and_fill_region_array(coalesce_and_fill_region_array),
    _coalesce_and_fill_region_count(region_count),
    _is_preempted(false) {
  }

  void work(uint worker_id) {
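    // Stripe the candidate regions across the workers: worker i handles
    // regions i, i + _nworkers, i + 2 * _nworkers, and so on.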
    for (uint region_idx = worker_id; region_idx < _coalesce_and_fill_region_count; region_idx += _nworkers) {
      ShenandoahHeapRegion* r = _coalesce_and_fill_region_array[region_idx];
      if (r->is_humongous()) {
        // There is only one object in this region and it is not garbage,
        // so no need to coalesce or fill.
        continue;
      }

      if (!r->oop_fill_and_coalesce()) {
        // Coalesce and fill has been preempted
        Atomic::store(&_is_preempted, true);
        return;
      }
    }
  }

  // The value returned from is_completed() is only valid after all worker threads have terminated.
  bool is_completed() {
    return !Atomic::load(&_is_preempted);
  }
};

ShenandoahOldGeneration::ShenandoahOldGeneration(uint max_queues, size_t max_capacity, size_t soft_max_capacity)
  : ShenandoahGeneration(OLD, max_queues, max_capacity, soft_max_capacity),
    _coalesce_and_fill_region_array(NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, ShenandoahHeap::heap()->num_regions(), mtGC)),
    _state(WAITING_FOR_BOOTSTRAP),
    _growth_before_compaction(INITIAL_GROWTH_BEFORE_COMPACTION),
    _min_growth_before_compaction((ShenandoahMinOldGenGrowthPercent * FRACTIONAL_DENOMINATOR) / 100)
{
  _live_bytes_after_last_mark = ShenandoahHeap::heap()->capacity() * INITIAL_LIVE_FRACTION / FRACTIONAL_DENOMINATOR;
  // Always clear soft references for the old generation
  ref_processor()->set_soft_reference_policy(true);
}

size_t ShenandoahOldGeneration::get_live_bytes_after_last_mark() const {
  return _live_bytes_after_last_mark;
}

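// Record the post-mark live data size and decay the growth trigger: each
// completed old mark halves the permitted growth (down to the configured
// minimum), so an old generation that keeps growing is compacted more
// eagerly over time.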
void ShenandoahOldGeneration::set_live_bytes_after_last_mark(size_t bytes) {
  _live_bytes_after_last_mark = bytes;
  _growth_before_compaction /= 2;
  if (_growth_before_compaction < _min_growth_before_compaction) {
    _growth_before_compaction = _min_growth_before_compaction;
  }
}

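// The next old collection is triggered when old usage exceeds
//   live_at_last_mark * (1 + growth / FRACTIONAL_DENOMINATOR).
// For example (illustrative values only), with 100 MB live after the last
// mark and a growth fraction equivalent to 25%, the trigger would fire at
// 125 MB of old usage.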
size_t ShenandoahOldGeneration::usage_trigger_threshold() const {
  return _live_bytes_after_last_mark + (_live_bytes_after_last_mark * _growth_before_compaction) / FRACTIONAL_DENOMINATOR;
}

bool ShenandoahOldGeneration::contains(ShenandoahHeapRegion* region) const {
  // TODO: Should this be region->is_old() instead?
  return !region->is_young();
}

void ShenandoahOldGeneration::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* cl) {
  ShenandoahGenerationRegionClosure<OLD> old_regions(cl);
  ShenandoahHeap::heap()->parallel_heap_region_iterate(&old_regions);
}

void ShenandoahOldGeneration::heap_region_iterate(ShenandoahHeapRegionClosure* cl) {
  ShenandoahGenerationRegionClosure<OLD> old_regions(cl);
  ShenandoahHeap::heap()->heap_region_iterate(&old_regions);
}

void ShenandoahOldGeneration::set_concurrent_mark_in_progress(bool in_progress) {
  ShenandoahHeap::heap()->set_concurrent_old_mark_in_progress(in_progress);
}

bool ShenandoahOldGeneration::is_concurrent_mark_in_progress() {
  return ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress();
}

void ShenandoahOldGeneration::cancel_marking() {
  if (is_concurrent_mark_in_progress()) {
    log_info(gc)("Abandon SATB buffers");
    ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
  }

  ShenandoahGeneration::cancel_marking();
}

void ShenandoahOldGeneration::prepare_gc() {
  // Now that we have made the old generation parsable, it is safe to reset the mark bitmap.
  assert(state() != FILLING, "Cannot reset old without making it parsable");

  ShenandoahGeneration::prepare_gc();
}

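// Entry point for the concurrent coalesce-and-fill phase: sets up timing,
// tracing and the worker scope, then delegates to coalesce_and_fill().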
bool ShenandoahOldGeneration::entry_coalesce_and_fill() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  static const char* msg = "Coalescing and filling (OLD)";
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::coalesce_and_fill);

  // TODO: I don't think we're using these concurrent collection counters correctly.
  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  EventMark em("%s", msg);
  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              msg);

  return coalesce_and_fill();
}

// Make the old generation regions parsable, so they can be safely
// scanned when looking for objects in memory indicated by dirty cards.
bool ShenandoahOldGeneration::coalesce_and_fill() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  transition_to(FILLING);

  ShenandoahOldHeuristics* old_heuristics = heap->old_heuristics();
  WorkerThreads* workers = heap->workers();
  uint nworkers = workers->active_workers();

  log_debug(gc)("Starting (or resuming) coalesce-and-fill of old heap regions");

  // This code will see the same set of regions to fill on each resumption as it did
  // on the initial run. That's okay because each region keeps track of its own coalesce
  // and fill state. Regions that were filled on a prior attempt will not try to fill again.
  uint coalesce_and_fill_regions_count = old_heuristics->get_coalesce_and_fill_candidates(_coalesce_and_fill_region_array);
  assert(coalesce_and_fill_regions_count <= heap->num_regions(), "Sanity");
  ShenandoahConcurrentCoalesceAndFillTask task(nworkers, _coalesce_and_fill_region_array, coalesce_and_fill_regions_count);

  workers->run_task(&task);
  if (task.is_completed()) {
    old_heuristics->abandon_collection_candidates();
    return true;
  } else {
    // Coalesce-and-fill has been preempted. We'll finish that effort in the future.  Do not invoke
    // ShenandoahGeneration::prepare_gc() until coalesce-and-fill is done because it resets the mark bitmap
    // and invokes set_mark_incomplete().  Coalesce-and-fill depends on the mark bitmap.
    log_debug(gc)("Suspending coalesce-and-fill of old heap regions");
    return false;
  }
}

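// At a safepoint, flush every thread's local SATB buffer into the shared
// queue set and drain the completed buffers into the old mark queues,
// discarding any pointers that do not refer into active old regions.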
void ShenandoahOldGeneration::transfer_pointers_from_satb() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  shenandoah_assert_safepoint();
  assert(heap->is_concurrent_old_mark_in_progress(), "Only necessary during old marking.");
  log_info(gc)("Transfer SATB buffers");
  uint nworkers = heap->workers()->active_workers();
  StrongRootsScope scope(nworkers);

  ShenandoahPurgeSATBTask purge_satb_task(task_queues());
  heap->workers()->run_task(&purge_satb_task);
}

bool ShenandoahOldGeneration::contains(oop obj) const {
  return ShenandoahHeap::heap()->is_in_old(obj);
}

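// After final mark of an old cycle: update region states, let the old
// heuristics identify candidate regions for future mixed collections, and
// rebuild the free set to reclaim any immediate garbage.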
void ShenandoahOldGeneration::prepare_regions_and_collection_set(bool concurrent) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(!heap->is_full_gc_in_progress(), "Only for concurrent and degenerated GC");

  {
    ShenandoahGCPhase phase(concurrent ?
        ShenandoahPhaseTimings::final_update_region_states :
        ShenandoahPhaseTimings::degen_gc_final_update_region_states);
    ShenandoahFinalMarkUpdateRegionStateClosure cl(complete_marking_context());

    parallel_heap_region_iterate(&cl);
    heap->assert_pinned_region_status();
  }

  {
    // This doesn't actually choose a collection set, but prepares a list of
    // regions as 'candidates' for inclusion in a mixed collection.
    ShenandoahGCPhase phase(concurrent ?
        ShenandoahPhaseTimings::choose_cset :
        ShenandoahPhaseTimings::degen_gc_choose_cset);
    ShenandoahHeapLocker locker(heap->lock());
    _old_heuristics->prepare_for_old_collections();
  }

  {
    // Though we did not choose a collection set above, we still may have
    // freed up immediate garbage regions so proceed with rebuilding the free set.
    ShenandoahGCPhase phase(concurrent ?
        ShenandoahPhaseTimings::final_rebuild_freeset :
        ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
    ShenandoahHeapLocker locker(heap->lock());
    size_t cset_young_regions, cset_old_regions;
    size_t first_old, last_old, num_old;
    heap->free_set()->prepare_to_rebuild(cset_young_regions, cset_old_regions, first_old, last_old, num_old);
    // This is just old-gen completion.  No future budgeting required here.  The only reason to rebuild the freeset here
    // is in case there was any immediate old garbage identified.
    heap->free_set()->rebuild(cset_young_regions, cset_old_regions);
  }
}

const char* ShenandoahOldGeneration::state_name(State state) {
  switch (state) {
    case WAITING_FOR_BOOTSTRAP: return "Waiting for Bootstrap";
    case FILLING:               return "Coalescing";
    case BOOTSTRAPPING:         return "Bootstrapping";
    case MARKING:               return "Marking";
    case EVACUATING:            return "Evacuating";
    default:
      ShouldNotReachHere();
      return "Unknown";
  }
}

void ShenandoahOldGeneration::transition_to(State new_state) {
  if (_state != new_state) {
    log_info(gc)("Old generation transition from %s to %s", state_name(_state), state_name(new_state));
    validate_transition(new_state);
    _state = new_state;
  }
}

#ifdef ASSERT
// This diagram depicts the expected state transitions for marking the old generation
// and preparing for old collections. When a young generation cycle executes, the
// remembered set scan must visit objects in old regions. Visiting an object which
// has become dead on previous old cycles will result in crashes. To avoid visiting
// such objects, the remembered set scan will use the old generation mark bitmap when
// possible. It is _not_ possible to use the old generation bitmap when old marking
// is active (bitmap is not complete). For this reason, the old regions are made
// parsable _before_ the old generation bitmap is reset. The diagram does not depict
// cancellation of old collections by global or full collections.
//
// When a global collection supersedes an old collection, the global mark still
// "completes" the old mark bitmap. Subsequent remembered set scans may use the
// old generation mark bitmap, but any uncollected old regions must still be made parsable
// before the next old generation cycle begins. For this reason, a global collection may
// create mixed collection candidates and coalesce-and-fill candidates and will put
// the old generation in the respective states (EVACUATING or FILLING). After a Full GC,
// the mark bitmaps are all reset, all regions are parsable and the mark context will
// not be "complete". After a Full GC, remembered set scans will _not_ use the mark bitmap
// and we expect the old generation to be waiting for bootstrap.
//
//                              +-----------------+
//               +------------> |     FILLING     | <---+
//               |   +--------> |                 |     |
//               |   |          +-----------------+     |
//               |   |            |                     |
//               |   |            | Filling Complete    | <-> A global collection may
//               |   |            v                     |     move the old generation
//               |   |          +-----------------+     |     directly from waiting for
//               |   +--------> |     WAITING     |     |     bootstrap to filling or
//               |   |    +---- |  FOR BOOTSTRAP  | ----+     evacuating.
//               |   |    |     +-----------------+
//               |   |    |       |
//               |   |    |       | Reset Bitmap
//               |   |    |       v
//               |   |    |     +-----------------+     +----------------------+
//               |   |    |     |    BOOTSTRAP    | <-> |       YOUNG GC       |
//               |   |    |     |                 |     | (RSet Parses Region) |
//               |   |    |     +-----------------+     +----------------------+
//               |   |    |       |
//               |   |    |       | Old Marking
//               |   |    |       v
//               |   |    |     +-----------------+     +----------------------+
//               |   |    |     |     MARKING     | <-> |       YOUNG GC       |
//               |   +--------- |                 |     | (RSet Parses Region) |
//               |        |     +-----------------+     +----------------------+
//               |        |       |
//               |        |       | Has Evacuation Candidates
//               |        |       v
//               |        |     +-----------------+     +--------------------+
//               |        +---> |    EVACUATING   | <-> |      YOUNG GC      |
//               +------------- |                 |     | (RSet Uses Bitmap) |
//                              +-----------------+     +--------------------+
//
void ShenandoahOldGeneration::validate_transition(State new_state) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  switch (new_state) {
    case FILLING:
      assert(_state != BOOTSTRAPPING, "Cannot begin making old regions parsable after bootstrapping");
      assert(heap->is_old_bitmap_stable(), "Cannot begin filling without first completing marking, state is '%s'", state_name(_state));
      assert(_old_heuristics->has_coalesce_and_fill_candidates(), "Cannot begin filling without something to fill.");
      break;
    case WAITING_FOR_BOOTSTRAP:
      // GC cancellation can send us back here from any state.
      assert(!heap->is_concurrent_old_mark_in_progress(), "Cannot become ready for bootstrap during old mark.");
      assert(_old_heuristics->unprocessed_old_collection_candidates() == 0, "Cannot become ready for bootstrap with collection candidates");
      assert(heap->young_generation()->old_gen_task_queues() == nullptr, "Cannot become ready for bootstrap when still set up for bootstrapping.");
      break;
    case BOOTSTRAPPING:
      assert(_state == WAITING_FOR_BOOTSTRAP, "Cannot reset bitmap without making old regions parsable, state is '%s'", state_name(_state));
      assert(_old_heuristics->unprocessed_old_collection_candidates() == 0, "Cannot bootstrap with mixed collection candidates");
      assert(!heap->is_prepare_for_old_mark_in_progress(), "Cannot still be making old regions parsable.");
      break;
    case MARKING:
      assert(_state == BOOTSTRAPPING, "Must have finished bootstrapping before marking, state is '%s'", state_name(_state));
      assert(heap->young_generation()->old_gen_task_queues() != nullptr, "Young generation needs old mark queues.");
      assert(heap->is_concurrent_old_mark_in_progress(), "Should be marking old now.");
      break;
    case EVACUATING:
      assert(_state == WAITING_FOR_BOOTSTRAP || _state == MARKING, "Cannot have old collection candidates without first marking, state is '%s'", state_name(_state));
      assert(_old_heuristics->unprocessed_old_collection_candidates() > 0, "Must have collection candidates here.");
      break;
    default:
      fatal("Unknown new state");
  }
}
#endif

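// The old generation always uses ShenandoahOldHeuristics; the gc_mode
// argument is accepted for interface compatibility but is not consulted here.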
ShenandoahHeuristics* ShenandoahOldGeneration::initialize_heuristics(ShenandoahMode* gc_mode) {
  _old_heuristics = new ShenandoahOldHeuristics(this);
  _old_heuristics->set_guaranteed_gc_interval(ShenandoahGuaranteedOldGCInterval);
  _heuristics = _old_heuristics;
  return _heuristics;
}

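// Record a successful concurrent cycle with both the old heuristics and the
// global collector policy counters.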
void ShenandoahOldGeneration::record_success_concurrent(bool abbreviated) {
  heuristics()->record_success_concurrent(abbreviated);
  ShenandoahHeap::heap()->shenandoah_policy()->record_success_old();
}