/*
 * Copyright (c) 2021, Amazon.com, Inc. or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOldGC.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "utilities/events.hpp"

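// Coalesces and fills dead objects in a set of old-gen candidate regions so
// that those regions are left in a parseable state. The task may be preempted
// (e.g. by a pending young-gen collection), in which case _is_preempted
// records that the work did not complete.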
class ShenandoahConcurrentCoalesceAndFillTask : public WorkerTask {
private:
  uint _nworkers;
  ShenandoahHeapRegion** _coalesce_and_fill_region_array;
  uint _coalesce_and_fill_region_count;
  ShenandoahConcurrentGC* _old_gc;
  volatile bool _is_preempted;

public:
  ShenandoahConcurrentCoalesceAndFillTask(uint nworkers, ShenandoahHeapRegion** coalesce_and_fill_region_array,
                                          uint region_count, ShenandoahConcurrentGC* old_gc) :
    WorkerTask("Shenandoah Concurrent Coalesce and Fill"),
    _nworkers(nworkers),
    _coalesce_and_fill_region_array(coalesce_and_fill_region_array),
    _coalesce_and_fill_region_count(region_count),
    _old_gc(old_gc),
    _is_preempted(false) {
  }

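  // The candidate array is striped across the workers: worker i handles
  // regions i, i + _nworkers, i + 2 * _nworkers, and so on.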
  void work(uint worker_id) {
    for (uint region_idx = worker_id; region_idx < _coalesce_and_fill_region_count; region_idx += _nworkers) {
      ShenandoahHeapRegion* r = _coalesce_and_fill_region_array[region_idx];
      if (!r->is_humongous()) {
        if (!r->oop_fill_and_coalesce()) {
          // Coalesce-and-fill has been preempted.
          Atomic::store(&_is_preempted, true);
          return;
        }
      } else {
        // A humongous region holds only one object, and that object is not garbage,
        // so there is nothing to coalesce or fill.
      }
    }
  }

  // The value returned by is_completed() is only valid after all worker threads have terminated.
  bool is_completed() {
    return !Atomic::load(&_is_preempted);
  }
};

ShenandoahOldGC::ShenandoahOldGC(ShenandoahGeneration* generation, ShenandoahSharedFlag& allow_preemption) :
    ShenandoahConcurrentGC(generation, false), _allow_preemption(allow_preemption) {
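  // Size the candidate array for the worst case, in which every region in the
  // heap is an old-gen region in need of coalescing and filling.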
  _coalesce_and_fill_region_array = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, ShenandoahHeap::heap()->num_regions(), mtGC);
}

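// Tells the old-gen heuristics that evacuation of the old collection candidates
// may begin. The evacuations themselves happen during subsequent young
// collections (see the call site in collect() below).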
void ShenandoahOldGC::start_old_evacuations() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahOldHeuristics* old_heuristics = heap->old_heuristics();
  old_heuristics->start_old_evacuations();
}

// Final mark for old-gen differs from final mark for young-gen and global
// collections, so we override the implementation.
void ShenandoahOldGC::op_final_mark() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
  assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");

  if (ShenandoahVerify) {
    heap->verifier()->verify_roots_no_forwarded();
  }

  if (!heap->cancelled_gc()) {
    assert(_mark.generation()->generation_mode() == OLD, "Generation of Old-Gen GC should be OLD");
    _mark.finish_mark();
    assert(!heap->cancelled_gc(), "STW mark cannot OOM");

    // Old collection is complete, so the young generation no longer needs this
    // reference to the old concurrent mark; clean it up.
    heap->young_generation()->set_old_gen_task_queues(NULL);

    // We need to do this because weak root cleaning reports the number of dead handles.
    JvmtiTagMap::set_needs_cleaning();

    _generation->prepare_regions_and_collection_set(true);

    heap->set_unload_classes(false);
    heap->prepare_concurrent_roots();

    // Verification following old-gen concurrent mark likely needs to differ from
    // verification following young-gen concurrent mark, so it is disabled for now:
    //   if (ShenandoahVerify) {
    //     heap->verifier()->verify_after_concmark();
    //   }

    if (VerifyAfterGC) {
      Universe::verify();
    }
  }
}

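// Drives one pass of the old-gen collection. Unless we are resuming a cycle
// that was preempted after final mark, this performs concurrent mark, STW
// final mark, concurrent weak root processing, and early cleanup before the
// concurrent coalesce-and-fill of old regions. Returns false if the cycle was
// cancelled or preempted and must be resumed later.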
bool ShenandoahOldGC::collect(GCCause::Cause cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Skip over the initial phases of the old collect if we're resuming mixed evacuation preparation.
  // Continue concurrent mark, do not reset regions, do not mark roots, do not collect $200.
  if (!heap->is_concurrent_prep_for_mixed_evacuation_in_progress()) {
    _allow_preemption.set();
    entry_mark();
    if (!_allow_preemption.try_unset()) {
      // The regulator thread has unset the preemption guard. That thread will shortly cancel
      // the gc, but the control thread is now racing it. Wait until this thread sees the cancellation.
      while (!heap->cancelled_gc()) {
        SpinPause();
      }
    }

    if (heap->cancelled_gc()) {
      return false;
    }

    // Complete marking under STW
    vmop_entry_final_mark();

    // We aren't dealing with old generation evacuation yet. Our heuristic
    // should not have built a cset in final mark.
    assert(!heap->is_evacuation_in_progress(), "Old gen evacuations are not supported");

    // Process weak roots that might still point to regions that would be broken by cleanup
    if (heap->is_concurrent_weak_root_in_progress()) {
      entry_weak_refs();
      entry_weak_roots();
    }

    // Final mark might have reclaimed some immediate garbage; kick off cleanup to reclaim
    // the space. This would be the last action if there is nothing to evacuate.
    entry_cleanup_early();

    {
      ShenandoahHeapLocker locker(heap->lock());
      heap->free_set()->log_status();
    }

    // TODO: Old marking doesn't support class unloading yet.
    // Perform concurrent class unloading:
    // if (heap->unload_classes() &&
    //     heap->is_concurrent_weak_root_in_progress()) {
    //   entry_class_unloading();
    // }

    heap->set_concurrent_prep_for_mixed_evacuation_in_progress(true);
  }

  assert(!heap->is_concurrent_strong_root_in_progress(), "No evacuations during old gc.");

  // We must execute this vm operation if we completed final mark. We cannot
  // return from here with weak roots in progress. This is not a valid gc state
  // for any young collections (or allocation failures) that interrupt the old
  // collection.
  vmop_entry_final_roots(false);

  // Coalesce and fill objects _after_ weak root processing and class unloading.
  // Weak root and reference processing makes assertions about unmarked referents
  // that will fail if they've been overwritten with filler objects. There is also
  // a case in the LRB that permits access to from-space objects for the purpose
  // of class unloading that is unlikely to function correctly if the object has
  // been filled.
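  // Raise the preemption guard: until it is lowered again below, the regulator
  // thread may cancel this old-gen cycle in favor of a pending young-gen
  // collection.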
  _allow_preemption.set();

  if (heap->cancelled_gc()) {
    return false;
  }

  if (heap->is_concurrent_prep_for_mixed_evacuation_in_progress()) {
    if (!entry_coalesce_and_fill()) {
      // If an allocation failure occurs during coalescing, we will run a degenerated
      // cycle for the young generation. This should be a rare event. Normally, we'll
      // resume the coalesce-and-fill effort after the preempting young-gen GC finishes.
      return false;
    }
  }

  if (!_allow_preemption.try_unset()) {
    // The regulator thread has unset the preemption guard. That thread will shortly cancel
    // the gc, but the control thread is now racing it. Wait until this thread sees the cancellation.
    while (!heap->cancelled_gc()) {
      SpinPause();
    }
  }

  // Prepare for old evacuations (actual evacuations will happen on subsequent young collects).
  // This cannot begin until after we have completed coalesce-and-fill.
  start_old_evacuations();

  return true;
}

void ShenandoahOldGC::entry_coalesce_and_fill_message(char *buf, size_t len) const {
  jio_snprintf(buf, len, "Coalescing and filling (%s)", _generation->name());
}

bool ShenandoahOldGC::op_coalesce_and_fill() {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahOldHeuristics* old_heuristics = heap->old_heuristics();
  WorkerThreads* workers = heap->workers();
  uint nworkers = workers->active_workers();

  assert(_generation->generation_mode() == OLD, "Only old-GC does coalesce and fill");
  log_debug(gc)("Starting (or resuming) coalesce-and-fill of old heap regions");
  uint coalesce_and_fill_regions_count = old_heuristics->old_coalesce_and_fill_candidates();
  assert(coalesce_and_fill_regions_count <= heap->num_regions(), "Sanity");
  old_heuristics->get_coalesce_and_fill_candidates(_coalesce_and_fill_region_array);
  ShenandoahConcurrentCoalesceAndFillTask task(nworkers, _coalesce_and_fill_region_array, coalesce_and_fill_regions_count, this);

  workers->run_task(&task);
  if (task.is_completed()) {
    // Remember that we're done with coalesce-and-fill.
    heap->set_concurrent_prep_for_mixed_evacuation_in_progress(false);
    return true;
  } else {
    // We were preempted before the work was done.
    log_debug(gc)("Suspending coalesce-and-fill of old heap regions");
    return false;
  }
}

bool ShenandoahOldGC::entry_coalesce_and_fill() {
  char msg[1024];
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  entry_coalesce_and_fill_message(msg, sizeof(msg));
  ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::coalesce_and_fill);

  TraceCollectorStats tcs(heap->monitoring_support()->concurrent_collection_counters());
  EventMark em("%s", msg);
  ShenandoahWorkerScope scope(heap->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
                              "concurrent coalesce and fill");

  return op_coalesce_and_fill();
}