1 /*
  2  * Copyright (c) 2020, 2021 Amazon.com, Inc. and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 
 27 #include "gc/shenandoah/shenandoahFreeSet.hpp"
 28 #include "gc/shenandoah/shenandoahGeneration.hpp"
 29 #include "gc/shenandoah/shenandoahHeap.hpp"
 30 #include "gc/shenandoah/shenandoahMarkClosures.hpp"
 31 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
 32 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
 33 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
 34 #include "gc/shenandoah/shenandoahUtils.hpp"
 35 #include "gc/shenandoah/shenandoahVerifier.hpp"
 36 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
 37 
 38 class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
 39  private:
 40   ShenandoahMarkingContext* const _ctx;
 41  public:
 42   ShenandoahResetUpdateRegionStateClosure() :
 43     _ctx(ShenandoahHeap::heap()->marking_context()) {}
 44 
 45   void heap_region_do(ShenandoahHeapRegion* r) {
 46     if (r->is_active()) {
 47       // Reset live data and set TAMS optimistically. We would recheck these under the pause
 48       // anyway to capture any updates that happened since now.
 49       _ctx->capture_top_at_mark_start(r);
 50       r->clear_live_data();
 51     }
 52   }
 53 
 54   bool is_thread_safe() { return true; }
 55 };
 56 
 57 class ShenandoahResetBitmapTask : public ShenandoahHeapRegionClosure {
 58  private:
 59   ShenandoahHeap* _heap;
 60   ShenandoahMarkingContext* const _ctx;
 61  public:
 62   ShenandoahResetBitmapTask() :
 63     _heap(ShenandoahHeap::heap()),
 64     _ctx(_heap->marking_context()) {}
 65 
 66   void heap_region_do(ShenandoahHeapRegion* region) {
 67     if (_heap->is_bitmap_slice_committed(region)) {
 68       _ctx->clear_bitmap(region);
 69     }
 70   }
 71 
 72   bool is_thread_safe() { return true; }
 73 };
 74 
 75 class ShenandoahSquirrelAwayCardTable: public ShenandoahHeapRegionClosure {
 76  private:
 77   ShenandoahHeap* _heap;
 78   RememberedScanner* _scanner;
 79  public:
 80   ShenandoahSquirrelAwayCardTable() :
 81     _heap(ShenandoahHeap::heap()),
 82     _scanner(_heap->card_scan()) {}
 83 
 84   void heap_region_do(ShenandoahHeapRegion* region) {
 85     if (region->is_old()) {
 86       _scanner->reset_remset(region->bottom(), ShenandoahHeapRegion::region_size_words());
 87     }
 88   }
 89 
 90   bool is_thread_safe() { return true; }
 91 };
 92 
 93 void ShenandoahGeneration::confirm_heuristics_mode() {
 94   if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 95     vm_exit_during_initialization(
 96             err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 97                     _heuristics->name()));
 98   }
 99   if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
100     vm_exit_during_initialization(
101             err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
102                     _heuristics->name()));
103   }
104 }
105 
106 ShenandoahHeuristics* ShenandoahGeneration::initialize_heuristics(ShenandoahMode* gc_mode) {
107   _heuristics = gc_mode->initialize_heuristics(this);
108   _heuristics->set_guaranteed_gc_interval(ShenandoahGuaranteedGCInterval);
109   confirm_heuristics_mode();
110   return _heuristics;
111 }
112 
113 size_t ShenandoahGeneration::bytes_allocated_since_gc_start() {
114   return Atomic::load(&_bytes_allocated_since_gc_start);;
115 }
116 
// Zeroes the since-GC-start allocation counter (called at the start of a cycle).
// Atomic store for visibility to concurrently-allocating mutators.
void ShenandoahGeneration::reset_bytes_allocated_since_gc_start() {
  Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
}
120 
// Accounts 'bytes' of new allocation in this generation since GC start.
// Relaxed ordering: this counter is used as a statistic, not for synchronization.
void ShenandoahGeneration::increase_allocated(size_t bytes) {
  Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
}
124 
125 void ShenandoahGeneration::log_status() const {
126   typedef LogTarget(Info, gc, ergo) LogGcInfo;
127 
128   if (!LogGcInfo::is_enabled()) {
129     return;
130   }
131 
132   // Not under a lock here, so read each of these once to make sure
133   // byte size in proper unit and proper unit for byte size are consistent.
134   size_t v_used = used();
135   size_t v_used_regions = used_regions_size();
136   size_t v_soft_max_capacity = soft_max_capacity();
137   size_t v_max_capacity = max_capacity();
138   size_t v_available = available();
139   LogGcInfo::print("%s generation used: " SIZE_FORMAT "%s, used regions: " SIZE_FORMAT "%s, "
140                    "soft capacity: " SIZE_FORMAT "%s, max capacity: " SIZE_FORMAT " %s, available: " SIZE_FORMAT " %s",
141                    name(),
142                    byte_size_in_proper_unit(v_used), proper_unit_for_byte_size(v_used),
143                    byte_size_in_proper_unit(v_used_regions), proper_unit_for_byte_size(v_used_regions),
144                    byte_size_in_proper_unit(v_soft_max_capacity), proper_unit_for_byte_size(v_soft_max_capacity),
145                    byte_size_in_proper_unit(v_max_capacity), proper_unit_for_byte_size(v_max_capacity),
146                    byte_size_in_proper_unit(v_available), proper_unit_for_byte_size(v_available));
147 }
148 
149 void ShenandoahGeneration::reset_mark_bitmap() {
150   ShenandoahHeap* heap = ShenandoahHeap::heap();
151   heap->assert_gc_workers(heap->workers()->active_workers());
152 
153   set_mark_incomplete();
154 
155   ShenandoahResetBitmapTask task;
156   parallel_heap_region_iterate(&task);
157 }
158 
159 // The ideal is to swap the remembered set so the safepoint effort is no more than a few pointer manipulations.
160 // However, limitations in the implementation of the mutator write-barrier make it difficult to simply change the
161 // location of the card table.  So the interim implementation of swap_remembered_set will copy the write-table
162 // onto the read-table and will then clear the write-table.
163 void ShenandoahGeneration::swap_remembered_set() {
164   // Must be sure that marking is complete before we swap remembered set.
165   ShenandoahHeap* heap = ShenandoahHeap::heap();
166   heap->assert_gc_workers(heap->workers()->active_workers());
167   shenandoah_assert_safepoint();
168 
169   // TODO: Eventually, we want replace this with a constant-time exchange of pointers.
170   ShenandoahSquirrelAwayCardTable task;
171   heap->old_generation()->parallel_heap_region_iterate(&task);
172 }
173 
174 void ShenandoahGeneration::prepare_gc(bool do_old_gc_bootstrap) {
175   // Reset mark bitmap for this generation (typically young)
176   reset_mark_bitmap();
177   if (do_old_gc_bootstrap) {
178     // Reset mark bitmap for old regions also.  Note that do_old_gc_bootstrap is only true if this generation is YOUNG.
179     ShenandoahHeap::heap()->old_generation()->reset_mark_bitmap();
180   }
181 
182   // Capture Top At Mark Start for this generation (typically young)
183   ShenandoahResetUpdateRegionStateClosure cl;
184   parallel_heap_region_iterate(&cl);
185   if (do_old_gc_bootstrap) {
186     // Capture top at mark start for both old-gen regions also.  Note that do_old_gc_bootstrap is only true if generation is YOUNG.
187     ShenandoahHeap::heap()->old_generation()->parallel_heap_region_iterate(&cl);
188   }
189 }
190 
191 // Returns true iff the chosen collection set includes a mix of young-gen and old-gen regions.
192 bool ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
193   bool result;
194   ShenandoahHeap* heap = ShenandoahHeap::heap();
195   assert(!heap->is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
196   assert(generation_mode() != OLD, "Only YOUNG and GLOBAL GC perform evacuations");
197   {
198     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
199                                          ShenandoahPhaseTimings::degen_gc_final_update_region_states);
200     ShenandoahFinalMarkUpdateRegionStateClosure cl(complete_marking_context());
201 
202     parallel_heap_region_iterate(&cl);
203     heap->assert_pinned_region_status();
204 
205     if (generation_mode() == YOUNG) {
206       // Also capture update_watermark for old-gen regions.
207       ShenandoahCaptureUpdateWaterMarkForOld old_cl(complete_marking_context());
208       heap->old_generation()->parallel_heap_region_iterate(&old_cl);
209     }
210   }
211 
212   {
213     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
214                             ShenandoahPhaseTimings::degen_gc_choose_cset);
215     ShenandoahHeapLocker locker(heap->lock());
216     heap->collection_set()->clear();
217     result = _heuristics->choose_collection_set(heap->collection_set(), heap->old_heuristics());
218   }
219 
220   {
221     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
222                             ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
223     ShenandoahHeapLocker locker(heap->lock());
224     heap->free_set()->rebuild();
225   }
226   return result;
227 }
228 
229 bool ShenandoahGeneration::is_bitmap_clear() {
230   ShenandoahHeap* heap = ShenandoahHeap::heap();
231   ShenandoahMarkingContext* context = heap->marking_context();
232   size_t num_regions = heap->num_regions();
233   for (size_t idx = 0; idx < num_regions; idx++) {
234     ShenandoahHeapRegion* r = heap->get_region(idx);
235     if (contains(r) && (r->affiliation() != FREE)) {
236       if (heap->is_bitmap_slice_committed(r) && (context->top_at_mark_start(r) > r->bottom()) &&
237           !context->is_bitmap_clear_range(r->bottom(), r->end())) {
238         return false;
239       }
240     }
241   }
242   return true;
243 }
244 
// Returns true if marking for this generation has run to completion.
bool ShenandoahGeneration::is_mark_complete() {
  return _is_marking_complete.is_set();
}
248 
// Marks this generation's marking as complete (e.g. at the end of final mark).
void ShenandoahGeneration::set_mark_complete() {
  _is_marking_complete.set();
}
252 
// Marks this generation's marking as incomplete (bitmap reset or mark canceled).
void ShenandoahGeneration::set_mark_incomplete() {
  _is_marking_complete.unset();
}
256 
// Returns the heap's marking context, asserting that marking has completed
// so callers can rely on the context's data being final.
ShenandoahMarkingContext* ShenandoahGeneration::complete_marking_context() {
  assert(is_mark_complete(), "Marking must be completed.");
  return ShenandoahHeap::heap()->marking_context();
}
261 
// Abandons an in-flight mark for this generation: drop the concurrent-mark
// flag, record marking as incomplete, discard queued marking work, and
// abandon partially discovered references.  The order of these steps is
// deliberate; callers depend on the flag being dropped first.
void ShenandoahGeneration::cancel_marking() {
  if (is_concurrent_mark_in_progress()) {
    set_concurrent_mark_in_progress(false);
  }
  set_mark_incomplete();
  _task_queues->clear();

  ref_processor()->abandon_partial_discovery();
}
271 
272 ShenandoahGeneration::ShenandoahGeneration(GenerationMode generation_mode,
273                                            uint max_workers,
274                                            size_t max_capacity,
275                                            size_t soft_max_capacity) :
276   _generation_mode(generation_mode),
277   _task_queues(new ShenandoahObjToScanQueueSet(max_workers)),
278   _ref_processor(new ShenandoahReferenceProcessor(MAX2(max_workers, 1U))),
279   _affiliated_region_count(0), _used(0), _bytes_allocated_since_gc_start(0),
280   _max_capacity(max_capacity), _soft_max_capacity(soft_max_capacity),
281   _heuristics(nullptr) {
282   _is_marking_complete.set();
283   assert(max_workers > 0, "At least one queue");
284   for (uint i = 0; i < max_workers; ++i) {
285     ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
286     _task_queues->register_queue(i, task_queue);
287   }
288 }
289 
290 ShenandoahGeneration::~ShenandoahGeneration() {
291   for (uint i = 0; i < _task_queues->size(); ++i) {
292     ShenandoahObjToScanQueue* q = _task_queues->queue(i);
293     delete q;
294   }
295   delete _task_queues;
296 }
297 
// Sizes the task queue set for 'workers' active workers.
void ShenandoahGeneration::reserve_task_queues(uint workers) {
  _task_queues->reserve(workers);
}
301 
// By default a generation has no old-gen task queues; presumably overridden
// where old-gen marking queues exist — see subclasses.
ShenandoahObjToScanQueueSet* ShenandoahGeneration::old_gen_task_queues() const {
  return nullptr;
}
305 
306 void ShenandoahGeneration::scan_remembered_set() {
307   assert(generation_mode() == YOUNG, "Should only scan remembered set for young generation.");
308 
309   ShenandoahHeap* const heap = ShenandoahHeap::heap();
310   uint nworkers = heap->workers()->active_workers();
311   reserve_task_queues(nworkers);
312 
313   ShenandoahReferenceProcessor* rp = ref_processor();
314   ShenandoahRegionIterator regions;
315   ShenandoahScanRememberedTask task(task_queues(), old_gen_task_queues(), rp, &regions);
316   heap->workers()->run_task(&task);
317 }
318 
319 void ShenandoahGeneration::increment_affiliated_region_count() {
320   _affiliated_region_count++;
321 }
322 
323 void ShenandoahGeneration::decrement_affiliated_region_count() {
324   _affiliated_region_count--;
325 }
326 
// Zeroes this generation's used-bytes counter; only valid at a safepoint.
void ShenandoahGeneration::clear_used() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
  // Do this atomically to assure visibility to other threads, even though these other threads may be idle "right now".
  Atomic::store(&_used, (size_t)0);
}
332 
// Atomically adds 'bytes' to this generation's used-bytes counter.
void ShenandoahGeneration::increase_used(size_t bytes) {
  Atomic::add(&_used, bytes);
}
336 
// Atomically subtracts 'bytes' from this generation's used-bytes counter.
// NOTE(review): the assert reads _used with a plain load; assumed adequate for
// a debug-only sanity check — confirm against callers' synchronization.
void ShenandoahGeneration::decrease_used(size_t bytes) {
  assert(_used >= bytes, "cannot reduce bytes used by generation below zero");
  Atomic::sub(&_used, bytes);
}
341 
// Total byte footprint of regions affiliated with this generation
// (region count times fixed region size).
// NOTE(review): _affiliated_region_count is read without atomics here;
// presumably acceptable for monitoring/logging consumers — confirm.
size_t ShenandoahGeneration::used_regions_size() const {
  return _affiliated_region_count * ShenandoahHeapRegion::region_size_bytes();
}
345 
346 size_t ShenandoahGeneration::available() const {
347   size_t in_use = used();
348   size_t soft_capacity = soft_max_capacity();
349   return in_use > soft_capacity ? 0 : soft_capacity - in_use;
350 }