/*
 * Copyright (c) 2021, Amazon.com, Inc. or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "utilities/quickSort.hpp"

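// Sentinel index value meaning "no such candidate" (for example, no pinned candidate seen yet).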
uint ShenandoahOldHeuristics::NOT_FOUND = -1U;

ShenandoahOldHeuristics::ShenandoahOldHeuristics(ShenandoahOldGeneration* generation, ShenandoahHeuristics* trigger_heuristic) :
  ShenandoahHeuristics(generation),
#ifdef ASSERT
  _start_candidate(0),
#endif
  _first_pinned_candidate(NOT_FOUND),
  _last_old_collection_candidate(0),
  _next_old_collection_candidate(0),
  _last_old_region(0),
  _trigger_heuristic(trigger_heuristic),
  _promotion_failed(false),
  _old_generation(generation)
{
  assert(_generation->generation_mode() == OLD, "This service is only available for old-gc heuristics");
}

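// Moves old-gen collection candidates, in order of decreasing garbage, into the collection
// set for a mixed collection until the old evacuation budget is exhausted. Returns true if
// any old regions were added.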
bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* collection_set) {
  if (unprocessed_old_collection_candidates() == 0) {
    return false;
  }

  _first_pinned_candidate = NOT_FOUND;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  uint included_old_regions = 0;
  size_t evacuated_old_bytes = 0;
  size_t collected_old_bytes = 0;

  // Once a region is added to the collection set, its free (not yet used) bytes are no
  // longer available to receive the results of other evacuations, which reduces the amount
  // of memory that can still be evacuated. We account for this by charging the evacuation
  // budget both for the live bytes that will be copied out of a selected region and, when
  // the budget is constrained by the availability of free memory, for that region's
  // unallocated bytes.
  size_t old_evacuation_budget = (size_t) ((double) heap->get_old_evac_reserve() / ShenandoahEvacWaste);
  size_t remaining_old_evacuation_budget = old_evacuation_budget;
  size_t lost_evacuation_capacity = 0;
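  // Illustrative arithmetic (made-up numbers): with a 100 MB old evacuation reserve and
  // ShenandoahEvacWaste at 1.25, the budget is 100 MB / 1.25 = 80 MB of live data. Adding
  // a candidate with 10 MB live and 2 MB free leaves a remaining budget of 70 MB and a
  // lost capacity of 2 MB, so the next candidate is admitted only if its live bytes fit
  // within 70 MB - 2 MB = 68 MB.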
  log_info(gc)("Choose old regions for mixed collection: old evacuation budget: " SIZE_FORMAT "%s, candidates: %u",
               byte_size_in_proper_unit(old_evacuation_budget), proper_unit_for_byte_size(old_evacuation_budget),
               unprocessed_old_collection_candidates());

  // unprocessed_old_collection_candidates() gives the number of old-gen regions that were
  // selected as candidates at the end of the most recent old-gen concurrent marking phase
  // and have not yet been collected.
  while (unprocessed_old_collection_candidates() > 0) {
    // Old collection candidates are sorted in order of decreasing garbage contained therein.
    ShenandoahHeapRegion* r = next_old_collection_candidate();
    if (r == nullptr) {
      break;
    }

    // If we choose region r to be collected, then we need to decrease the capacity to hold
    // other evacuations by the size of r's free memory.

    // Compensating with lost_evacuation_capacity is probably overly conservative, but it is
    // the safe choice and has minimal impact on the content of the primed collection set.
    if (r->get_live_data_bytes() + lost_evacuation_capacity <= remaining_old_evacuation_budget) {
      // Decrement remaining evacuation budget by bytes that will be copied.
      lost_evacuation_capacity += r->free();
      remaining_old_evacuation_budget -= r->get_live_data_bytes();
      collection_set->add_region(r);
      included_old_regions++;
      evacuated_old_bytes += r->get_live_data_bytes();
      collected_old_bytes += r->garbage();
      consume_old_collection_candidate();
    } else {
      break;
    }
  }

  if (_first_pinned_candidate != NOT_FOUND) {
    // Need to deal with pinned regions
    slide_pinned_regions_to_front();
  }

  if (included_old_regions > 0) {
    log_info(gc)("Old-gen piggyback evac (" UINT32_FORMAT " regions, evacuating " SIZE_FORMAT "%s, reclaiming: " SIZE_FORMAT "%s)",
                 included_old_regions,
                 byte_size_in_proper_unit(evacuated_old_bytes), proper_unit_for_byte_size(evacuated_old_bytes),
                 byte_size_in_proper_unit(collected_old_bytes), proper_unit_for_byte_size(collected_old_bytes));
  }

  if (unprocessed_old_collection_candidates() == 0) {
    _old_generation->transition_to(ShenandoahOldGeneration::IDLE);
  }

  return (included_old_regions > 0);
}

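// Example of the sliding performed below (illustrative): suppose the candidate array was
// examined as [ r0 p1 r2 p3 p4 p5 | r6 ], where r0 and r2 were added to the cset and p1,
// p3, p4 and p5 were pinned. Copying p1 into slot 2 (whose region is already in the cset)
// leaves the surviving pinned candidates contiguous in slots 2..5, so the next-candidate
// cursor can simply be rewound to slot 2.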
void ShenandoahOldHeuristics::slide_pinned_regions_to_front() {
  // Find the first unpinned region to the left of the next collection candidate. The region
  // in this slot will have been added to the cset, so we can use it to hold pointers to
  // regions that were pinned when the cset was chosen.
  // [ r p r p p p r ]
  //          ^
  //          | first r to the left should be in the collection set now.
  uint write_index = NOT_FOUND;
  for (uint search = _next_old_collection_candidate - 1; search > _first_pinned_candidate; --search) {
    ShenandoahHeapRegion* region = _region_data[search]._region;
    if (!region->is_pinned()) {
      write_index = search;
      assert(region->is_cset(), "Expected unpinned region to be added to the collection set.");
      break;
    }
  }

  // If there is no unpinned region to reuse, then everything from _first_pinned_candidate
  // up to _next_old_collection_candidate is pinned and already contiguous, so it suffices
  // to rewind the cursor to the first pinned candidate.
  if (write_index == NOT_FOUND) {
    assert(_region_data[_first_pinned_candidate]._region->is_pinned(), "First pinned candidate should be pinned.");
    _next_old_collection_candidate = _first_pinned_candidate;
    return;
  }

  // Find pinned regions to the left and move their pointer into a slot
  // that was pointing at a region that has been added to the cset.
  // [ r p r p p p r ]
  //       ^
  //       | Write pointer is here. We know this region is already in the cset
  //       | so we can clobber it with the next pinned region we find.
  // The scan must include _first_pinned_candidate itself; the loop is structured to avoid
  // unsigned underflow when that index is zero.
  uint search = write_index;
  while (search > _first_pinned_candidate) {
    --search;
    RegionData& skipped = _region_data[search];
    if (skipped._region->is_pinned()) {
      RegionData& added_to_cset = _region_data[write_index];
      assert(added_to_cset._region->is_cset(), "Can only overwrite slots used by regions added to the collection set.");
      added_to_cset._region = skipped._region;
      added_to_cset._garbage = skipped._garbage;
      --write_index;
    }
  }

  // Everything to the left of the write pointer is now either in the cset or is a stale
  // entry for a pinned region that was copied toward the front.
  // [ r x p p p p r ]
  //       ^
  //       | next pointer points at the first region which was not added
  //       | to the collection set.
#ifdef ASSERT
  for (size_t check = _start_candidate; check <= write_index; ++check) {
    ShenandoahHeapRegion* region = _region_data[check]._region;
    assert(region->is_cset() || region->is_pinned(),
           "Slots before the next candidate should hold cset regions or stale pinned entries.");
  }
  _start_candidate = write_index;
#endif

  // Update to read from the leftmost pinned region, which now sits just past the final
  // position of the write pointer.
  _next_old_collection_candidate = write_index + 1;
}

// Both arguments are unused for old-gen collections; callers must pass null.
void ShenandoahOldHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set,
                                                    ShenandoahOldHeuristics* old_heuristics) {
  assert((collection_set == nullptr) && (old_heuristics == nullptr),
         "Expect null arguments in ShenandoahOldHeuristics::choose_collection_set()");
  // Old-gen doesn't actually choose a collection set to be evacuated by its own gang of worker tasks.
  // Instead, it computes the set of regions to be evacuated by subsequent young-gen evacuation passes.
  prepare_for_old_collections();
}

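// Scans all old-gen regions. Regions with no live data are reclaimed immediately; the
// remaining regular and pinned regions are recorded as candidates, sorted by decreasing
// garbage. Candidates above the garbage threshold are slated for mixed collections; all
// remaining candidates will at least be coalesced and filled.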
void ShenandoahOldHeuristics::prepare_for_old_collections() {
  assert(_generation->generation_mode() == OLD, "This service is only available for old-gc heuristics");
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  size_t cand_idx = 0;
  size_t total_garbage = 0;
  size_t num_regions = heap->num_regions();
  size_t immediate_garbage = 0;
  size_t immediate_regions = 0;

  RegionData* candidates = _region_data;
  for (size_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* region = heap->get_region(i);
    if (!in_generation(region)) {
      continue;
    }

    size_t garbage = region->garbage();
    total_garbage += garbage;

    if (region->is_regular() || region->is_pinned()) {
      if (!region->has_live()) {
        assert(!region->is_pinned(), "Pinned region should have live (pinned) objects.");
        region->make_trash_immediate();
        immediate_regions++;
        immediate_garbage += garbage;
      } else {
        region->begin_preemptible_coalesce_and_fill();
        candidates[cand_idx]._region = region;
        candidates[cand_idx]._garbage = garbage;
        cand_idx++;
      }
    } else if (region->is_humongous_start()) {
      if (!region->has_live()) {
        // The humongous object is dead, we can just return this region and the continuations
        // immediately to the freeset - no evacuations are necessary here. The continuations
        // will be made into trash by this method, so they'll be skipped by the 'is_regular'
        // check above, but we still need to count the start region.
        immediate_regions++;
        immediate_garbage += garbage;
        size_t region_count = heap->trash_humongous_region_at(region);
        log_debug(gc)("Trashed " SIZE_FORMAT " regions for humongous object.", region_count);
      }
    } else if (region->is_trash()) {
      // Count humongous objects made into trash here.
      immediate_regions++;
      immediate_garbage += garbage;
    }
  }

  // TODO: Consider not running mixed collects if we recovered some threshold percentage of memory from immediate garbage.
  // This would be similar to young and global collections shortcutting evacuation, though we'd probably want a separate
  // threshold for the old generation.

  // Sort candidates in decreasing order of garbage so that the most profitable regions are selected first.
  QuickSort::sort<RegionData>(candidates, cand_idx, compare_by_garbage, false);

  // Any old-gen region containing at least ShenandoahOldGarbageThreshold (default: 25)
  // percent garbage is to be evacuated.
  //
  // TODO: allow ShenandoahOldGarbageThreshold to be determined adaptively, by heuristics.

  const size_t garbage_threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahOldGarbageThreshold / 100;
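  // Illustrative arithmetic (region size varies with heap configuration): with 4 MB regions
  // and the default threshold of 25, a region becomes a mixed-collection candidate once it
  // holds at least 4 MB * 25 / 100 = 1 MB of garbage.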
  size_t candidates_garbage = 0;
  _last_old_region = (uint)cand_idx;
  _last_old_collection_candidate = (uint)cand_idx;
  _next_old_collection_candidate = 0;

  for (size_t i = 0; i < cand_idx; i++) {
    if (candidates[i]._garbage < garbage_threshold) {
      // Candidates are sorted in decreasing order of garbage, so no regions after this will be above the threshold.
      _last_old_collection_candidate = (uint)i;
      break;
    }
    candidates_garbage += candidates[i]._garbage;
  }

  // Note that we do not coalesce and fill occupied humongous regions.
  size_t collectable_garbage = immediate_garbage + candidates_garbage;
  log_info(gc)("Old-Gen Collectable Garbage: " SIZE_FORMAT "%s over " UINT32_FORMAT " regions, "
               "Old-Gen Immediate Garbage: " SIZE_FORMAT "%s over " SIZE_FORMAT " regions.",
               byte_size_in_proper_unit(collectable_garbage), proper_unit_for_byte_size(collectable_garbage), _last_old_collection_candidate,
               byte_size_in_proper_unit(immediate_garbage), proper_unit_for_byte_size(immediate_garbage), immediate_regions);

  if (unprocessed_old_collection_candidates() == 0) {
    _old_generation->transition_to(ShenandoahOldGeneration::IDLE);
  } else {
    _old_generation->transition_to(ShenandoahOldGeneration::WAITING);
  }
}


uint ShenandoahOldHeuristics::last_old_collection_candidate_index() {
  return _last_old_collection_candidate;
}

uint ShenandoahOldHeuristics::unprocessed_old_collection_candidates() {
  return _last_old_collection_candidate - _next_old_collection_candidate;
}

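// Returns the next unpinned candidate region, or nullptr if none remain. Pinned candidates
// are skipped over (the cursor advances past them), and the first pinned candidate seen is
// recorded so that slide_pinned_regions_to_front() can preserve it for a later mixed
// collection.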
ShenandoahHeapRegion* ShenandoahOldHeuristics::next_old_collection_candidate() {
  while (_next_old_collection_candidate < _last_old_collection_candidate) {
    ShenandoahHeapRegion* next = _region_data[_next_old_collection_candidate]._region;
    if (!next->is_pinned()) {
      return next;
    } else {
      assert(next->is_pinned(), "sanity");
      if (_first_pinned_candidate == NOT_FOUND) {
        _first_pinned_candidate = _next_old_collection_candidate;
      }
    }

    _next_old_collection_candidate++;
  }
  return nullptr;
}

void ShenandoahOldHeuristics::consume_old_collection_candidate() {
  _next_old_collection_candidate++;
}

uint ShenandoahOldHeuristics::last_old_region_index() const {
  return _last_old_region;
}

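// Copies pointers to every remaining old-region candidate, from the next unprocessed
// candidate through the last old region, into 'buffer', and returns the number of regions
// copied. The caller must supply a buffer large enough to hold them all.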
uint ShenandoahOldHeuristics::get_coalesce_and_fill_candidates(ShenandoahHeapRegion** buffer) {
  uint end = _last_old_region;
  uint index = _next_old_collection_candidate;
  while (index < end) {
    *buffer++ = _region_data[index++]._region;
  }
  return _last_old_region - _next_old_collection_candidate;
}

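// Abandons all remaining candidates, both for mixed collection and for coalesce-and-fill;
// afterwards, unprocessed_old_collection_candidates() reports zero.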
void ShenandoahOldHeuristics::abandon_collection_candidates() {
  _last_old_collection_candidate = 0;
  _next_old_collection_candidate = 0;
  _last_old_region = 0;
}

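// Records that a promotion failed. The flag is consulted by should_start_gc() to trigger an
// old collection and is cleared again by record_cycle_start().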
void ShenandoahOldHeuristics::handle_promotion_failure() {
  _promotion_failed = true;
}

void ShenandoahOldHeuristics::record_cycle_start() {
  _promotion_failed = false;
  _trigger_heuristic->record_cycle_start();
}

void ShenandoahOldHeuristics::record_cycle_end() {
  _trigger_heuristic->record_cycle_end();
}

bool ShenandoahOldHeuristics::should_start_gc() {
  // Cannot start a new old-gen GC until the previous one has finished.
  //
  // Future refinement: under certain circumstances, we might be more sophisticated about this choice.
  // For example, we could choose to abandon the previous old collection before it has completed evacuations.
  if (unprocessed_old_collection_candidates() > 0) {
    return false;
  }

  // If there's been a promotion failure (and we don't have regions already scheduled for evacuation),
  // start a new old generation collection.
  if (_promotion_failed) {
    log_info(gc)("Trigger: Promotion Failure");
    return true;
  }

  // Otherwise, defer to the configured heuristic for the gc trigger.
  return _trigger_heuristic->should_start_gc();
}

bool ShenandoahOldHeuristics::should_degenerate_cycle() {
  return _trigger_heuristic->should_degenerate_cycle();
}

void ShenandoahOldHeuristics::record_success_concurrent(bool abbreviated) {
  _trigger_heuristic->record_success_concurrent(abbreviated);
}

void ShenandoahOldHeuristics::record_success_degenerated() {
  _trigger_heuristic->record_success_degenerated();
}

void ShenandoahOldHeuristics::record_success_full() {
  _trigger_heuristic->record_success_full();
}

void ShenandoahOldHeuristics::record_allocation_failure_gc() {
  _trigger_heuristic->record_allocation_failure_gc();
}

void ShenandoahOldHeuristics::record_requested_gc() {
  _trigger_heuristic->record_requested_gc();
}

bool ShenandoahOldHeuristics::can_unload_classes() {
  return _trigger_heuristic->can_unload_classes();
}

bool ShenandoahOldHeuristics::can_unload_classes_normal() {
  return _trigger_heuristic->can_unload_classes_normal();
}

bool ShenandoahOldHeuristics::should_unload_classes() {
  return _trigger_heuristic->should_unload_classes();
}

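// Note: the name is formatted into a static buffer, so a subsequent call (from any thread)
// may overwrite the string a previous caller is still holding.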
const char* ShenandoahOldHeuristics::name() {
  static char name[128];
  jio_snprintf(name, sizeof(name), "%s (OLD)", _trigger_heuristic->name());
  return name;
}

bool ShenandoahOldHeuristics::is_diagnostic() {
  return false;
}

bool ShenandoahOldHeuristics::is_experimental() {
  return true;
}

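// Old-gen heuristics never select a collection set through this path (see
// choose_collection_set() above), so this should be unreachable.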
void ShenandoahOldHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
                                                                    ShenandoahHeuristics::RegionData* data,
                                                                    size_t data_size, size_t free) {
  ShouldNotReachHere();
}
421