/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"

#include "utilities/quickSort.hpp"

ShenandoahYoungHeuristics::ShenandoahYoungHeuristics(ShenandoahYoungGeneration* generation)
  : ShenandoahGenerationalHeuristics(generation) {
}

void ShenandoahYoungHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
                                                                      RegionData* data, size_t size,
                                                                      size_t actual_free) {
  // See comments in ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata():
  // we do the same here, but with the following adjustments for generational mode:
  //
  // In generational mode, the sort order within the data array is not strictly descending amounts
  // of garbage. In particular, regions that have reached tenure age will be sorted into this
  // array before younger regions that typically contain more garbage. This is one reason why,
  // for example, we continue examining regions even after rejecting a region that has
  // more live data than we can evacuate.
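  //
  // For illustration (hypothetical values): if region A precedes region B in the array, and
  // A's live data exceeds the remaining evacuation budget while B's does not, we still want
  // to reach B, so a single rejection must not terminate the scan.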

  // Prefer garbage-rich regions: sort candidates in descending order of garbage
  QuickSort::sort<RegionData>(data, (int) size, compare_by_garbage, false);

  size_t cur_young_garbage = add_preselected_regions_to_collection_set(cset, data, size);

  choose_young_collection_set(cset, data, size, actual_free, cur_young_garbage);

  log_cset_composition(cset);
}

void ShenandoahYoungHeuristics::choose_young_collection_set(ShenandoahCollectionSet* cset,
                                                            const RegionData* data,
                                                            size_t size, size_t actual_free,
                                                            size_t cur_young_garbage) const {

  const auto heap = ShenandoahGenerationalHeap::heap();

  const size_t capacity = heap->soft_max_capacity();
  const size_t garbage_threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
  const size_t ignore_threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahIgnoreGarbageThreshold / 100;

  // This is a young-gen collection or a mixed evacuation.
  // If this is a mixed evacuation, the old-gen candidate regions have already been added.
  size_t cur_cset = 0;
  const size_t max_cset = (size_t) (heap->young_generation()->get_evacuation_reserve() / ShenandoahEvacWaste);
  const size_t free_target = (capacity * ShenandoahMinFreeThreshold) / 100 + max_cset;
  const size_t min_garbage = (free_target > actual_free) ? (free_target - actual_free) : 0;
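  // Illustrative arithmetic (hypothetical values, not defaults): with a 1024 MB soft max
  // capacity, ShenandoahMinFreeThreshold = 10, a 120 MB young evacuation reserve, and
  // ShenandoahEvacWaste = 1.2:
  //   max_cset    = 120 MB / 1.2                = 100 MB
  //   free_target = 1024 MB * 10 / 100 + 100 MB = ~202 MB
  //   min_garbage = ~202 MB - actual_free, or 0 if actual_free already exceeds free_target.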

  log_info(gc, ergo)(
          "Adaptive CSet Selection for YOUNG. Max Evacuation: " SIZE_FORMAT "%s, Actual Free: " SIZE_FORMAT "%s.",
          byte_size_in_proper_unit(max_cset), proper_unit_for_byte_size(max_cset),
          byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free));

  for (size_t idx = 0; idx < size; idx++) {
    ShenandoahHeapRegion* r = data[idx].get_region();
    if (cset->is_preselected(r->index())) {
      continue;
    }

    // Note that we do not add tenurable regions unless they were preselected. A region is not
    // preselected either because there is insufficient room in old-gen to hold its to-be-promoted
    // live objects, or because it is to be promoted in place.
    if (!heap->is_tenurable(r)) {
      const size_t new_cset = cur_cset + r->get_live_data_bytes();
      const size_t region_garbage = r->garbage();
      const size_t new_garbage = cur_young_garbage + region_garbage;
      const bool add_regardless = (region_garbage > ignore_threshold) && (new_garbage < min_garbage);
      assert(r->is_young(), "Only young candidates expected in the data array");
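      // Admit the region only if its live data still fits within the evacuation budget
      // (new_cset <= max_cset), and it is either garbage-rich (above garbage_threshold) or
      // carries non-trivial garbage (above ignore_threshold) while we remain short of min_garbage.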
      if ((new_cset <= max_cset) && (add_regardless || (region_garbage > garbage_threshold))) {
        cur_cset = new_cset;
        cur_young_garbage = new_garbage;
        cset->add_region(r);
      }
    }
  }
}

bool ShenandoahYoungHeuristics::should_start_gc() {
  auto heap = ShenandoahGenerationalHeap::heap();
  ShenandoahOldGeneration* old_generation = heap->old_generation();
  ShenandoahOldHeuristics* old_heuristics = old_generation->heuristics();

  // Check that an in-progress old cycle has run for at least ShenandoahMinimumOldTimeMs
  // before allowing a young cycle.
  if (ShenandoahMinimumOldTimeMs > 0) {
    if (old_generation->is_preparing_for_mark() || old_generation->is_concurrent_mark_in_progress()) {
      size_t old_time_elapsed = size_t(old_heuristics->elapsed_cycle_time() * 1000);
      if (old_time_elapsed < ShenandoahMinimumOldTimeMs) {
        // Do not decline_trigger() while waiting for the minimum quantum of old-gen marking.
        // It is not at our discretion to trigger at this time.
        return false;
      }
    }
  }

  // If the inherited (adaptive) triggers decide to start a cycle, no further evaluation is required.
  if (ShenandoahAdaptiveHeuristics::should_start_gc()) {
    return true;
  }

  // Get through promotions and mixed evacuations as quickly as possible. These cycles sometimes require significantly
  // more time than traditional young-generation cycles, so start them up as soon as possible. This is a "mitigation"
  // for the reality that old-gen and young-gen activities are not truly "concurrent". If there is old-gen work to
  // be done, we start up the young-gen GC threads so they can do some of this old-gen work. As implemented, promotion
  // gets priority over old-gen marking.
  size_t promo_expedite_threshold = percent_of(heap->young_generation()->max_capacity(), ShenandoahExpeditePromotionsThreshold);
  size_t promo_potential = old_generation->get_promotion_potential();
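  // Illustrative (hypothetical values): with a 2048 MB young generation and
  // ShenandoahExpeditePromotionsThreshold = 5, the trigger below fires once more than
  // ~102 MB of promotion potential has accumulated.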
  if (promo_potential > promo_expedite_threshold) {
    // Detect unsigned arithmetic underflow
    assert(promo_potential < heap->capacity(), "Sanity");
    log_trigger("Expedite promotion of " PROPERFMT, PROPERFMTARGS(promo_potential));
    accept_trigger();
    return true;
  }

  size_t mixed_candidates = old_heuristics->unprocessed_old_collection_candidates();
  if (mixed_candidates > ShenandoahExpediteMixedThreshold && !heap->is_concurrent_weak_root_in_progress()) {
    // We need to run young GC in order to open up some free heap regions so we can finish mixed evacuations.
    // If concurrent weak root processing is in progress, it means the old cycle has chosen mixed collection
    // candidates, but has not completed. There is no point in trying to start the young cycle before the old
    // cycle completes.
    log_trigger("Expedite mixed evacuation of " SIZE_FORMAT " regions", mixed_candidates);
    accept_trigger();
    return true;
  }

  // Don't decline_trigger() here. That was done in ShenandoahAdaptiveHeuristics::should_start_gc().
  return false;
}

// Return a conservative estimate of how much memory can be allocated before we need to start GC. The estimate is
// based on memory that is currently available within the young generation, plus all of the memory that will be added
// to the young generation at the end of the current cycle (as represented by young_regions_to_be_reclaimed), and on
// the anticipated amount of time required to perform a GC.
size_t ShenandoahYoungHeuristics::bytes_of_allocation_runway_before_gc_trigger(size_t young_regions_to_be_reclaimed) {
  size_t capacity = _space_info->max_capacity();
  size_t usage = _space_info->used();
  size_t available = (capacity > usage)? capacity - usage: 0;
  size_t allocated = _space_info->bytes_allocated_since_gc_start();

  size_t available_young_collected = ShenandoahHeap::heap()->collection_set()->get_young_available_bytes_collected();
  size_t anticipated_available =
    available + young_regions_to_be_reclaimed * ShenandoahHeapRegion::region_size_bytes() - available_young_collected;
  size_t spike_headroom = capacity * ShenandoahAllocSpikeFactor / 100;
  size_t penalties = capacity * _gc_time_penalties / 100;

  double rate = _allocation_rate.sample(allocated);

  // At what value of available would the avg and spike triggers occur?
  //  if allocation_headroom < avg_cycle_time * avg_alloc_rate, then we experience the avg trigger
  //  if allocation_headroom < avg_cycle_time * rate, then we experience the spike trigger (if is_spiking)
  //
  //  allocation_headroom =
  //    0, if penalties + spike_headroom > available
  //    available - penalties - spike_headroom, otherwise
  //
  //  so we trigger if available - penalties - spike_headroom < avg_cycle_time * avg_alloc_rate, which is to say
  //                  available < avg_cycle_time * avg_alloc_rate + penalties + spike_headroom
  //            or if available < penalties + spike_headroom
  //
  //  since avg_cycle_time * avg_alloc_rate > 0, the first test is sufficient to cover both conditions
  //
  //  thus, evac_slack_avg is MAX2(0, available - (avg_cycle_time * avg_alloc_rate + penalties + spike_headroom))
  //
  //  similarly, evac_slack_spiking is MAX2(0, available - (avg_cycle_time * rate + penalties + spike_headroom))
  //  but evac_slack_spiking is only relevant if is_spiking, as defined below.
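  //
  // Worked example (hypothetical values): with anticipated_available = 600 MB,
  // avg_cycle_time = 0.25 s, avg_alloc_rate = 1200 MB/s, and penalties + spike_headroom = 80 MB:
  //   expected demand = 0.25 * 1200      = 300 MB
  //   evac_slack_avg  = 600 - (300 + 80) = 220 MB
  // Were anticipated_available only 350 MB (less than 380 MB), evac_slack_avg would clamp to 0.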

  double avg_cycle_time = _gc_cycle_time_history->davg() + (_margin_of_error_sd * _gc_cycle_time_history->dsd());
  double avg_alloc_rate = _allocation_rate.upper_bound(_margin_of_error_sd);
  size_t evac_slack_avg;
  if (anticipated_available > avg_cycle_time * avg_alloc_rate + penalties + spike_headroom) {
    evac_slack_avg = anticipated_available - (avg_cycle_time * avg_alloc_rate + penalties + spike_headroom);
  } else {
    // we have no slack because it's already time to trigger
    evac_slack_avg = 0;
  }

  bool is_spiking = _allocation_rate.is_spiking(rate, _spike_threshold_sd);
  size_t evac_slack_spiking;
  if (is_spiking) {
    if (anticipated_available > avg_cycle_time * rate + penalties + spike_headroom) {
      evac_slack_spiking = anticipated_available - (avg_cycle_time * rate + penalties + spike_headroom);
    } else {
      // we have no slack because it's already time to trigger
      evac_slack_spiking = 0;
    }
  } else {
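    // Not spiking: the spike-based constraint is not binding, so fall back to
    // evac_slack_avg, making this term a no-op in the MIN3 below.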
    evac_slack_spiking = evac_slack_avg;
  }

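  // Also honor the minimum-free threshold: report no more runway than would still leave
  // min_free_threshold() bytes available.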
  size_t threshold = min_free_threshold();
  size_t evac_min_threshold = (anticipated_available > threshold)? anticipated_available - threshold: 0;
  return MIN3(evac_slack_spiking, evac_slack_avg, evac_min_threshold);
}