/*
 * Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
#include "runtime/globals_extension.hpp"

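// Descending order by garbage: regions with more reclaimable garbage sort first.
// Heuristics subclasses use this comparator to order candidate regions before
// selecting the collection set.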
int ShenandoahHeuristics::compare_by_garbage(RegionData a, RegionData b) {
  if (a._garbage > b._garbage)
    return -1;
  else if (a._garbage < b._garbage)
    return 1;
  else return 0;
}

ShenandoahHeuristics::ShenandoahHeuristics() :
  _region_data(NULL),
  _degenerated_cycles_in_a_row(0),
  _successful_cycles_in_a_row(0),
  _cycle_start(os::elapsedTime()),
  _last_cycle_end(0),
  _gc_times_learned(0),
  _gc_time_penalties(0),
  _gc_time_history(new TruncatedSeq(10, ShenandoahAdaptiveDecayFactor)),
  _metaspace_oom()
{
  // No unloading during concurrent mark? Communicate that to heuristics
  if (!ClassUnloadingWithConcurrentMark) {
    FLAG_SET_DEFAULT(ShenandoahUnloadClassesFrequency, 0);
  }

  size_t num_regions = ShenandoahHeap::heap()->num_regions();
  assert(num_regions > 0, "Sanity");

  _region_data = NEW_C_HEAP_ARRAY(RegionData, num_regions, mtGC);
}

ShenandoahHeuristics::~ShenandoahHeuristics() {
  FREE_C_HEAP_ARRAY(RegionData, _region_data);
}

void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
  assert(collection_set->count() == 0, "Must be empty");

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Check all pinned regions have updated status before choosing the collection set.
  heap->assert_pinned_region_status();

  // Step 1. Build up the region candidates we care about, rejecting losers and accepting winners right away.

  size_t num_regions = heap->num_regions();

  RegionData* candidates = _region_data;

  size_t cand_idx = 0;

  size_t total_garbage = 0;

  size_t immediate_garbage = 0;
  size_t immediate_regions = 0;

  size_t free = 0;
  size_t free_regions = 0;

  ShenandoahMarkingContext* const ctx = heap->complete_marking_context();

  for (size_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* region = heap->get_region(i);

    size_t garbage = region->garbage();
    total_garbage += garbage;

    if (region->is_empty()) {
      free_regions++;
      free += ShenandoahHeapRegion::region_size_bytes();
    } else if (region->is_regular()) {
      if (!region->has_live()) {
        // We can recycle it right away and put it in the free set.
        immediate_regions++;
        immediate_garbage += garbage;
        region->make_trash_immediate();
      } else {
        // This is our candidate for later consideration.
        candidates[cand_idx]._region = region;
        candidates[cand_idx]._garbage = garbage;
        cand_idx++;
      }
    } else if (region->is_humongous_start()) {
      // Reclaim humongous regions here, and count them as the immediate garbage
#ifdef ASSERT
      bool reg_live = region->has_live();
      bool bm_live = ctx->is_marked(cast_to_oop(region->bottom()));
      assert(reg_live == bm_live,
             "Humongous liveness and marks should agree. Region live: %s; Bitmap live: %s; Region Live Words: " SIZE_FORMAT,
             BOOL_TO_STR(reg_live), BOOL_TO_STR(bm_live), region->get_live_data_words());
#endif
      if (!region->has_live()) {
        heap->trash_humongous_region_at(region);

        // Count only the start. Continuations would be counted on "trash" path
        immediate_regions++;
        immediate_garbage += garbage;
      }
    } else if (region->is_trash()) {
      // Count in just trashed collection set, during coalesced CM-with-UR
      immediate_regions++;
      immediate_garbage += garbage;
    }
  }

  // Step 2. Look back at garbage statistics, and decide if we want to collect anything,
  // given the amount of immediately reclaimable garbage. If we do, figure out the collection set.

  assert(immediate_garbage <= total_garbage,
         "Cannot have more immediate garbage than total garbage: " SIZE_FORMAT "%s vs " SIZE_FORMAT "%s",
         byte_size_in_proper_unit(immediate_garbage), proper_unit_for_byte_size(immediate_garbage),
         byte_size_in_proper_unit(total_garbage), proper_unit_for_byte_size(total_garbage));

  size_t immediate_percent = (total_garbage == 0) ? 0 : (immediate_garbage * 100 / total_garbage);

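  // Build a collection set only when immediate garbage alone is not enough: once the
  // immediate fraction exceeds ShenandoahImmediateThreshold percent of total garbage,
  // the cheap reclamation above suffices and evacuation is skipped for this cycle.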
  if (immediate_percent <= ShenandoahImmediateThreshold) {
    choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage + free);
  }

  size_t cset_percent = (total_garbage == 0) ? 0 : (collection_set->garbage() * 100 / total_garbage);

  size_t collectable_garbage = collection_set->garbage() + immediate_garbage;
  size_t collectable_garbage_percent = (total_garbage == 0) ? 0 : (collectable_garbage * 100 / total_garbage);

  log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "%s (" SIZE_FORMAT "%%), "
                     "Immediate: " SIZE_FORMAT "%s (" SIZE_FORMAT "%%), "
                     "CSet: " SIZE_FORMAT "%s (" SIZE_FORMAT "%%)",

                     byte_size_in_proper_unit(collectable_garbage),
                     proper_unit_for_byte_size(collectable_garbage),
                     collectable_garbage_percent,

                     byte_size_in_proper_unit(immediate_garbage),
                     proper_unit_for_byte_size(immediate_garbage),
                     immediate_percent,

                     byte_size_in_proper_unit(collection_set->garbage()),
                     proper_unit_for_byte_size(collection_set->garbage()),
                     cset_percent);
}

void ShenandoahHeuristics::record_cycle_start() {
  _cycle_start = os::elapsedTime();
}

void ShenandoahHeuristics::record_cycle_end() {
  _last_cycle_end = os::elapsedTime();
}

bool ShenandoahHeuristics::should_start_gc() {
  // Perform GC to clean up metaspace
  if (has_metaspace_oom()) {
    // Some of the vmTestbase/metaspace tests depend on the following line to count GC cycles
    log_info(gc)("Trigger: %s", GCCause::to_string(GCCause::_metadata_GC_threshold));
    return true;
  }

  if (ShenandoahGuaranteedGCInterval > 0) {
    double last_time_ms = (os::elapsedTime() - _last_cycle_end) * 1000;
    if (last_time_ms > ShenandoahGuaranteedGCInterval) {
      log_info(gc)("Trigger: Time since last GC (%.0f ms) is larger than guaranteed interval (" UINTX_FORMAT " ms)",
                   last_time_ms, ShenandoahGuaranteedGCInterval);
      return true;
    }
  }

  return false;
}

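// Degenerated cycles are tolerated up to ShenandoahFullGCThreshold in a row; once
// that budget is exhausted, the next failure upgrades to a Full GC instead.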
bool ShenandoahHeuristics::should_degenerate_cycle() {
  return _degenerated_cycles_in_a_row <= ShenandoahFullGCThreshold;
}

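// The penalty is a saturating score in [0, 100]: successful concurrent cycles decay
// it (Concurrent_Adjust), while degenerated and full cycles raise it
// (Degenerated_Penalty, Full_Penalty). Heuristics that estimate GC times can fold
// this score in as a safety margin after misbehaving cycles.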
void ShenandoahHeuristics::adjust_penalty(intx step) {
  assert(0 <= _gc_time_penalties && _gc_time_penalties <= 100,
         "In range before adjustment: " INTX_FORMAT, _gc_time_penalties);

  intx new_val = _gc_time_penalties + step;
  if (new_val < 0) {
    new_val = 0;
  }
  if (new_val > 100) {
    new_val = 100;
  }
  _gc_time_penalties = new_val;

  assert(0 <= _gc_time_penalties && _gc_time_penalties <= 100,
         "In range after adjustment: " INTX_FORMAT, _gc_time_penalties);
}

void ShenandoahHeuristics::record_success_concurrent() {
  _degenerated_cycles_in_a_row = 0;
  _successful_cycles_in_a_row++;

  _gc_time_history->add(time_since_last_gc());
  _gc_times_learned++;

  adjust_penalty(Concurrent_Adjust);
}

void ShenandoahHeuristics::record_success_degenerated() {
  _degenerated_cycles_in_a_row++;
  _successful_cycles_in_a_row = 0;

  adjust_penalty(Degenerated_Penalty);
}

void ShenandoahHeuristics::record_success_full() {
  _degenerated_cycles_in_a_row = 0;
  _successful_cycles_in_a_row++;

  adjust_penalty(Full_Penalty);
}

void ShenandoahHeuristics::record_allocation_failure_gc() {
  // Do nothing.
}

bool ShenandoahHeuristics::should_unload_classes() {
  if (!can_unload_classes_normal()) return false;
  if (has_metaspace_oom()) return true;
  size_t cycle = ShenandoahHeap::heap()->shenandoah_policy()->cycle_counter();
  // Unload classes every Nth GC cycle.
  // This should not happen in the same cycle as process_references to amortize costs.
  // Offsetting by one is enough to break the rendezvous when periods are equal.
  // When periods are not equal, offsetting by one is just as good as any other guess.
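  // Note: can_unload_classes_normal() is expected to return false when
  // ShenandoahUnloadClassesFrequency is 0, so the modulo below cannot divide by zero.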
  return (cycle + 1) % ShenandoahUnloadClassesFrequency == 0;
}

void ShenandoahHeuristics::initialize() {
  // Nothing to do by default.
}

double ShenandoahHeuristics::time_since_last_gc() const {
  return os::elapsedTime() - _cycle_start;
}
/*
 * Copyright (c) 2018, 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shenandoah/shenandoahAllocRequest.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
#include "runtime/globals_extension.hpp"

int ShenandoahHeuristics::compare_by_garbage(RegionData a, RegionData b) {
  if (a._garbage > b._garbage)
    return -1;
  else if (a._garbage < b._garbage)
    return 1;
  else return 0;
}

ShenandoahHeuristics::ShenandoahHeuristics(ShenandoahGeneration* generation) :
  _generation(generation),
  _region_data(NULL),
  _degenerated_cycles_in_a_row(0),
  _successful_cycles_in_a_row(0),
  _guaranteed_gc_interval(0),
  _cycle_start(os::elapsedTime()),
  _last_cycle_end(0),
  _gc_times_learned(0),
  _gc_time_penalties(0),
  _gc_time_history(new TruncatedSeq(10, ShenandoahAdaptiveDecayFactor)),
  _live_memory_last_cycle(0),
  _live_memory_penultimate_cycle(0),
  _metaspace_oom()
{
  // No unloading during concurrent mark? Communicate that to heuristics
  if (!ClassUnloadingWithConcurrentMark) {
    FLAG_SET_DEFAULT(ShenandoahUnloadClassesFrequency, 0);
  }

  size_t num_regions = ShenandoahHeap::heap()->num_regions();
  assert(num_regions > 0, "Sanity");

  _region_data = NEW_C_HEAP_ARRAY(RegionData, num_regions, mtGC);
}

ShenandoahHeuristics::~ShenandoahHeuristics() {
  FREE_C_HEAP_ARRAY(RegionData, _region_data);
}

void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set, ShenandoahOldHeuristics* old_heuristics) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  assert(collection_set->count() == 0, "Must be empty");
  assert(_generation->generation_mode() != OLD, "Old GC invokes ShenandoahOldHeuristics::choose_collection_set()");

  // Check all pinned regions have updated status before choosing the collection set.
  heap->assert_pinned_region_status();

  // Step 1. Build up the region candidates we care about, rejecting losers and accepting winners right away.

  size_t num_regions = heap->num_regions();

  RegionData* candidates = _region_data;

  size_t cand_idx = 0;

  size_t total_garbage = 0;

  size_t immediate_garbage = 0;
  size_t immediate_regions = 0;

  size_t free = 0;
  size_t free_regions = 0;
  size_t live_memory = 0;

  ShenandoahMarkingContext* const ctx = _generation->complete_marking_context();

  size_t remnant_available = 0;
  for (size_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* region = heap->get_region(i);
    if (!in_generation(region)) {
      continue;
    }

    size_t garbage = region->garbage();
    total_garbage += garbage;

    if (region->is_empty()) {
      free_regions++;
      free += ShenandoahHeapRegion::region_size_bytes();
    } else if (region->is_regular()) {
      if (!region->has_live()) {
        // We can recycle it right away and put it in the free set.
        immediate_regions++;
        immediate_garbage += garbage;
        region->make_trash_immediate();
      } else {
        assert(_generation->generation_mode() != OLD, "OLD is handled elsewhere");

        live_memory += region->get_live_data_bytes();
        // This is our candidate for later consideration.
        candidates[cand_idx]._region = region;
        if (heap->mode()->is_generational() && (region->age() >= InitialTenuringThreshold)) {
          // Bias selection of regions that have reached tenure age
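          // (Illustrative: a region two years past InitialTenuringThreshold gets the
          // bias applied twice, inflating its reported garbage so that the descending
          // garbage sort favors tenured regions.)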
          for (uint j = region->age() - InitialTenuringThreshold; j > 0; j--) {
            garbage = (garbage + ShenandoahTenuredRegionUsageBias) * ShenandoahTenuredRegionUsageBias;
          }
        }
        candidates[cand_idx]._garbage = garbage;
        cand_idx++;
      }
    } else if (region->is_humongous_start()) {
      // Reclaim humongous regions here, and count them as the immediate garbage
#ifdef ASSERT
      bool reg_live = region->has_live();
      bool bm_live = ctx->is_marked(cast_to_oop(region->bottom()));
      assert(reg_live == bm_live,
             "Humongous liveness and marks should agree. Region live: %s; Bitmap live: %s; Region Live Words: " SIZE_FORMAT,
             BOOL_TO_STR(reg_live), BOOL_TO_STR(bm_live), region->get_live_data_words());
#endif
      if (!region->has_live()) {
        heap->trash_humongous_region_at(region);

        // Count only the start. Continuations would be counted on "trash" path
        immediate_regions++;
        immediate_garbage += garbage;
      } else {
        live_memory += region->get_live_data_bytes();
      }
    } else if (region->is_trash()) {
      // Count in just trashed collection set, during coalesced CM-with-UR
      immediate_regions++;
      immediate_garbage += garbage;
    } else { // region->is_humongous_cont() and !region->is_trash()
      live_memory += region->get_live_data_bytes();
    }
  }

  save_last_live_memory(live_memory);

  // Step 2. Look back at garbage statistics, and decide if we want to collect anything,
  // given the amount of immediately reclaimable garbage. If we do, figure out the collection set.

  assert(immediate_garbage <= total_garbage,
         "Cannot have more immediate garbage than total garbage: " SIZE_FORMAT "%s vs " SIZE_FORMAT "%s",
         byte_size_in_proper_unit(immediate_garbage), proper_unit_for_byte_size(immediate_garbage),
         byte_size_in_proper_unit(total_garbage), proper_unit_for_byte_size(total_garbage));

  size_t immediate_percent = (total_garbage == 0) ? 0 : (immediate_garbage * 100 / total_garbage);

  if (immediate_percent <= ShenandoahImmediateThreshold) {
    if (old_heuristics != NULL) {
      old_heuristics->prime_collection_set(collection_set);

      size_t bytes_reserved_for_old_evacuation = collection_set->get_old_bytes_reserved_for_evacuation();
      if (bytes_reserved_for_old_evacuation * ShenandoahEvacWaste < heap->get_old_evac_reserve()) {
        size_t old_evac_reserve = (size_t) (bytes_reserved_for_old_evacuation * ShenandoahEvacWaste);
        heap->set_old_evac_reserve(old_evac_reserve);
      }
    }
    // else, this is global collection and doesn't need to prime_collection_set

    ShenandoahYoungGeneration* young_generation = heap->young_generation();
    size_t young_evacuation_reserve = (young_generation->soft_max_capacity() * ShenandoahEvacReserve) / 100;

    // At this point, young_generation->available() does not know about recently discovered immediate garbage.
    // The availability it reports is also not entirely trustworthy, because any available memory associated
    // with a region that is placed into the collection set becomes unavailable when the region is chosen
    // for the collection set. We'll compute an approximation of young available. If young_available is zero,
    // we'll need to borrow from old-gen in order to evacuate. If there's nothing to borrow, we're going to
    // degenerate to full GC.

    // TODO: young_available can include available (between top() and end()) within each young region that is not
    // part of the collection set. Making this memory available to the young_evacuation_reserve allows a larger
    // young collection set to be chosen when available memory is under extreme pressure. Implementing this "improvement"
    // is tricky, because the incremental construction of the collection set actually changes the amount of memory
    // available to hold evacuated young-gen objects. As currently implemented, the memory that is available within
    // non-empty regions that are not selected as part of the collection set can be allocated by the mutator while
    // GC is evacuating and updating references.

    size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
    size_t free_affiliated_regions = immediate_regions + free_regions;
    size_t young_available = (free_affiliated_regions + young_generation->free_unaffiliated_regions()) * region_size_bytes;

    size_t regions_available_to_loan = 0;

    if (heap->mode()->is_generational()) {
      // Now that we've primed the collection set, we can figure out how much memory to reserve for evacuation
      // of young-gen objects.
      //
      // YoungEvacuationReserve for young generation: how much memory are we reserving to hold the results
      // of evacuating young collection set regions? This is typically smaller than the total amount
      // of available memory, and is also smaller than the total amount of marked live memory within
      // young-gen. This value is the minimum of:
      //   1. young_gen->available() + (old_gen->available - (OldEvacuationReserve + PromotionReserve))
      //   2. young_gen->capacity() * ShenandoahEvacReserve
      //
      // Note that any region added to the collection set will be completely evacuated and its memory will
      // be completely recycled at the end of GC. The recycled memory will be at least as great as the
      // memory borrowed from old-gen. Enforce that the amount borrowed from old-gen for YoungEvacuationReserve
      // is an integral number of entire heap regions.
      //
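      // (Illustrative numbers only: with a 1000 MB young-gen soft cap and
      // ShenandoahEvacReserve == 5, the reserve starts at 50 MB; if 20 MB of that is
      // already committed to old-gen evacuation, 30 MB remains for young-gen copies.)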
      young_evacuation_reserve -= heap->get_old_evac_reserve();

      // Though we cannot know the evacuation_supplement until after we have computed the collection set, we do
      // know that every young-gen region added to the collection set will have a net positive impact on available
      // memory within young-gen, since each contributes a positive amount of garbage to available. Thus, even
      // without knowing the exact composition of the collection set, we can allow young_evacuation_reserve to
      // exceed young_available if there are empty regions available within old-gen to hold the results of evacuation.

      ShenandoahGeneration* old_generation = heap->old_generation();

      // Not all of what is currently available within young-gen can be reserved to hold the results of young-gen
      // evacuation. This is because memory available within any heap region that is placed into the collection set
      // is not available to be allocated during evacuation. To be safe, we ensure that all memory required for
      // evacuation is available within "virgin" heap regions.

      const size_t available_young_regions = free_regions + immediate_regions + young_generation->free_unaffiliated_regions();
      const size_t available_old_regions = old_generation->free_unaffiliated_regions();
      size_t already_reserved_old_bytes = heap->get_old_evac_reserve() + heap->get_promotion_reserve();
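      // Old-gen loans are made in units of whole regions: round the already-reserved
      // bytes up (ceiling division) before subtracting from the loanable pool.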
      size_t regions_reserved_for_evac_and_promotion = (already_reserved_old_bytes + region_size_bytes - 1) / region_size_bytes;
      regions_available_to_loan = available_old_regions - regions_reserved_for_evac_and_promotion;

      if (available_young_regions * region_size_bytes < young_evacuation_reserve) {
        // Try to borrow old-gen regions in order to avoid shrinking young_evacuation_reserve
        size_t loan_request = young_evacuation_reserve - available_young_regions * region_size_bytes;
        size_t loaned_region_request = (loan_request + region_size_bytes - 1) / region_size_bytes;
        if (loaned_region_request > regions_available_to_loan) {
          // Scale back young_evacuation_reserve to consume all available young and old regions. After the
          // collection set is chosen, we may get some of this memory back for pacing allocations during evacuation
          // and update refs.
          loaned_region_request = regions_available_to_loan;
          young_evacuation_reserve = (available_young_regions + loaned_region_request) * region_size_bytes;
        } else {
          // No need to scale back young_evacuation_reserve.
        }
      } else {
        // No need to scale back young_evacuation_reserve, and no need to borrow from old-gen. We may even have
        // some available_young_regions left over to support allocation pacing.
      }

    } else if (young_evacuation_reserve > young_available) {
      // In non-generational mode, there's no old-gen memory to borrow from
      young_evacuation_reserve = young_available;
    }

    heap->set_young_evac_reserve(young_evacuation_reserve);

    // Add young-gen regions into the collection set. This is a virtual call, implemented differently by each
    // of the heuristics subclasses.
    choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage + free);

    // Now compute the evacuation supplement, which is extra memory borrowed from old-gen that can be allocated
    // by mutators while GC is working on evacuation and update-refs.

    // During evacuation and update refs, we will be able to allocate any memory that is currently available
    // plus any memory that can be borrowed on the collateral of the current collection set, reserving a certain
    // percentage of the anticipated replenishment from collection set memory to be allocated during the subsequent
    // concurrent marking effort. This is how much we can repay.
    size_t potential_supplement_regions = collection_set->get_young_region_count();

    // Though we can repay potential_supplement_regions, we can't borrow them unless they are available in old-gen.
    if (potential_supplement_regions > regions_available_to_loan) {
      potential_supplement_regions = regions_available_to_loan;
    }

    size_t potential_evac_supplement;

    // How much of the potential_supplement_regions will be consumed by young_evacuation_reserve: borrowed_evac_regions.
    const size_t available_unaffiliated_young_regions = young_generation->free_unaffiliated_regions();
    const size_t available_affiliated_regions = free_regions + immediate_regions;
    const size_t available_young_regions = available_unaffiliated_young_regions + available_affiliated_regions;
    size_t young_evac_regions = (young_evacuation_reserve + region_size_bytes - 1) / region_size_bytes;
    size_t borrowed_evac_regions = (young_evac_regions > available_young_regions) ? young_evac_regions - available_young_regions : 0;

    potential_supplement_regions -= borrowed_evac_regions;
    potential_evac_supplement = potential_supplement_regions * region_size_bytes;

    // Leave some allocation runway for the subsequent concurrent mark phase.
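    // (Illustrative: with ShenandoahBorrowPercent == 30 and 10 repayable regions,
    // only 3 regions' worth of memory is lent out for mutator allocation; the rest
    // stays in reserve as runway.)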
    potential_evac_supplement = (potential_evac_supplement * ShenandoahBorrowPercent) / 100;

    heap->set_alloc_supplement_reserve(potential_evac_supplement);

    size_t promotion_budget = heap->get_promotion_reserve();
    size_t old_evac_budget = heap->get_old_evac_reserve();
    size_t alloc_budget_evac_and_update = potential_evac_supplement + young_available;

    // TODO: young_available, which feeds into alloc_budget_evac_and_update, is lacking memory available within
    // existing young-gen regions that were not selected for the collection set. Add this in and adjust the
    // log message (where it says "empty-region allocation budget").

    log_info(gc, ergo)("Memory reserved for evacuation and update-refs includes promotion budget: " SIZE_FORMAT
                       "%s, young evacuation budget: " SIZE_FORMAT "%s, old evacuation budget: " SIZE_FORMAT
                       "%s, empty-region allocation budget: " SIZE_FORMAT "%s, including supplement: " SIZE_FORMAT "%s",
                       byte_size_in_proper_unit(promotion_budget), proper_unit_for_byte_size(promotion_budget),
                       byte_size_in_proper_unit(young_evacuation_reserve), proper_unit_for_byte_size(young_evacuation_reserve),
                       byte_size_in_proper_unit(old_evac_budget), proper_unit_for_byte_size(old_evac_budget),
                       byte_size_in_proper_unit(alloc_budget_evac_and_update),
                       proper_unit_for_byte_size(alloc_budget_evac_and_update),
                       byte_size_in_proper_unit(potential_evac_supplement), proper_unit_for_byte_size(potential_evac_supplement));
  } else {
    // We're going to skip evacuation and update refs because we reclaimed a sufficient amount of immediate garbage.
    heap->shenandoah_policy()->record_abbreviated_cycle();
  }

  if (collection_set->has_old_regions()) {
    heap->shenandoah_policy()->record_mixed_cycle();
  }

  size_t cset_percent = (total_garbage == 0) ? 0 : (collection_set->garbage() * 100 / total_garbage);
  size_t collectable_garbage = collection_set->garbage() + immediate_garbage;
  size_t collectable_garbage_percent = (total_garbage == 0) ? 0 : (collectable_garbage * 100 / total_garbage);

  log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "%s (" SIZE_FORMAT "%%), "
                     "Immediate: " SIZE_FORMAT "%s (" SIZE_FORMAT "%%), "
                     "CSet: " SIZE_FORMAT "%s (" SIZE_FORMAT "%%)",

                     byte_size_in_proper_unit(collectable_garbage),
                     proper_unit_for_byte_size(collectable_garbage),
                     collectable_garbage_percent,

                     byte_size_in_proper_unit(immediate_garbage),
                     proper_unit_for_byte_size(immediate_garbage),
                     immediate_percent,

                     byte_size_in_proper_unit(collection_set->garbage()),
                     proper_unit_for_byte_size(collection_set->garbage()),
                     cset_percent);
}

void ShenandoahHeuristics::record_cycle_start() {
  _cycle_start = os::elapsedTime();
}

void ShenandoahHeuristics::record_cycle_end() {
  _last_cycle_end = os::elapsedTime();
}

bool ShenandoahHeuristics::should_start_gc() {
  // Perform GC to clean up metaspace
  if (has_metaspace_oom()) {
    // Some of the vmTestbase/metaspace tests depend on the following line to count GC cycles
    log_info(gc)("Trigger: %s", GCCause::to_string(GCCause::_metadata_GC_threshold));
    return true;
  }

  if (_guaranteed_gc_interval > 0) {
    double last_time_ms = (os::elapsedTime() - _last_cycle_end) * 1000;
    if (last_time_ms > _guaranteed_gc_interval) {
      log_info(gc)("Trigger (%s): Time since last GC (%.0f ms) is larger than guaranteed interval (" UINTX_FORMAT " ms)",
                   _generation->name(), last_time_ms, _guaranteed_gc_interval);
      return true;
    }
  }

  return false;
}

bool ShenandoahHeuristics::should_degenerate_cycle() {
  return _degenerated_cycles_in_a_row <= ShenandoahFullGCThreshold;
}

void ShenandoahHeuristics::adjust_penalty(intx step) {
  assert(0 <= _gc_time_penalties && _gc_time_penalties <= 100,
         "In range before adjustment: " INTX_FORMAT, _gc_time_penalties);

  intx new_val = _gc_time_penalties + step;
  if (new_val < 0) {
    new_val = 0;
  }
  if (new_val > 100) {
    new_val = 100;
  }
  _gc_time_penalties = new_val;

  assert(0 <= _gc_time_penalties && _gc_time_penalties <= 100,
         "In range after adjustment: " INTX_FORMAT, _gc_time_penalties);
}

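// Abbreviated cycles (those that skipped evacuation because immediate garbage was
// plentiful) finish much faster than ordinary ones; when
// ShenandoahAdaptiveIgnoreShortCycles is set, they are kept out of the learned
// GC-time history so they don't skew the average downward.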
void ShenandoahHeuristics::record_success_concurrent(bool abbreviated) {
  _degenerated_cycles_in_a_row = 0;
  _successful_cycles_in_a_row++;

  if (!(abbreviated && ShenandoahAdaptiveIgnoreShortCycles)) {
    _gc_time_history->add(time_since_last_gc());
    _gc_times_learned++;
  }

  adjust_penalty(Concurrent_Adjust);
}

void ShenandoahHeuristics::record_success_degenerated() {
  _degenerated_cycles_in_a_row++;
  _successful_cycles_in_a_row = 0;

  adjust_penalty(Degenerated_Penalty);
}

void ShenandoahHeuristics::record_success_full() {
  _degenerated_cycles_in_a_row = 0;
  _successful_cycles_in_a_row++;

  adjust_penalty(Full_Penalty);
}

void ShenandoahHeuristics::record_allocation_failure_gc() {
  // Do nothing.
}

bool ShenandoahHeuristics::should_unload_classes() {
  if (!can_unload_classes_normal()) return false;
  if (has_metaspace_oom()) return true;
  size_t cycle = ShenandoahHeap::heap()->shenandoah_policy()->cycle_counter();
  // Unload classes every Nth GC cycle.
  // This should not happen in the same cycle as process_references to amortize costs.
  // Offsetting by one is enough to break the rendezvous when periods are equal.
  // When periods are not equal, offsetting by one is just as good as any other guess.
  return (cycle + 1) % ShenandoahUnloadClassesFrequency == 0;
}

void ShenandoahHeuristics::initialize() {
  // Nothing to do by default.
}

double ShenandoahHeuristics::time_since_last_gc() const {
  return os::elapsedTime() - _cycle_start;
}

bool ShenandoahHeuristics::in_generation(ShenandoahHeapRegion* region) {
  return ((_generation->generation_mode() == GLOBAL)
          || (_generation->generation_mode() == YOUNG && region->affiliation() == YOUNG_GENERATION)
          || (_generation->generation_mode() == OLD && region->affiliation() == OLD_GENERATION));
}

void ShenandoahHeuristics::save_last_live_memory(size_t live_memory) {
  _live_memory_penultimate_cycle = _live_memory_last_cycle;
  _live_memory_last_cycle = live_memory;
}

size_t ShenandoahHeuristics::get_last_live_memory() {
  return _live_memory_last_cycle;
}

size_t ShenandoahHeuristics::get_penultimate_live_memory() {
  return _live_memory_penultimate_cycle;
}