/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHOLDGENERATION_HPP
#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHOLDGENERATION_HPP

#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
#include "gc/shenandoah/shenandoahAllocRequest.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.hpp"
#include "gc/shenandoah/shenandoahSharedVariables.hpp"

class ShenandoahHeapRegion;
class ShenandoahHeapRegionClosure;
class ShenandoahOldHeuristics;

class ShenandoahOldGeneration : public ShenandoahGeneration {
private:
  ShenandoahHeapRegion** _coalesce_and_fill_region_array;
  ShenandoahOldHeuristics* _old_heuristics;

  // After determining the desired size of the old generation (see compute_old_generation_balance), this
  // quantity represents the number of regions above (surplus) or below (deficit) that size.
  // This value is computed prior to the actual exchange of any regions. A positive value represents
  // a surplus of old regions which will be transferred from old _to_ young. A negative value represents
  // a deficit of regions that will be replenished by a transfer _from_ young to old.
  ssize_t _region_balance;
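
  // For example (an illustration of the sign convention above, not values taken from any
  // particular run): _region_balance == 3 means three old regions are surplus and will be
  // transferred from old to young; _region_balance == -2 means the young generation will
  // transfer two regions to old when the free set is rebuilt.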

  // Set when evacuation in the old generation fails. When this is set, the control thread will initiate a
  // full GC instead of a futile degenerated cycle.
  ShenandoahSharedFlag _failed_evacuation;

  // Bytes reserved within old-gen to hold the results of promotion. This is separate from
  // and in addition to the evacuation reserve for intra-generation evacuations (ShenandoahGeneration::_evacuation_reserve).
  // If there is more data ready to be promoted than can fit within this reserve, the promotion of some objects will be
  // deferred until a subsequent evacuation pass.
  size_t _promoted_reserve;

  // Bytes of old-gen memory expended on promotions. This may be modified concurrently
  // by mutators and gc workers when promotion LABs are retired during evacuation. It
  // is therefore always accessed through atomic operations. This is increased when a
  // PLAB is allocated for promotions. The value is decreased by the amount of memory
  // remaining in a PLAB when it is retired.
  size_t _promoted_expended;

  // Represents the quantity of live bytes we expect to promote in place during the next
  // evacuation cycle. This value is used by the young heuristic to trigger mixed collections.
  // It is also used when computing the optimum size for the old generation.
  size_t _promotion_potential;

  // When a region is selected to be promoted in place, the remaining free memory is filled
  // in to prevent additional allocations (preventing premature promotion of newly allocated
  // objects). This field records the total amount of padding used for such regions.
  size_t _pad_for_promote_in_place;

  // During construction of the collection set, we keep track of regions that are eligible
  // for promotion in place. These fields track the count of those humongous and regular regions.
  // This data is used to force the evacuation phase even when the collection set is otherwise
  // empty.
  size_t _promotable_humongous_regions;
  size_t _promotable_regular_regions;

  // True if old regions may be safely traversed by the remembered set scan.
  bool _is_parsable;

  bool coalesce_and_fill();

public:
  ShenandoahOldGeneration(uint max_queues, size_t max_capacity);

  ShenandoahHeuristics* initialize_heuristics(ShenandoahMode* gc_mode) override;

  const char* name() const override {
    return "Old";
  }

  ShenandoahOldHeuristics* heuristics() const override {
    return _old_heuristics;
  }

  // See description in field declaration
  void set_promoted_reserve(size_t new_val);
  size_t get_promoted_reserve() const;

  // The promotion reserve is increased when rebuilding the free set transfers a region to the old generation
  void augment_promoted_reserve(size_t increment);

  // This zeros out the expended promotion count after the promotion reserve is computed
  void reset_promoted_expended();

  // This is incremented when allocations are made to copy promotions into the old generation
  size_t expend_promoted(size_t increment);

  // This is used to return unused memory from a retired promotion LAB
  size_t unexpend_promoted(size_t decrement);

  // This is used on the allocation path to gate promotions that would exceed the reserve
  size_t get_promoted_expended() const;

  // Test if there is enough memory reserved for this promotion
  bool can_promote(size_t requested_bytes) const {
    size_t promotion_avail = get_promoted_reserve();
    size_t promotion_expended = get_promoted_expended();
    return promotion_expended + requested_bytes <= promotion_avail;
  }
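
  // A minimal sketch of the accounting protocol these methods implement (illustrative
  // pseudo-usage only, assuming a caller requesting a PLAB of plab_size bytes; the real
  // call sites live on the allocation and evacuation paths):
  //
  //   if (can_promote(plab_size)) {
  //     expend_promoted(plab_size);    // charge the full PLAB against the promotion reserve
  //     /* ... allocate the PLAB and copy promoted objects into it ... */
  //     unexpend_promoted(remaining);  // on retirement, credit back the unused bytes
  //   }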

  // Test if there is enough memory available in the old generation to accommodate this request.
  // The request will be subject to constraints on promotion and evacuation reserves.
  bool can_allocate(const ShenandoahAllocRequest& req) const;

  // Updates the promotion expenditure tracking and configures whether the PLAB may be used
  // for promotions and evacuations, or just evacuations.
  void configure_plab_for_current_thread(const ShenandoahAllocRequest& req);

  // See description in field declaration
  void set_region_balance(ssize_t balance) { _region_balance = balance; }
  ssize_t get_region_balance() const { return _region_balance; }

  // See description in field declaration
  void set_promotion_potential(size_t val) { _promotion_potential = val; }
  size_t get_promotion_potential() const { return _promotion_potential; }

  // See description in field declaration
  void set_pad_for_promote_in_place(size_t pad) { _pad_for_promote_in_place = pad; }
  size_t get_pad_for_promote_in_place() const { return _pad_for_promote_in_place; }

  // See description in field declaration
  void set_expected_humongous_region_promotions(size_t region_count) { _promotable_humongous_regions = region_count; }
  void set_expected_regular_region_promotions(size_t region_count) { _promotable_regular_regions = region_count; }
  size_t get_expected_in_place_promotions() const { return _promotable_humongous_regions + _promotable_regular_regions; }
  bool has_in_place_promotions() const { return get_expected_in_place_promotions() > 0; }
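
  // For example (hedged arithmetic, not values from any run): with one promotable humongous
  // region and two promotable regular regions, get_expected_in_place_promotions() returns 3,
  // so has_in_place_promotions() is true and the evacuation phase runs even if the collection
  // set is otherwise empty.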

  // Class unloading may render the card table offsets unusable if they refer to unmarked objects
  bool is_parsable() const { return _is_parsable; }
  void set_parsable(bool parsable);

  // This will signal the heuristic to trigger an old generation collection
  void handle_failed_transfer();

  // This will signal the control thread to run a full GC instead of a futile degenerated GC
  void handle_failed_evacuation();

  // This logs that an evacuation to the old generation has failed
  void handle_failed_promotion(Thread* thread, size_t size);

  // A successful evacuation re-dirties the cards and registers the object with the remembered set
  void handle_evacuation(HeapWord* obj, size_t words, bool promotion);

  // Clear the flag after it is consumed by the control thread
  bool clear_failed_evacuation() {
    return _failed_evacuation.try_unset();
  }
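
  // A sketch of how this flag might be consumed (illustrative only; the consuming logic
  // lives in the control thread, not in this header):
  //
  //   if (old_generation->clear_failed_evacuation()) {
  //     // An old-gen evacuation failed: run a full GC rather than a futile degenerated cycle.
  //   }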

  // Transition to the next state after mixed evacuations have completed
  void complete_mixed_evacuations();

  // Abandon any future mixed collections. This is invoked when all old regions eligible for
  // inclusion in a mixed evacuation are pinned. This should be rare.
  void abandon_mixed_evacuations();

private:
  ShenandoahScanRemembered* _card_scan;

public:
  ShenandoahScanRemembered* card_scan() { return _card_scan; }

  // Clear cards for given region
  void clear_cards_for(ShenandoahHeapRegion* region);

  // Mark card for this location as dirty
  void mark_card_as_dirty(void* location);

  void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* cl) override;

  void parallel_heap_region_iterate_free(ShenandoahHeapRegionClosure* cl) override;

  void heap_region_iterate(ShenandoahHeapRegionClosure* cl) override;

  bool contains(ShenandoahAffiliation affiliation) const override;
  bool contains(ShenandoahHeapRegion* region) const override;
  bool contains(oop obj) const override;

  void set_concurrent_mark_in_progress(bool in_progress) override;
  bool is_concurrent_mark_in_progress() override;

  bool entry_coalesce_and_fill();
  void prepare_for_mixed_collections_after_global_gc();
  void prepare_gc() override;
  void prepare_regions_and_collection_set(bool concurrent) override;
  void record_success_concurrent(bool abbreviated) override;
  void cancel_marking() override;

  // Cancels old GC and transitions to the idle state
  void cancel_gc();

  // We leave the SATB barrier on for the entirety of the old generation
  // marking phase. In some cases, this can cause a write to a perfectly
  // reachable oop to enqueue a pointer that later becomes garbage (because
  // it points at an object that is later chosen for the collection set). There are
  // also cases where the referent of a weak reference ends up in the SATB
  // and is later collected. In these cases the oop in the SATB buffer becomes
  // invalid and the _next_ cycle will crash during its marking phase. To
  // avoid this problem, we "purge" the SATB buffers during the final update
  // references phase if (and only if) an old generation mark is in progress.
  // At this stage we can safely determine if any of the oops in the SATB
  // buffer belong to trashed regions (before they are recycled). As it
  // happens, flushing a SATB queue also filters out oops which have already
  // been marked - which is the case for anything that is being evacuated
  // from the collection set.
  //
  // Alternatively, we could inspect the state of the heap and the age of the
  // object at the barrier, but we reject this approach because it is likely
  // the performance impact would be too severe.
  void transfer_pointers_from_satb() const;
  void concurrent_transfer_pointers_from_satb() const;

  // True if there are old regions waiting to be selected for a mixed collection
  bool has_unprocessed_collection_candidates();

  bool is_doing_mixed_evacuations() const {
    return state() == EVACUATING || state() == EVACUATING_AFTER_GLOBAL;
  }

  bool is_preparing_for_mark() const {
    return state() == FILLING;
  }

  bool is_idle() const {
    return state() == WAITING_FOR_BOOTSTRAP;
  }

  bool is_bootstrapping() const {
    return state() == BOOTSTRAPPING;
  }

  // Amount of live memory (bytes) in regions waiting for mixed collections
  size_t unprocessed_collection_candidates_live_memory();

  // Abandon any regions waiting for mixed collections
  void abandon_collection_candidates();

public:
  enum State {
    FILLING, WAITING_FOR_BOOTSTRAP, BOOTSTRAPPING, MARKING, EVACUATING, EVACUATING_AFTER_GLOBAL
  };
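
  // A rough sketch of the typical progression through these states, inferred from the
  // predicates above (is_preparing_for_mark, is_idle, is_bootstrapping,
  // is_doing_mixed_evacuations); the authoritative checks live in validate_transition():
  //
  //   FILLING -> WAITING_FOR_BOOTSTRAP -> BOOTSTRAPPING -> MARKING -> EVACUATING
  //
  // with EVACUATING_AFTER_GLOBAL used instead when mixed evacuations follow a global
  // collection (see prepare_for_mixed_collections_after_global_gc()).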

#ifdef ASSERT
  bool validate_waiting_for_bootstrap();
#endif

private:
  State _state;

  static const size_t FRACTIONAL_DENOMINATOR = 65536;

  // During initialization of the JVM, we search for the correct old-gen size by initially performing old-gen
  // collection when old-gen usage is 50% more (INITIAL_GROWTH_BEFORE_COMPACTION) than the initial old-gen size
  // estimate (INITIAL_LIVE_FRACTION, 6.25% of heap). The next old-gen trigger occurs when old-gen grows 25% larger
  // than its live memory at the end of the first old-gen collection. Then we trigger again when old-gen grows 12.5%
  // more than its live memory at the end of the previous old-gen collection. Thereafter, we trigger each time
  // old-gen grows more than 12.5% following the end of its previous old-gen collection.
  static const size_t INITIAL_GROWTH_BEFORE_COMPACTION = FRACTIONAL_DENOMINATOR / 2; // 50.0%

  // INITIAL_LIVE_FRACTION represents the initial guess of how large old-gen should be. We estimate that old-gen
  // needs to consume 6.25% of the total heap size. And we "pretend" that we start out with this amount of live
  // old-gen memory. The first old-collection trigger will occur when old-gen occupies 50% more than this initial
  // approximation of the old-gen memory requirement, in other words when old-gen usage is 150% of 6.25%, which
  // is 9.375% of the total heap size.
  static const uint16_t INITIAL_LIVE_FRACTION = FRACTIONAL_DENOMINATOR / 16; // 6.25%

  size_t _live_bytes_after_last_mark;

  // How much growth in usage before we trigger old collection, per FRACTIONAL_DENOMINATOR (65536)
  size_t _growth_before_compaction;
  const size_t _min_growth_before_compaction; // Default is 12.5%
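
  // A worked example of this fixed-point arithmetic (a sketch under the assumption that the
  // trigger threshold scales live memory by (FRACTIONAL_DENOMINATOR + growth) / FRACTIONAL_DENOMINATOR;
  // see usage_trigger_threshold() for the authoritative computation):
  //
  //   _live_bytes_after_last_mark = 100 MB
  //   _growth_before_compaction   = FRACTIONAL_DENOMINATOR / 8 = 8192  (12.5%)
  //   threshold = 100 MB * (65536 + 8192) / 65536 = 112.5 MB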

  void validate_transition(State new_state) NOT_DEBUG_RETURN;

public:
  State state() const {
    return _state;
  }

  const char* state_name() const {
    return state_name(_state);
  }

  void transition_to(State new_state);

  size_t get_live_bytes_after_last_mark() const;
  void set_live_bytes_after_last_mark(size_t new_live);

  size_t usage_trigger_threshold() const;

  bool can_start_gc() {
    return _state == WAITING_FOR_BOOTSTRAP;
  }

  static const char* state_name(State state);
};

#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHOLDGENERATION_HPP