1 /*
2 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP
26 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP
27
28 #include "gc/shared/gc_globals.hpp"
29 #include "gc/shared/spaceDecorator.hpp"
30 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
31 #include "gc/shenandoah/shenandoahAsserts.hpp"
32 #include "gc/shenandoah/shenandoahHeap.hpp"
33 #include "gc/shenandoah/shenandoahPadding.hpp"
34 #include "utilities/sizes.hpp"
35
36 class VMStructs;
37 class ShenandoahHeapRegionStateConstant;
38
// A ShenandoahHeapRegion is one fixed-size subdivision of the Shenandoah heap.
// Each region carries its own allocation state machine (_state), allocation
// cursor (_top), liveness accounting (_live_data) and pin bookkeeping
// (_critical_pins). State transitions happen only under the heap lock, which
// allows several regions to change state atomically.
39 class ShenandoahHeapRegion {
40 friend class VMStructs;
41 friend class ShenandoahHeapRegionStateConstant;
42 private:
43 /*
44 Region state is described by a state machine. Transitions are guarded by
45 heap lock, which allows changing the state of several regions atomically.
46 Region states can be logically aggregated in groups.
47
48 "Empty":
49 .................................................................
105 h) Humongous start can go pinned, and thus can be protected from moves (humongous continuations should
106 follow associated humongous starts, not pinnable/movable by themselves);
107 i) Empty cannot go Trash, avoiding useless work;
108 j) ...
109 */
110
111 enum RegionState {
112 _empty_uncommitted, // region is empty and has memory uncommitted
113 _empty_committed, // region is empty and has memory committed
114 _regular, // region is for regular allocations
115 _humongous_start, // region is the humongous start
116 _humongous_cont, // region is the humongous continuation
117 _pinned_humongous_start, // region is both humongous start and pinned
118 _cset, // region is in collection set
119 _pinned, // region is pinned
120 _pinned_cset, // region is pinned and in cset (evac failure path)
121 _trash, // region contains only trash
122 _REGION_STATES_NUM // last
123 };
124
// Human-readable name for a state; used for logging and region printouts.
125 static const char* region_state_to_string(RegionState s) {
126 switch (s) {
127 case _empty_uncommitted: return "Empty Uncommitted";
128 case _empty_committed: return "Empty Committed";
129 case _regular: return "Regular";
130 case _humongous_start: return "Humongous Start";
131 case _humongous_cont: return "Humongous Continuation";
132 case _pinned_humongous_start: return "Humongous Start, Pinned";
133 case _cset: return "Collection Set";
134 case _pinned: return "Pinned";
135 case _pinned_cset: return "Collection Set, Pinned";
136 case _trash: return "Trash";
137 default:
138 ShouldNotReachHere();
139 return "";
140 }
141 }
142
143 // This method protects from accidental changes in enum order:
// note the ordinals here deliberately differ from the enum declaration order,
// so reordering the enum cannot silently change externally visible ordinals.
144 int region_state_to_ordinal(RegionState s) const {
145 switch (s) {
146 case _empty_uncommitted: return 0;
147 case _empty_committed: return 1;
148 case _regular: return 2;
149 case _humongous_start: return 3;
150 case _humongous_cont: return 4;
151 case _cset: return 5;
152 case _pinned: return 6;
153 case _trash: return 7;
154 case _pinned_cset: return 8;
155 case _pinned_humongous_start: return 9;
156 default:
157 ShouldNotReachHere();
158 return -1;
159 }
160 }
161
// Invoked by the make_* transition methods when the current state does not
// permit the requested transition (see the .cpp for the failure handling).
162 void report_illegal_transition(const char* method);
163
164 public:
// Number of distinct region states; callers can use this to size per-state tables.
165 static int region_states_num() {
166 return _REGION_STATES_NUM;
167 }
168
169 // Allowed transitions from the outside code:
170 void make_regular_allocation();
171 void make_regular_bypass();
172 void make_humongous_start();
173 void make_humongous_cont();
174 void make_humongous_start_bypass();
175 void make_humongous_cont_bypass();
176 void make_pinned();
177 void make_unpinned();
178 void make_cset();
179 void make_trash();
180 void make_trash_immediate();
181 void make_empty();
182 void make_uncommitted();
183 void make_committed_bypass();
184
185 // Individual states:
186 bool is_empty_uncommitted() const { return _state == _empty_uncommitted; }
187 bool is_empty_committed() const { return _state == _empty_committed; }
188 bool is_regular() const { return _state == _regular; }
189 bool is_humongous_continuation() const { return _state == _humongous_cont; }
190
191 // Participation in logical groups:
192 bool is_empty() const { return is_empty_committed() || is_empty_uncommitted(); }
193 bool is_active() const { return !is_empty() && !is_trash(); }
194 bool is_trash() const { return _state == _trash; }
195 bool is_humongous_start() const { return _state == _humongous_start || _state == _pinned_humongous_start; }
196 bool is_humongous() const { return is_humongous_start() || is_humongous_continuation(); }
197 bool is_committed() const { return !is_empty_uncommitted(); }
198 bool is_cset() const { return _state == _cset || _state == _pinned_cset; }
199 bool is_pinned() const { return _state == _pinned || _state == _pinned_cset || _state == _pinned_humongous_start; }
200
201 // Macro-properties:
202 bool is_alloc_allowed() const { return is_empty() || is_regular() || _state == _pinned; }
203 bool is_stw_move_allowed() const { return is_regular() || _state == _cset || (ShenandoahHumongousMoves && _state == _humongous_start); }
204
205 RegionState state() const { return _state; }
206 int state_ordinal() const { return region_state_to_ordinal(_state); }
207
// Pin bookkeeping — presumably backed by the _critical_pins counter below;
// see the inline/cpp implementation for the exact protocol.
208 void record_pin();
209 void record_unpin();
210 size_t pin_count() const;
211
212 private:
// Region geometry and allocation thresholds, shared by all regions.
// NOTE(review): presumably initialized once by setup_sizes() — confirm in the .cpp.
213 static size_t RegionCount;
214 static size_t RegionSizeBytes;
215 static size_t RegionSizeWords;
216 static size_t RegionSizeBytesShift;
217 static size_t RegionSizeWordsShift;
218 static size_t RegionSizeBytesMask;
219 static size_t RegionSizeWordsMask;
220 static size_t HumongousThresholdBytes;
221 static size_t HumongousThresholdWords;
222 static size_t MaxTLABSizeBytes;
223 static size_t MaxTLABSizeWords;
224
225 // Never updated fields
226 size_t const _index;
227 HeapWord* const _bottom;
228 HeapWord* const _end;
229
230 // Rarely updated fields
231 HeapWord* _new_top;
232 double _empty_time;
233
234 // Seldom updated fields
235 RegionState _state;
236
237 // Frequently updated fields
238 HeapWord* _top;
239
240 size_t _tlab_allocs;
241 size_t _gclab_allocs;
242
243 volatile size_t _live_data;
244 volatile size_t _critical_pins;
245
246 HeapWord* volatile _update_watermark;
247
248 public:
249 ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed);
250
251 static const size_t MIN_NUM_REGIONS = 10;
252
253 // Return adjusted max heap size
254 static size_t setup_sizes(size_t max_heap_size);
255
256 double empty_time() {
257 return _empty_time;
258 }
259
// Number of regions needed to hold 'bytes', rounded up to a whole region.
260 inline static size_t required_regions(size_t bytes) {
261 return (bytes + ShenandoahHeapRegion::region_size_bytes() - 1) >> ShenandoahHeapRegion::region_size_bytes_shift();
262 }
263
264 inline static size_t region_count() {
265 return ShenandoahHeapRegion::RegionCount;
266 }
267
316 inline static size_t humongous_threshold_bytes() {
317 return ShenandoahHeapRegion::HumongousThresholdBytes;
318 }
319
320 inline static size_t humongous_threshold_words() {
321 return ShenandoahHeapRegion::HumongousThresholdWords;
322 }
323
324 inline static size_t max_tlab_size_bytes() {
325 return ShenandoahHeapRegion::MaxTLABSizeBytes;
326 }
327
328 inline static size_t max_tlab_size_words() {
329 return ShenandoahHeapRegion::MaxTLABSizeWords;
330 }
331
332 inline size_t index() const {
333 return _index;
334 }
335
336 // Allocation (return null if full)
337 inline HeapWord* allocate(size_t word_size, ShenandoahAllocRequest::Type type);
338
339 inline void clear_live_data();
340 void set_live_data(size_t s);
341
342 // Increase live data for newly allocated region
343 inline void increase_live_data_alloc_words(size_t s);
344
345 // Increase live data for region scanned with GC
346 inline void increase_live_data_gc_words(size_t s);
347
348 inline bool has_live() const;
349 inline size_t get_live_data_bytes() const;
350 inline size_t get_live_data_words() const;
351
352 inline size_t garbage() const;
353
354 void print_on(outputStream* st) const;
355
356 void recycle();
357
358 void oop_iterate(OopIterateClosure* cl);
359
// Block (object) parsing support within [bottom, top).
360 HeapWord* block_start(const void* p) const;
361 size_t block_size(const HeapWord* p) const;
362 bool block_is_obj(const HeapWord* p) const { return p < top(); }
363
364 // Find humongous start region that this region belongs to
365 ShenandoahHeapRegion* humongous_start_region() const;
366
367 HeapWord* top() const { return _top; }
368 void set_top(HeapWord* v) { _top = v; }
369
370 HeapWord* new_top() const { return _new_top; }
371 void set_new_top(HeapWord* v) { _new_top = v; }
372
373 HeapWord* bottom() const { return _bottom; }
374 HeapWord* end() const { return _end; }
375
376 size_t capacity() const { return byte_size(bottom(), end()); }
377 size_t used() const { return byte_size(bottom(), top()); }
378 size_t free() const { return byte_size(top(), end()); }
379
// Per-type (shared/TLAB/GCLAB) allocation statistics for this region.
380 inline void adjust_alloc_metadata(ShenandoahAllocRequest::Type type, size_t);
381 void reset_alloc_metadata();
382 size_t get_shared_allocs() const;
383 size_t get_tlab_allocs() const;
384 size_t get_gclab_allocs() const;
385
// Update watermark accessors; see the .inline.hpp for the precise protocol.
386 inline HeapWord* get_update_watermark() const;
387 inline void set_update_watermark(HeapWord* w);
388 inline void set_update_watermark_at_safepoint(HeapWord* w);
389
390 private:
// NOTE(review): presumably commit/uncommit the region's backing memory — confirm in .cpp.
391 void do_commit();
392 void do_uncommit();
393
394 void oop_iterate_objects(OopIterateClosure* cl);
395 void oop_iterate_humongous(OopIterateClosure* cl);
396
397 inline void internal_increase_live_data(size_t s);
398
// Internal state setter; external code must use the make_* transitions above.
399 void set_state(RegionState to);
400 };
401
402 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP
|
1 /*
2 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
3 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP
27 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP
28
29 #include "gc/shared/gc_globals.hpp"
30 #include "gc/shared/spaceDecorator.hpp"
31 #include "gc/shenandoah/shenandoahAffiliation.hpp"
32 #include "gc/shenandoah/shenandoahAgeCensus.hpp"
33 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
34 #include "gc/shenandoah/shenandoahAsserts.hpp"
35 #include "gc/shenandoah/shenandoahHeap.hpp"
36 #include "gc/shenandoah/shenandoahPadding.hpp"
37 #include "utilities/sizes.hpp"
38
39 class VMStructs;
40 class ShenandoahHeapRegionStateConstant;
41
// A ShenandoahHeapRegion is one fixed-size subdivision of the Shenandoah heap.
// This (generational) variant additionally tracks the region's generation
// affiliation, object ageing for tenuring, promotion bookkeeping
// (_top_before_promoted) and the old-gen coalesce-and-fill cursor.
// State transitions happen only under the heap lock, which allows several
// regions to change state atomically.
42 class ShenandoahHeapRegion {
43 friend class VMStructs;
44 friend class ShenandoahHeapRegionStateConstant;
45 private:
46 /*
47 Region state is described by a state machine. Transitions are guarded by
48 heap lock, which allows changing the state of several regions atomically.
49 Region states can be logically aggregated in groups.
50
51 "Empty":
52 .................................................................
108 h) Humongous start can go pinned, and thus can be protected from moves (humongous continuations should
109 follow associated humongous starts, not pinnable/movable by themselves);
110 i) Empty cannot go Trash, avoiding useless work;
111 j) ...
112 */
113
114 enum RegionState {
115 _empty_uncommitted, // region is empty and has memory uncommitted
116 _empty_committed, // region is empty and has memory committed
117 _regular, // region is for regular allocations
118 _humongous_start, // region is the humongous start
119 _humongous_cont, // region is the humongous continuation
120 _pinned_humongous_start, // region is both humongous start and pinned
121 _cset, // region is in collection set
122 _pinned, // region is pinned
123 _pinned_cset, // region is pinned and in cset (evac failure path)
124 _trash, // region contains only trash
125 _REGION_STATES_NUM // last
126 };
127
128 public:
// Human-readable name for a state; used for logging and region printouts.
129 static const char* region_state_to_string(RegionState s) {
130 switch (s) {
131 case _empty_uncommitted: return "Empty Uncommitted";
132 case _empty_committed: return "Empty Committed";
133 case _regular: return "Regular";
134 case _humongous_start: return "Humongous Start";
135 case _humongous_cont: return "Humongous Continuation";
136 case _pinned_humongous_start: return "Humongous Start, Pinned";
137 case _cset: return "Collection Set";
138 case _pinned: return "Pinned";
139 case _pinned_cset: return "Collection Set, Pinned";
140 case _trash: return "Trash";
141 default:
142 ShouldNotReachHere();
143 return "";
144 }
145 }
146
147 private:
148 // This method protects from accidental changes in enum order:
// note the ordinals here deliberately differ from the enum declaration order,
// so reordering the enum cannot silently change externally visible ordinals.
149 int region_state_to_ordinal(RegionState s) const {
150 switch (s) {
151 case _empty_uncommitted: return 0;
152 case _empty_committed: return 1;
153 case _regular: return 2;
154 case _humongous_start: return 3;
155 case _humongous_cont: return 4;
156 case _cset: return 5;
157 case _pinned: return 6;
158 case _trash: return 7;
159 case _pinned_cset: return 8;
160 case _pinned_humongous_start: return 9;
161 default:
162 ShouldNotReachHere();
163 return -1;
164 }
165 }
166
// Invoked by the make_* transition methods when the current state does not
// permit the requested transition (see the .cpp for the failure handling).
167 void report_illegal_transition(const char* method);
168
169 public:
// Number of distinct region states; callers can use this to size per-state tables.
170 static int region_states_num() {
171 return _REGION_STATES_NUM;
172 }
173
174 // Allowed transitions from the outside code:
// Transitions that place a region into a generation take the target affiliation.
175 void make_regular_allocation(ShenandoahAffiliation affiliation);
176 void make_young_maybe();
177 void make_regular_bypass();
178 void make_humongous_start();
179 void make_humongous_cont();
180 void make_humongous_start_bypass(ShenandoahAffiliation affiliation);
181 void make_humongous_cont_bypass(ShenandoahAffiliation affiliation);
182 void make_pinned();
183 void make_unpinned();
184 void make_cset();
185 void make_trash();
186 void make_trash_immediate();
187 void make_empty();
188 void make_uncommitted();
189 void make_committed_bypass();
190
191 // Individual states:
192 bool is_empty_uncommitted() const { return _state == _empty_uncommitted; }
193 bool is_empty_committed() const { return _state == _empty_committed; }
194 bool is_regular() const { return _state == _regular; }
195 bool is_humongous_continuation() const { return _state == _humongous_cont; }
196
197 // Participation in logical groups:
198 bool is_empty() const { return is_empty_committed() || is_empty_uncommitted(); }
199 bool is_active() const { return !is_empty() && !is_trash(); }
200 bool is_trash() const { return _state == _trash; }
201 bool is_humongous_start() const { return _state == _humongous_start || _state == _pinned_humongous_start; }
202 bool is_humongous() const { return is_humongous_start() || is_humongous_continuation(); }
203 bool is_committed() const { return !is_empty_uncommitted(); }
204 bool is_cset() const { return _state == _cset || _state == _pinned_cset; }
205 bool is_pinned() const { return _state == _pinned || _state == _pinned_cset || _state == _pinned_humongous_start; }
// Pinned while in the plain _pinned state only (excludes pinned cset/humongous).
206 bool is_regular_pinned() const { return _state == _pinned; }
207
// Generation membership queries; defined in the .inline.hpp.
208 inline bool is_young() const;
209 inline bool is_old() const;
210 inline bool is_affiliated() const;
211
212 // Macro-properties:
213 bool is_alloc_allowed() const { return is_empty() || is_regular() || _state == _pinned; }
214 bool is_stw_move_allowed() const { return is_regular() || _state == _cset || (ShenandoahHumongousMoves && _state == _humongous_start); }
215
216 RegionState state() const { return _state; }
217 int state_ordinal() const { return region_state_to_ordinal(_state); }
218
// Pin bookkeeping — presumably backed by the _critical_pins counter below;
// see the inline/cpp implementation for the exact protocol.
219 void record_pin();
220 void record_unpin();
221 size_t pin_count() const;
222
223 private:
// Region geometry and allocation thresholds, shared by all regions.
// NOTE(review): presumably initialized once by setup_sizes() — confirm in the .cpp.
224 static size_t RegionCount;
225 static size_t RegionSizeBytes;
226 static size_t RegionSizeWords;
227 static size_t RegionSizeBytesShift;
228 static size_t RegionSizeWordsShift;
229 static size_t RegionSizeBytesMask;
230 static size_t RegionSizeWordsMask;
231 static size_t HumongousThresholdBytes;
232 static size_t HumongousThresholdWords;
233 static size_t MaxTLABSizeBytes;
234 static size_t MaxTLABSizeWords;
235
236 // Never updated fields
237 size_t const _index;
238 HeapWord* const _bottom;
239 HeapWord* const _end;
240
241 // Rarely updated fields
242 HeapWord* _new_top;
243 double _empty_time;
244
// Value of _top saved by save_top_before_promote(); read back via
// get_top_before_promote()/restore_top_before_promote() and used_before_promote().
245 HeapWord* _top_before_promoted;
246
247 // Seldom updated fields
248 RegionState _state;
249 HeapWord* _coalesce_and_fill_boundary; // for old regions not selected as collection set candidates.
250
251 // Frequently updated fields
252 HeapWord* _top;
253
254 size_t _tlab_allocs;
255 size_t _gclab_allocs;
256 size_t _plab_allocs;
257
258 volatile size_t _live_data;
259 volatile size_t _critical_pins;
260
261 HeapWord* volatile _update_watermark;
262
// Object age used for tenuring decisions; clamped to markWord::max_age (see increment_age()).
263 uint _age;
264 CENSUS_NOISE(uint _youth;) // tracks epochs of retrograde ageing (rejuvenation)
265
266 public:
267 ShenandoahHeapRegion(HeapWord* start, size_t index, bool committed);
268
269 static const size_t MIN_NUM_REGIONS = 10;
270
271 // Return adjusted max heap size
272 static size_t setup_sizes(size_t max_heap_size);
273
274 double empty_time() {
275 return _empty_time;
276 }
277
// Number of regions needed to hold 'bytes', rounded up to a whole region.
278 inline static size_t required_regions(size_t bytes) {
279 return (bytes + ShenandoahHeapRegion::region_size_bytes() - 1) >> ShenandoahHeapRegion::region_size_bytes_shift();
280 }
281
282 inline static size_t region_count() {
283 return ShenandoahHeapRegion::RegionCount;
284 }
285
334 inline static size_t humongous_threshold_bytes() {
335 return ShenandoahHeapRegion::HumongousThresholdBytes;
336 }
337
338 inline static size_t humongous_threshold_words() {
339 return ShenandoahHeapRegion::HumongousThresholdWords;
340 }
341
342 inline static size_t max_tlab_size_bytes() {
343 return ShenandoahHeapRegion::MaxTLABSizeBytes;
344 }
345
346 inline static size_t max_tlab_size_words() {
347 return ShenandoahHeapRegion::MaxTLABSizeWords;
348 }
349
350 inline size_t index() const {
351 return _index;
352 }
353
// Promotion bookkeeping around _top_before_promoted; see the .inline.hpp.
354 inline void save_top_before_promote();
355 inline HeapWord* get_top_before_promote() const { return _top_before_promoted; }
356 inline void restore_top_before_promote();
357 inline size_t garbage_before_padded_for_promote() const;
358
359 // If next available memory is not aligned on address that is multiple of alignment, fill the empty space
360 // so that returned object is aligned on an address that is a multiple of alignment_in_bytes. Requested
361 // size is in words. It is assumed that this->is_old(). A pad object is allocated, filled, and registered
362 // if necessary to assure the new allocation is properly aligned. Return nullptr if memory is not available.
363 inline HeapWord* allocate_aligned(size_t word_size, ShenandoahAllocRequest &req, size_t alignment_in_bytes);
364
365 // Allocation (return nullptr if full)
366 inline HeapWord* allocate(size_t word_size, ShenandoahAllocRequest req);
367
368 inline void clear_live_data();
369 void set_live_data(size_t s);
370
371 // Increase live data for newly allocated region
372 inline void increase_live_data_alloc_words(size_t s);
373
374 // Increase live data for region scanned with GC
375 inline void increase_live_data_gc_words(size_t s);
376
377 inline bool has_live() const;
378 inline size_t get_live_data_bytes() const;
379 inline size_t get_live_data_words() const;
380
381 inline size_t garbage() const;
382
383 void print_on(outputStream* st) const;
384
385 void recycle();
386
// Preemptible coalesce-and-fill cursor management. The boundary records how far
// filling has progressed: begin resets it to bottom, end marks the region done,
// suspend/resume let the task yield and pick up where it left off.
387 inline void begin_preemptible_coalesce_and_fill() {
388 _coalesce_and_fill_boundary = _bottom;
389 }
390
391 inline void end_preemptible_coalesce_and_fill() {
392 _coalesce_and_fill_boundary = _end;
393 }
394
395 inline void suspend_coalesce_and_fill(HeapWord* next_focus) {
396 _coalesce_and_fill_boundary = next_focus;
397 }
398
399 inline HeapWord* resume_coalesce_and_fill() {
400 return _coalesce_and_fill_boundary;
401 }
402
403 // Coalesce contiguous spans of garbage objects by filling header and reregistering start locations with remembered set.
404 // This is used by old-gen GC following concurrent marking to make old-gen HeapRegions parsable. Return true iff
405 // region is completely coalesced and filled. Returns false if cancelled before task is complete.
406 bool oop_coalesce_and_fill(bool cancellable);
407
408 // Invoke closure on every reference contained within the humongous object that spans this humongous
409 // region if the reference is contained within a DIRTY card and the reference is no more than words following
410 // start within the humongous object.
411 void oop_iterate_humongous_slice(OopIterateClosure* cl, bool dirty_only, HeapWord* start, size_t words, bool write_table);
412
// Block (object) parsing support within [bottom, top).
413 HeapWord* block_start(const void* p) const;
414 size_t block_size(const HeapWord* p) const;
415 bool block_is_obj(const HeapWord* p) const { return p < top(); }
416
417 // Find humongous start region that this region belongs to
418 ShenandoahHeapRegion* humongous_start_region() const;
419
420 HeapWord* top() const { return _top; }
421 void set_top(HeapWord* v) { _top = v; }
422
423 HeapWord* new_top() const { return _new_top; }
424 void set_new_top(HeapWord* v) { _new_top = v; }
425
426 HeapWord* bottom() const { return _bottom; }
427 HeapWord* end() const { return _end; }
428
429 size_t capacity() const { return byte_size(bottom(), end()); }
430 size_t used() const { return byte_size(bottom(), top()); }
431 size_t used_before_promote() const { return byte_size(bottom(), get_top_before_promote()); }
432 size_t free() const { return byte_size(top(), end()); }
433
434 // Does this region contain this address?
435 bool contains(HeapWord* p) const {
436 return (bottom() <= p) && (p < top());
437 }
438
// Per-type (shared/TLAB/GCLAB/PLAB) allocation statistics for this region.
439 inline void adjust_alloc_metadata(ShenandoahAllocRequest::Type type, size_t);
440 void reset_alloc_metadata();
441 size_t get_shared_allocs() const;
442 size_t get_tlab_allocs() const;
443 size_t get_gclab_allocs() const;
444 size_t get_plab_allocs() const;
445
// Update watermark accessors; see the .inline.hpp for the precise protocol.
446 inline HeapWord* get_update_watermark() const;
447 inline void set_update_watermark(HeapWord* w);
448 inline void set_update_watermark_at_safepoint(HeapWord* w);
449
// Generation affiliation of this region (young/old/free).
450 inline ShenandoahAffiliation affiliation() const;
451 inline const char* affiliation_name() const;
452
453 void set_affiliation(ShenandoahAffiliation new_affiliation);
454
455 // Region ageing and rejuvenation
456 uint age() const { return _age; }
457 CENSUS_NOISE(uint youth() const { return _youth; })
458
// Bump the region age by one, clamping at markWord::max_age so the age
// always fits in the object-header age bits.
459 void increment_age() {
460 const uint max_age = markWord::max_age;
461 assert(_age <= max_age, "Error");
462 if (_age++ >= max_age) {
463 _age = max_age; // clamp
464 }
465 }
466
// Reset age to zero; under CENSUS_NOISE, accumulate the discarded age into _youth.
467 void reset_age() {
468 CENSUS_NOISE(_youth += _age;)
469 _age = 0;
470 }
471
472 CENSUS_NOISE(void clear_youth() { _youth = 0; })
473
474 private:
475 void decrement_humongous_waste() const;
// NOTE(review): presumably commit/uncommit the region's backing memory — confirm in .cpp.
476 void do_commit();
477 void do_uncommit();
478
479 inline void internal_increase_live_data(size_t s);
480
// Internal state setter; external code must use the make_* transitions above.
481 void set_state(RegionState to);
482 };
483
484 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP
|