/*
 * Copyright (c) 2016, 2023, Red Hat, Inc. All rights reserved.
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "nmt/memTracker.hpp"
#include "runtime/atomic.hpp"
#include "utilities/copy.hpp"

ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedSpace space, char* heap_base) :
  _map_size(heap->num_regions()),
  _region_size_bytes_shift(ShenandoahHeapRegion::region_size_bytes_shift()),
  _map_space(space),
  _cset_map(_map_space.base() + ((uintx)heap_base >> _region_size_bytes_shift)),
  _biased_cset_map(_map_space.base()),
  _heap(heap),
  _garbage(0),
  _used(0),
  _region_count(0),
  _current_index(0) {

  // The collection set map is reserved to cover the entire heap *and* zero addresses.
  // This is needed to accept in-cset checks for both heap oops and nulls, freeing
  // high-performance code from checking for null first.
  //
  // Since heap_base can be far away, committing the entire map would waste memory.
  // Therefore, we only commit the parts that are needed to operate: the heap view,
  // and the zero page.
  //
  // Note: we could instead commit the entire map, and piggyback on OS virtual memory
  // subsystem for mapping not-yet-written-to pages to a single physical backing page,
  // but this is not guaranteed, and would confuse NMT and other memory accounting tools.
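  //
  // Illustrative sketch (added note, not a quote of the actual lookup code): with the
  // bias chosen above, an in-cset query can be a single indexed byte load that is safe
  // for null as well, roughly:
  //
  //   bool in_cset(const void* addr) {
  //     return _biased_cset_map[(uintx)addr >> _region_size_bytes_shift] != 0;
  //   }
  //
  // A heap address lands in the committed heap view of the map; a null address indexes
  // the committed zero page and therefore reads 0.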

  MemTracker::record_virtual_memory_tag(_map_space.base(), mtGC);

  size_t page_size = os::vm_page_size();

  if (!_map_space.special()) {
    // Commit entire pages that cover the heap cset map.
    char* bot_addr = align_down(_cset_map, page_size);
    char* top_addr = align_up(_cset_map + _map_size, page_size);
    os::commit_memory_or_exit(bot_addr, pointer_delta(top_addr, bot_addr, 1), false,
                              "Unable to commit collection set bitmap: heap");

    // Commit the zero page, if not yet covered by heap cset map.
    if (bot_addr != _biased_cset_map) {
      os::commit_memory_or_exit(_biased_cset_map, page_size, false,
                                "Unable to commit collection set bitmap: zero page");
    }
  }

  Copy::zero_to_bytes(_cset_map, _map_size);
  Copy::zero_to_bytes(_biased_cset_map, page_size);
}

void ShenandoahCollectionSet::add_region(ShenandoahHeapRegion* r) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Must be VMThread");
  assert(!is_in(r), "Already in collection set");
  _cset_map[r->index()] = 1;
  _region_count++;
  _garbage += r->garbage();
  _used += r->used();

  // Update the region status too. State transition would be checked internally.
  r->make_cset();
}

void ShenandoahCollectionSet::clear() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  Copy::zero_to_bytes(_cset_map, _map_size);

#ifdef ASSERT
  for (size_t index = 0; index < _heap->num_regions(); index ++) {
    assert (!_heap->get_region(index)->is_cset(), "should have been cleared before");
  }
#endif

  _garbage = 0;
  _used = 0;

  _region_count = 0;
  _current_index = 0;
}

ShenandoahHeapRegion* ShenandoahCollectionSet::claim_next() {
  // This code is optimized for the case when collection set contains only
  // a few regions. In this case, it is more constructive to check for is_in
  // before hitting the (potentially contended) atomic index.
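  //
  // Claiming note (added): cmpxchg only advances _current_index when it still equals
  // the value this thread last observed. On success, no other thread has claimed a
  // region in [old, index], so the region at index belongs to this claimer. On failure,
  // another thread advanced the index, and the scan resumes from the updated value
  // instead of restarting from the beginning.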

  size_t max = _heap->num_regions();
  size_t old = Atomic::load(&_current_index);

  for (size_t index = old; index < max; index++) {
    if (is_in(index)) {
      size_t cur = Atomic::cmpxchg(&_current_index, old, index + 1, memory_order_relaxed);
      assert(cur >= old, "Always move forward");
      if (cur == old) {
        // Successfully moved the claim index, this is our region.
        return _heap->get_region(index);
      } else {
        // Somebody else moved the claim index, restart from there.
        index = cur - 1; // adjust for loop post-increment
        old = cur;
      }
    }
  }
  return nullptr;
}

ShenandoahHeapRegion* ShenandoahCollectionSet::next() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Must be VMThread");

  size_t max = _heap->num_regions();
  for (size_t index = _current_index; index < max; index++) {
    if (is_in(index)) {
      _current_index = index + 1;
      return _heap->get_region(index);
    }
  }

  return nullptr;
}

void ShenandoahCollectionSet::print_on(outputStream* out) const {
  out->print_cr("Collection Set : " SIZE_FORMAT "", count());

  debug_only(size_t regions = 0;)
  for (size_t index = 0; index < _heap->num_regions(); index ++) {
    if (is_in(index)) {
      _heap->get_region(index)->print_on(out);
      debug_only(regions ++;)
    }
  }
  assert(regions == count(), "Must match");
}

/*
 * Copyright (c) 2016, 2023, Red Hat, Inc. All rights reserved.
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahAgeCensus.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "nmt/memTracker.hpp"
#include "runtime/atomic.hpp"
#include "utilities/copy.hpp"

ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedSpace space, char* heap_base) :
  _map_size(heap->num_regions()),
  _region_size_bytes_shift(ShenandoahHeapRegion::region_size_bytes_shift()),
  _map_space(space),
  _cset_map(_map_space.base() + ((uintx)heap_base >> _region_size_bytes_shift)),
  _biased_cset_map(_map_space.base()),
  _heap(heap),
  _has_old_regions(false),
  _garbage(0),
  _used(0),
  _live(0),
  _region_count(0),
  _old_garbage(0),
  _preselected_regions(nullptr),
  _current_index(0) {

  // The collection set map is reserved to cover the entire heap *and* zero addresses.
  // This is needed to accept in-cset checks for both heap oops and nulls, freeing
  // high-performance code from checking for null first.
  //
  // Since heap_base can be far away, committing the entire map would waste memory.
  // Therefore, we only commit the parts that are needed to operate: the heap view,
  // and the zero page.
  //
  // Note: we could instead commit the entire map, and piggyback on OS virtual memory
  // subsystem for mapping not-yet-written-to pages to a single physical backing page,
  // but this is not guaranteed, and would confuse NMT and other memory accounting tools.
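  //
  // Illustrative sketch (added note, not a quote of the actual lookup code): with this
  // bias, an in-cset query can be a single indexed byte load that also tolerates null:
  //
  //   bool in_cset(const void* addr) {
  //     return _biased_cset_map[(uintx)addr >> _region_size_bytes_shift] != 0;
  //   }
  //
  // Heap addresses hit the committed heap view; null indexes the committed zero page.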

  MemTracker::record_virtual_memory_tag(_map_space.base(), mtGC);

  size_t page_size = os::vm_page_size();

  if (!_map_space.special()) {
    // Commit entire pages that cover the heap cset map.
    char* bot_addr = align_down(_cset_map, page_size);
    char* top_addr = align_up(_cset_map + _map_size, page_size);
    os::commit_memory_or_exit(bot_addr, pointer_delta(top_addr, bot_addr, 1), false,
                              "Unable to commit collection set bitmap: heap");

    // Commit the zero page, if not yet covered by heap cset map.
    if (bot_addr != _biased_cset_map) {
      os::commit_memory_or_exit(_biased_cset_map, page_size, false,
                                "Unable to commit collection set bitmap: zero page");
    }
  }

  Copy::zero_to_bytes(_cset_map, _map_size);
  Copy::zero_to_bytes(_biased_cset_map, page_size);
}

void ShenandoahCollectionSet::add_region(ShenandoahHeapRegion* r) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Must be VMThread");
  assert(!is_in(r), "Already in collection set");
  assert(!r->is_humongous(), "Only add regular regions to the collection set");

  _cset_map[r->index()] = 1;
  size_t live = r->get_live_data_bytes();
  size_t garbage = r->garbage();
  size_t free = r->free();
  if (r->is_young()) {
    _young_bytes_to_evacuate += live;
    _young_available_bytes_collected += free;
    if (ShenandoahHeap::heap()->mode()->is_generational() && r->age() >= ShenandoahGenerationalHeap::heap()->age_census()->tenuring_threshold()) {
      _young_bytes_to_promote += live;
    }
  } else if (r->is_old()) {
    _old_bytes_to_evacuate += live;
    _old_garbage += garbage;
  }
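  // Generational bookkeeping note (added): live bytes in young regions whose age has
  // reached the tenuring threshold are expected to be promoted (copied into the old
  // generation) during evacuation, so they are tracked separately from the rest of the
  // young evacuation work. This is accounting only; the actual copy happens later,
  // during evacuation.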

  _region_count++;
  _has_old_regions |= r->is_old();
  _garbage += garbage;
  _used += r->used();
  _live += live;
  // Update the region status too. State transition would be checked internally.
  r->make_cset();
}

void ShenandoahCollectionSet::clear() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  Copy::zero_to_bytes(_cset_map, _map_size);

#ifdef ASSERT
  for (size_t index = 0; index < _heap->num_regions(); index ++) {
    assert (!_heap->get_region(index)->is_cset(), "should have been cleared before");
  }
#endif

  _garbage = 0;
  _old_garbage = 0;
  _used = 0;
  _live = 0;

  _region_count = 0;
  _current_index = 0;

  _young_bytes_to_evacuate = 0;
  _young_bytes_to_promote = 0;
  _old_bytes_to_evacuate = 0;

  _young_available_bytes_collected = 0;

  _has_old_regions = false;
}

ShenandoahHeapRegion* ShenandoahCollectionSet::claim_next() {
  // This code is optimized for the case when collection set contains only
  // a few regions. In this case, it is more constructive to check for is_in
  // before hitting the (potentially contended) atomic index.
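  //
  // Claiming note (added): the cmpxchg advances _current_index only if it still holds
  // the value this thread last observed; success means the region at index is ours,
  // failure means another thread advanced the index and the scan resumes from the
  // updated value rather than from the beginning.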

  size_t max = _heap->num_regions();
  size_t old = Atomic::load(&_current_index);

  for (size_t index = old; index < max; index++) {
    if (is_in(index)) {
      size_t cur = Atomic::cmpxchg(&_current_index, old, index + 1, memory_order_relaxed);
      assert(cur >= old, "Always move forward");
      if (cur == old) {
        // Successfully moved the claim index, this is our region.
        return _heap->get_region(index);
      } else {
        // Somebody else moved the claim index, restart from there.
        index = cur - 1; // adjust for loop post-increment
        old = cur;
      }
    }
  }
  return nullptr;
}

ShenandoahHeapRegion* ShenandoahCollectionSet::next() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Must be VMThread");

  size_t max = _heap->num_regions();
  for (size_t index = _current_index; index < max; index++) {
    if (is_in(index)) {
      _current_index = index + 1;
      return _heap->get_region(index);
    }
  }

  return nullptr;
}

void ShenandoahCollectionSet::print_on(outputStream* out) const {
  out->print_cr("Collection Set: Regions: "
                SIZE_FORMAT ", Garbage: " SIZE_FORMAT "%s, Live: " SIZE_FORMAT "%s, Used: " SIZE_FORMAT "%s", count(),
                byte_size_in_proper_unit(garbage()), proper_unit_for_byte_size(garbage()),
                byte_size_in_proper_unit(live()), proper_unit_for_byte_size(live()),
                byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));

  debug_only(size_t regions = 0;)
  for (size_t index = 0; index < _heap->num_regions(); index ++) {
    if (is_in(index)) {
      _heap->get_region(index)->print_on(out);
      debug_only(regions ++;)
    }
  }
  assert(regions == count(), "Must match");
}