/*
 * Copyright (c) 2016, 2023, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "runtime/atomic.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"

ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedSpace space, char* heap_base) :
  _map_size(heap->num_regions()),
  _region_size_bytes_shift(ShenandoahHeapRegion::region_size_bytes_shift()),
  _map_space(space),
  _cset_map(_map_space.base() + ((uintx)heap_base >> _region_size_bytes_shift)),
  _biased_cset_map(_map_space.base()),
  _heap(heap),
  _garbage(0),
  _used(0),
  _region_count(0),
  _current_index(0) {

  // The collection set map is reserved to cover the entire heap *and* zero addresses.
  // This is needed to accept in-cset checks for both heap oops and nulls, freeing
  // high-performance code from checking for null first.
  //
  // Since heap_base can be far away, committing the entire map would waste memory.
  // Therefore, we only commit the parts that are needed to operate: the heap view,
  // and the zero page.
  //
  // Note: we could instead commit the entire map, and piggyback on OS virtual memory
  // subsystem for mapping not-yet-written-to pages to a single physical backing page,
  // but this is not guaranteed, and would confuse NMT and other memory accounting tools.

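  // Illustrative sketch (not the authoritative check, which lives in the collection
  // set's inline header): because the map is positioned so that shifted heap
  // addresses index it directly, an in-cset query needs no base subtraction, and
  // nullptr (address zero) lands on the committed zero page and reads as
  // "not in collection set":
  //
  //   bool is_in_loc(const void* p) const {
  //     return _biased_cset_map[((uintx) p) >> _region_size_bytes_shift] == 1;
  //   }
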
  MemTracker::record_virtual_memory_type(_map_space.base(), mtGC);

  size_t page_size = os::vm_page_size();

  if (!_map_space.special()) {
    // Commit entire pages that cover the heap cset map.
    char* bot_addr = align_down(_cset_map, page_size);
    char* top_addr = align_up(_cset_map + _map_size, page_size);
    os::commit_memory_or_exit(bot_addr, pointer_delta(top_addr, bot_addr, 1), false,
                              "Unable to commit collection set bitmap: heap");

    // Commit the zero page, if not yet covered by heap cset map.
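    // (Assuming the map space base is page aligned, the zero page is already
    //  covered exactly when the biased heap view starts within the first page
    //  of the map, i.e. when ((uintx)heap_base >> _region_size_bytes_shift) < page_size.)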
    if (bot_addr != _biased_cset_map) {
      os::commit_memory_or_exit(_biased_cset_map, page_size, false,
                                "Unable to commit collection set bitmap: zero page");
    }
  }

  Copy::zero_to_bytes(_cset_map, _map_size);
  Copy::zero_to_bytes(_biased_cset_map, page_size);
}

void ShenandoahCollectionSet::add_region(ShenandoahHeapRegion* r) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Must be VMThread");
  assert(!is_in(r), "Already in collection set");
  _cset_map[r->index()] = 1;
  _region_count++;
  _garbage += r->garbage();
  _used += r->used();

  // Update the region status too. State transition would be checked internally.
  r->make_cset();
}

void ShenandoahCollectionSet::clear() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  Copy::zero_to_bytes(_cset_map, _map_size);

#ifdef ASSERT
  for (size_t index = 0; index < _heap->num_regions(); index ++) {
    assert (!_heap->get_region(index)->is_cset(), "should have been cleared before");
  }
#endif

  _garbage = 0;
  _used = 0;

  _region_count = 0;
  _current_index = 0;
}

ShenandoahHeapRegion* ShenandoahCollectionSet::claim_next() {
  // This code is optimized for the case when collection set contains only
  // a few regions. In this case, it is more constructive to check for is_in
  // before hitting the (potentially contended) atomic index.
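  //
  // For example: two workers may both load old == 5 and both find index 7 in the
  // collection set. The worker whose cmpxchg moves _current_index from 5 to 8
  // claims region 7; the other observes cur == 8, so it resumes scanning from
  // index 8 and retries its next claim against old == 8.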

  size_t max = _heap->num_regions();
  size_t old = Atomic::load(&_current_index);

  for (size_t index = old; index < max; index++) {
    if (is_in(index)) {
      size_t cur = Atomic::cmpxchg(&_current_index, old, index + 1, memory_order_relaxed);
      assert(cur >= old, "Always move forward");
      if (cur == old) {
        // Successfully moved the claim index, this is our region.
        return _heap->get_region(index);
      } else {
        // Somebody else moved the claim index, restart from there.
        index = cur - 1; // adjust for loop post-increment
        old = cur;
      }
    }
  }
  return nullptr;
}

ShenandoahHeapRegion* ShenandoahCollectionSet::next() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Must be VMThread");

  size_t max = _heap->num_regions();
  for (size_t index = _current_index; index < max; index++) {
    if (is_in(index)) {
      _current_index = index + 1;
      return _heap->get_region(index);
    }
  }

  return nullptr;
}

void ShenandoahCollectionSet::print_on(outputStream* out) const {
  out->print_cr("Collection Set : " SIZE_FORMAT "", count());

  debug_only(size_t regions = 0;)
  for (size_t index = 0; index < _heap->num_regions(); index ++) {
    if (is_in(index)) {
      _heap->get_region(index)->print_on(out);
      debug_only(regions ++;)
    }
  }
  assert(regions == count(), "Must match");
}
// ===========================================================================
// A second version of this file follows: the generational Shenandoah variant,
// which additionally tracks young/old evacuation and promotion figures.
// ===========================================================================
/*
 * Copyright (c) 2016, 2023, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "runtime/atomic.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"

ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedSpace space, char* heap_base) :
  _map_size(heap->num_regions()),
  _region_size_bytes_shift(ShenandoahHeapRegion::region_size_bytes_shift()),
  _map_space(space),
  _cset_map(_map_space.base() + ((uintx)heap_base >> _region_size_bytes_shift)),
  _biased_cset_map(_map_space.base()),
  _heap(heap),
  _has_old_regions(false),
  _garbage(0),
  _used(0),
  _live(0),
  _region_count(0),
  _old_garbage(0),
  _current_index(0) {

  // The collection set map is reserved to cover the entire heap *and* zero addresses.
  // This is needed to accept in-cset checks for both heap oops and nulls, freeing
  // high-performance code from checking for null first.
  //
  // Since heap_base can be far away, committing the entire map would waste memory.
  // Therefore, we only commit the parts that are needed to operate: the heap view,
  // and the zero page.
  //
  // Note: we could instead commit the entire map, and piggyback on OS virtual memory
  // subsystem for mapping not-yet-written-to pages to a single physical backing page,
  // but this is not guaranteed, and would confuse NMT and other memory accounting tools.

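  // Illustrative sketch (not the authoritative check, which lives in the collection
  // set's inline header): because the map is positioned so that shifted heap
  // addresses index it directly, an in-cset query needs no base subtraction, and
  // nullptr (address zero) lands on the committed zero page and reads as
  // "not in collection set":
  //
  //   bool is_in_loc(const void* p) const {
  //     return _biased_cset_map[((uintx) p) >> _region_size_bytes_shift] == 1;
  //   }
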
  MemTracker::record_virtual_memory_type(_map_space.base(), mtGC);

  size_t page_size = os::vm_page_size();

  if (!_map_space.special()) {
    // Commit entire pages that cover the heap cset map.
    char* bot_addr = align_down(_cset_map, page_size);
    char* top_addr = align_up(_cset_map + _map_size, page_size);
    os::commit_memory_or_exit(bot_addr, pointer_delta(top_addr, bot_addr, 1), false,
                              "Unable to commit collection set bitmap: heap");

    // Commit the zero page, if not yet covered by heap cset map.
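    // (Assuming the map space base is page aligned, the zero page is already
    //  covered exactly when the biased heap view starts within the first page
    //  of the map, i.e. when ((uintx)heap_base >> _region_size_bytes_shift) < page_size.)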
    if (bot_addr != _biased_cset_map) {
      os::commit_memory_or_exit(_biased_cset_map, page_size, false,
                                "Unable to commit collection set bitmap: zero page");
    }
  }

  Copy::zero_to_bytes(_cset_map, _map_size);
  Copy::zero_to_bytes(_biased_cset_map, page_size);
}

void ShenandoahCollectionSet::add_region(ShenandoahHeapRegion* r) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Must be VMThread");
  assert(!is_in(r), "Already in collection set");
  assert(!r->is_humongous(), "Only add regular regions to the collection set");

  _cset_map[r->index()] = 1;
  size_t live = r->get_live_data_bytes();
  size_t garbage = r->garbage();
  size_t free = r->free();
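  // Account the region against its generation: young regions feed the young
  // evacuation figures (and, at or above the tenuring threshold in generational
  // mode, the promotion figure); old regions feed the old evacuation and old
  // garbage figures.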
  if (r->is_young()) {
    _young_region_count++;
    _young_bytes_to_evacuate += live;
    _young_available_bytes_collected += free;
    if (ShenandoahHeap::heap()->mode()->is_generational() && r->age() >= ShenandoahHeap::heap()->age_census()->tenuring_threshold()) {
      _young_bytes_to_promote += live;
    }
  } else if (r->is_old()) {
    _old_region_count++;
    _old_bytes_to_evacuate += live;
    _old_garbage += garbage;
    _old_available_bytes_collected += free;
  }

  _region_count++;
  _has_old_regions |= r->is_old();
  _garbage += garbage;
  _used += r->used();
  _live += live;
  // Update the region status too. State transition would be checked internally.
  r->make_cset();
}

void ShenandoahCollectionSet::clear() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  Copy::zero_to_bytes(_cset_map, _map_size);

#ifdef ASSERT
  for (size_t index = 0; index < _heap->num_regions(); index ++) {
    assert (!_heap->get_region(index)->is_cset(), "should have been cleared before");
  }
#endif

  _garbage = 0;
  _old_garbage = 0;
  _used = 0;
  _live = 0;

  _region_count = 0;
  _current_index = 0;

  _young_region_count = 0;
  _young_bytes_to_evacuate = 0;
  _young_bytes_to_promote = 0;

  _old_region_count = 0;
  _old_bytes_to_evacuate = 0;

  _young_available_bytes_collected = 0;
  _old_available_bytes_collected = 0;

  _has_old_regions = false;
}

ShenandoahHeapRegion* ShenandoahCollectionSet::claim_next() {
  // This code is optimized for the case when collection set contains only
  // a few regions. In this case, it is more constructive to check for is_in
  // before hitting the (potentially contended) atomic index.
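  //
  // For example: two workers may both load old == 5 and both find index 7 in the
  // collection set. The worker whose cmpxchg moves _current_index from 5 to 8
  // claims region 7; the other observes cur == 8, so it resumes scanning from
  // index 8 and retries its next claim against old == 8.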

  size_t max = _heap->num_regions();
  size_t old = Atomic::load(&_current_index);

  for (size_t index = old; index < max; index++) {
    if (is_in(index)) {
      size_t cur = Atomic::cmpxchg(&_current_index, old, index + 1, memory_order_relaxed);
      assert(cur >= old, "Always move forward");
      if (cur == old) {
        // Successfully moved the claim index, this is our region.
        return _heap->get_region(index);
      } else {
        // Somebody else moved the claim index, restart from there.
        index = cur - 1; // adjust for loop post-increment
        old = cur;
      }
    }
  }
  return nullptr;
}

ShenandoahHeapRegion* ShenandoahCollectionSet::next() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Must be VMThread");

  size_t max = _heap->num_regions();
  for (size_t index = _current_index; index < max; index++) {
    if (is_in(index)) {
      _current_index = index + 1;
      return _heap->get_region(index);
    }
  }

  return nullptr;
}

void ShenandoahCollectionSet::print_on(outputStream* out) const {
  out->print_cr("Collection Set: Regions: "
                SIZE_FORMAT ", Garbage: " SIZE_FORMAT "%s, Live: " SIZE_FORMAT "%s, Used: " SIZE_FORMAT "%s", count(),
                byte_size_in_proper_unit(garbage()), proper_unit_for_byte_size(garbage()),
                byte_size_in_proper_unit(live()), proper_unit_for_byte_size(live()),
                byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));

  debug_only(size_t regions = 0;)
  for (size_t index = 0; index < _heap->num_regions(); index ++) {
    if (is_in(index)) {
      _heap->get_region(index)->print_on(out);
      debug_only(regions ++;)
    }
  }
  assert(regions == count(), "Must match");
}