/*
 * Copyright (c) 2016, 2023, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "runtime/atomic.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"

ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedSpace space, char* heap_base) :
  _map_size(heap->num_regions()),
  _region_size_bytes_shift(ShenandoahHeapRegion::region_size_bytes_shift()),
  _map_space(space),
  _cset_map(_map_space.base() + ((uintx)heap_base >> _region_size_bytes_shift)),
  _biased_cset_map(_map_space.base()),
  _heap(heap),
  _garbage(0),
  _used(0),
  _region_count(0),
  _current_index(0) {

  // The collection set map is reserved to cover the entire heap *and* zero addresses.
  // This is needed to accept in-cset checks for both heap oops and nulls, freeing
  // high-performance code from checking for null first.
  //
  // Since heap_base can be far away, committing the entire map would waste memory.
  // Therefore, we only commit the parts that are needed to operate: the heap view,
  // and the zero page.
  //
  // Note: we could instead commit the entire map, and piggyback on OS virtual memory
  // subsystem for mapping not-yet-written-to pages to a single physical backing page,
  // but this is not guaranteed, and would confuse NMT and other memory accounting tools.
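  //
  // For illustration: with the biased map, an in-cset check on an arbitrary address,
  // including null, is roughly a single indexed byte load:
  //
  //   bool in_cset = _biased_cset_map[((uintx) addr) >> _region_size_bytes_shift] != 0;
  //
  // A null address indexes offset zero, which lands in the committed zero page and
  // always reads 0. This is only a sketch of the idea; the real checks live in the
  // collection set's inline header.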

  MemTracker::record_virtual_memory_type(_map_space.base(), mtGC);

  size_t page_size = os::vm_page_size();

  if (!_map_space.special()) {
    // Commit entire pages that cover the heap cset map.
    char* bot_addr = align_down(_cset_map, page_size);
    char* top_addr = align_up(_cset_map + _map_size, page_size);
    os::commit_memory_or_exit(bot_addr, pointer_delta(top_addr, bot_addr, 1), false,
                              "Unable to commit collection set bitmap: heap");

    // Commit the zero page, if not yet covered by heap cset map.
    if (bot_addr != _biased_cset_map) {
      os::commit_memory_or_exit(_biased_cset_map, page_size, false,
                                "Unable to commit collection set bitmap: zero page");
    }
  }

  Copy::zero_to_bytes(_cset_map, _map_size);
  Copy::zero_to_bytes(_biased_cset_map, page_size);
}

void ShenandoahCollectionSet::add_region(ShenandoahHeapRegion* r) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Must be VMThread");
  assert(!is_in(r), "Already in collection set");
  _cset_map[r->index()] = 1;
  _region_count++;
  _garbage += r->garbage();
  _used += r->used();

  // Update the region status too. The state transition is checked internally.
  r->make_cset();
}

void ShenandoahCollectionSet::clear() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  Copy::zero_to_bytes(_cset_map, _map_size);

#ifdef ASSERT
  for (size_t index = 0; index < _heap->num_regions(); index ++) {
    assert (!_heap->get_region(index)->is_cset(), "should have been cleared before");
  }
#endif

  _garbage = 0;
  _used = 0;

  _region_count = 0;
  _current_index = 0;
}

ShenandoahHeapRegion* ShenandoahCollectionSet::claim_next() {
  // This code is optimized for the case when collection set contains only
  // a few regions. In this case, it is more constructive to check for is_in
  // before hitting the (potentially contended) atomic index.

  size_t max = _heap->num_regions();
  size_t old = Atomic::load(&_current_index);

  for (size_t index = old; index < max; index++) {
    if (is_in(index)) {
      size_t cur = Atomic::cmpxchg(&_current_index, old, index + 1, memory_order_relaxed);
      assert(cur >= old, "Always move forward");
      if (cur == old) {
        // Successfully moved the claim index, this is our region.
        return _heap->get_region(index);
      } else {
        // Somebody else moved the claim index, restart from there.
        index = cur - 1; // adjust for loop post-increment
        old = cur;       // next claim attempt must compare against the updated index
      }
    }
  }
  return nullptr;
}

ShenandoahHeapRegion* ShenandoahCollectionSet::next() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Must be VMThread");

  size_t max = _heap->num_regions();
  for (size_t index = _current_index; index < max; index++) {
    if (is_in(index)) {
      _current_index = index + 1;
      return _heap->get_region(index);
    }
  }

  return nullptr;
}

void ShenandoahCollectionSet::print_on(outputStream* out) const {
  out->print_cr("Collection Set : " SIZE_FORMAT "", count());

  debug_only(size_t regions = 0;)
  for (size_t index = 0; index < _heap->num_regions(); index ++) {
    if (is_in(index)) {
      _heap->get_region(index)->print_on(out);
      debug_only(regions ++;)
    }
  }
  assert(regions == count(), "Must match");
}

/*
 * Copyright (c) 2016, 2023, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahAgeCensus.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "runtime/atomic.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"

ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedSpace space, char* heap_base) :
  _map_size(heap->num_regions()),
  _region_size_bytes_shift(ShenandoahHeapRegion::region_size_bytes_shift()),
  _map_space(space),
  _cset_map(_map_space.base() + ((uintx)heap_base >> _region_size_bytes_shift)),
  _biased_cset_map(_map_space.base()),
  _heap(heap),
  _has_old_regions(false),
  _garbage(0),
  _used(0),
  _live(0),
  _region_count(0),
  _old_garbage(0),
  _preselected_regions(nullptr),
  _current_index(0) {

  // The collection set map is reserved to cover the entire heap *and* zero addresses.
  // This is needed to accept in-cset checks for both heap oops and nulls, freeing
  // high-performance code from checking for null first.
  //
  // Since heap_base can be far away, committing the entire map would waste memory.
  // Therefore, we only commit the parts that are needed to operate: the heap view,
  // and the zero page.
  //
  // Note: we could instead commit the entire map, and piggyback on OS virtual memory
  // subsystem for mapping not-yet-written-to pages to a single physical backing page,
  // but this is not guaranteed, and would confuse NMT and other memory accounting tools.
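  //
  // For example, with 4 MB regions the region size shift is 22, so the map byte for
  // an address p sits at _biased_cset_map + (p >> 22), and the bytes corresponding to
  // actual heap addresses start at _biased_cset_map + (heap_base >> 22), which is
  // exactly where _cset_map points. A null address maps to offset zero in the
  // committed zero page and always reads as "not in collection set". (Sketch of the
  // arithmetic only; the real checks live in the collection set's inline header.)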

  MemTracker::record_virtual_memory_type(_map_space.base(), mtGC);

  size_t page_size = os::vm_page_size();

  if (!_map_space.special()) {
    // Commit entire pages that cover the heap cset map.
    char* bot_addr = align_down(_cset_map, page_size);
    char* top_addr = align_up(_cset_map + _map_size, page_size);
    os::commit_memory_or_exit(bot_addr, pointer_delta(top_addr, bot_addr, 1), false,
                              "Unable to commit collection set bitmap: heap");

    // Commit the zero page, if not yet covered by heap cset map.
    if (bot_addr != _biased_cset_map) {
      os::commit_memory_or_exit(_biased_cset_map, page_size, false,
                                "Unable to commit collection set bitmap: zero page");
    }
  }

  Copy::zero_to_bytes(_cset_map, _map_size);
  Copy::zero_to_bytes(_biased_cset_map, page_size);
}

void ShenandoahCollectionSet::add_region(ShenandoahHeapRegion* r) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Must be VMThread");
  assert(!is_in(r), "Already in collection set");
  assert(!r->is_humongous(), "Only add regular regions to the collection set");

  _cset_map[r->index()] = 1;
  size_t live = r->get_live_data_bytes();
  size_t garbage = r->garbage();
  size_t free = r->free();
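
  // Accounting: live data in young regions counts toward young evacuation; in
  // generational mode, regions at or above the tenuring threshold also have their
  // live data earmarked for promotion into the old generation. Old regions feed
  // the old evacuation and old garbage totals instead.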
  if (r->is_young()) {
    _young_bytes_to_evacuate += live;
    _young_available_bytes_collected += free;
    if (ShenandoahHeap::heap()->mode()->is_generational() && r->age() >= ShenandoahGenerationalHeap::heap()->age_census()->tenuring_threshold()) {
      _young_bytes_to_promote += live;
    }
  } else if (r->is_old()) {
    _old_bytes_to_evacuate += live;
    _old_garbage += garbage;
  }

  _region_count++;
  _has_old_regions |= r->is_old();
  _garbage += garbage;
  _used += r->used();
  _live += live;
  // Update the region status too. The state transition is checked internally.
  r->make_cset();
}

void ShenandoahCollectionSet::clear() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  Copy::zero_to_bytes(_cset_map, _map_size);

#ifdef ASSERT
  for (size_t index = 0; index < _heap->num_regions(); index ++) {
    assert (!_heap->get_region(index)->is_cset(), "should have been cleared before");
  }
#endif

  _garbage = 0;
  _old_garbage = 0;
  _used = 0;
  _live = 0;

  _region_count = 0;
  _current_index = 0;

  _young_bytes_to_evacuate = 0;
  _young_bytes_to_promote = 0;
  _old_bytes_to_evacuate = 0;

  _young_available_bytes_collected = 0;

  _has_old_regions = false;
}

ShenandoahHeapRegion* ShenandoahCollectionSet::claim_next() {
  // This code is optimized for the case when collection set contains only
  // a few regions. In this case, it is more constructive to check for is_in
  // before hitting the (potentially contended) atomic index.

  size_t max = _heap->num_regions();
  size_t old = Atomic::load(&_current_index);

  for (size_t index = old; index < max; index++) {
    if (is_in(index)) {
      size_t cur = Atomic::cmpxchg(&_current_index, old, index + 1, memory_order_relaxed);
      assert(cur >= old, "Always move forward");
      if (cur == old) {
        // Successfully moved the claim index, this is our region.
        return _heap->get_region(index);
      } else {
        // Somebody else moved the claim index, restart from there.
        index = cur - 1; // adjust for loop post-increment
        old = cur;       // next claim attempt must compare against the updated index
      }
    }
  }
  return nullptr;
}

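// Unlike claim_next() above, next() is the non-concurrent variant: it is only called
// by the VM thread at a Shenandoah safepoint (see the asserts below), so it can
// advance _current_index without atomics.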
ShenandoahHeapRegion* ShenandoahCollectionSet::next() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Must be VMThread");

  size_t max = _heap->num_regions();
  for (size_t index = _current_index; index < max; index++) {
    if (is_in(index)) {
      _current_index = index + 1;
      return _heap->get_region(index);
    }
  }

  return nullptr;
}

void ShenandoahCollectionSet::print_on(outputStream* out) const {
  out->print_cr("Collection Set: Regions: "
                SIZE_FORMAT ", Garbage: " SIZE_FORMAT "%s, Live: " SIZE_FORMAT "%s, Used: " SIZE_FORMAT "%s", count(),
                byte_size_in_proper_unit(garbage()), proper_unit_for_byte_size(garbage()),
                byte_size_in_proper_unit(live()), proper_unit_for_byte_size(live()),
                byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));

  debug_only(size_t regions = 0;)
  for (size_t index = 0; index < _heap->num_regions(); index ++) {
    if (is_in(index)) {
      _heap->get_region(index)->print_on(out);
      debug_only(regions ++;)
    }
  }
  assert(regions == count(), "Must match");
}