 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "runtime/atomic.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"

ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedSpace space, char* heap_base) :
  _map_size(heap->num_regions()),
  _region_size_bytes_shift(ShenandoahHeapRegion::region_size_bytes_shift()),
  _map_space(space),
  _cset_map(_map_space.base() + ((uintx)heap_base >> _region_size_bytes_shift)),
  _biased_cset_map(_map_space.base()),
  _heap(heap),
  _garbage(0),
  _used(0),
  _region_count(0),
  _current_index(0) {

  // The collection set map is reserved to cover the entire heap *and* zero addresses.
  // This is needed to accept in-cset checks for both heap oops and nulls, freeing
  // high-performance code from checking for null first.
  //
  // Since heap_base can be far away, committing the entire map would waste memory.
  // Therefore, we only commit the parts that are needed to operate: the heap view,
  // and the zero page.
  //
  // Note: we could instead commit the entire map, and piggyback on OS virtual memory
  // subsystem for mapping not-yet-written-to pages to a single physical backing page,
  // but this is not guaranteed, and would confuse NMT and other memory accounting tools.
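  //
  // A note on the biasing: _cset_map points at the map slot for region 0, while
  // _biased_cset_map points at the slot for address zero. Assuming heap_base is
  // aligned to the region size, an in-cset check can index the biased map directly
  // with (addr >> _region_size_bytes_shift); a null address then lands in the
  // committed, always-zero page and reads as "not in cset".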

  MemTracker::record_virtual_memory_type(_map_space.base(), mtGC);

  size_t page_size = os::vm_page_size();

  if (!_map_space.special()) {
    // Commit entire pages that cover the heap cset map.
    char* bot_addr = align_down(_cset_map, page_size);
    char* top_addr = align_up(_cset_map + _map_size, page_size);
    os::commit_memory_or_exit(bot_addr, pointer_delta(top_addr, bot_addr, 1), false,
                              "Unable to commit collection set bitmap: heap");

    // Commit the zero page, if not yet covered by heap cset map.
    if (bot_addr != _biased_cset_map) {
      os::commit_memory_or_exit(_biased_cset_map, page_size, false,
                                "Unable to commit collection set bitmap: zero page");
    }
  }

  Copy::zero_to_bytes(_cset_map, _map_size);
  Copy::zero_to_bytes(_biased_cset_map, page_size);
}

void ShenandoahCollectionSet::add_region(ShenandoahHeapRegion* r) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Must be VMThread");
  assert(!is_in(r), "Already in collection set");
  _cset_map[r->index()] = 1;
  _region_count++;
  _garbage += r->garbage();
  _used += r->used();

  // Update the region status too. State transition would be checked internally.
  r->make_cset();
}

void ShenandoahCollectionSet::clear() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  Copy::zero_to_bytes(_cset_map, _map_size);

#ifdef ASSERT
  for (size_t index = 0; index < _heap->num_regions(); index++) {
    assert(!_heap->get_region(index)->is_cset(), "should have been cleared before");
  }
#endif

  _garbage = 0;
  _used = 0;

  _region_count = 0;
  _current_index = 0;
}

ShenandoahHeapRegion* ShenandoahCollectionSet::claim_next() {
  // This code is optimized for the case when the collection set contains only
  // a few regions. In this case, it is more constructive to check for is_in
  // before hitting the (potentially contended) atomic index.
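  //
  // Note: the cset map itself is only written at a safepoint (see add_region() and
  // clear()), so workers can read it here without additional synchronization; only
  // the claim index is contended.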

  size_t max = _heap->num_regions();
  size_t old = Atomic::load(&_current_index);

  for (size_t index = old; index < max; index++) {
    if (is_in(index)) {
      size_t cur = Atomic::cmpxchg(&_current_index, old, index + 1, memory_order_relaxed);
      assert(cur >= old, "Always move forward");
      if (cur == old) {
        // Successfully moved the claim index, this is our region.
        return _heap->get_region(index);
      } else {
        // Somebody else moved the claim index, restart from there.
        index = cur - 1; // adjust for loop post-increment
        old = cur;
      }
    }
  }
  return nullptr;
}

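// Unlike claim_next(), this iteration is single-threaded: it runs on the VM thread
// at a safepoint, so the claim index is advanced with plain (non-atomic) updates.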
ShenandoahHeapRegion* ShenandoahCollectionSet::next() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Must be VMThread");

  size_t max = _heap->num_regions();
  for (size_t index = _current_index; index < max; index++) {
    if (is_in(index)) {
      _current_index = index + 1;
      return _heap->get_region(index);
    }
  }

  return nullptr;
}

void ShenandoahCollectionSet::print_on(outputStream* out) const {
  out->print_cr("Collection Set : " SIZE_FORMAT "", count());

  debug_only(size_t regions = 0;)
  for (size_t index = 0; index < _heap->num_regions(); index++) {
    if (is_in(index)) {
      _heap->get_region(index)->print_on(out);
      debug_only(regions++;)
    }
  }
  assert(regions == count(), "Must match");
}
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "runtime/atomic.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"

ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedSpace space, char* heap_base) :
  _map_size(heap->num_regions()),
  _region_size_bytes_shift(ShenandoahHeapRegion::region_size_bytes_shift()),
  _map_space(space),
  _cset_map(_map_space.base() + ((uintx)heap_base >> _region_size_bytes_shift)),
  _biased_cset_map(_map_space.base()),
  _heap(heap),
  _has_old_regions(false),
  _garbage(0),
  _used(0),
  _live(0),
  _region_count(0),
  _old_garbage(0),
  _current_index(0) {

  // The collection set map is reserved to cover the entire heap *and* zero addresses.
  // This is needed to accept in-cset checks for both heap oops and nulls, freeing
  // high-performance code from checking for null first.
  //
  // Since heap_base can be far away, committing the entire map would waste memory.
  // Therefore, we only commit the parts that are needed to operate: the heap view,
  // and the zero page.
  //
  // Note: we could instead commit the entire map, and piggyback on OS virtual memory
  // subsystem for mapping not-yet-written-to pages to a single physical backing page,
  // but this is not guaranteed, and would confuse NMT and other memory accounting tools.
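  //
  // A note on the biasing: _cset_map points at the map slot for region 0, while
  // _biased_cset_map points at the slot for address zero. Assuming heap_base is
  // aligned to the region size, an in-cset check can index the biased map directly
  // with (addr >> _region_size_bytes_shift); a null address then lands in the
  // committed, always-zero page and reads as "not in cset".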

  MemTracker::record_virtual_memory_type(_map_space.base(), mtGC);

  size_t page_size = os::vm_page_size();

  if (!_map_space.special()) {
    // Commit entire pages that cover the heap cset map.
    char* bot_addr = align_down(_cset_map, page_size);
    char* top_addr = align_up(_cset_map + _map_size, page_size);
    os::commit_memory_or_exit(bot_addr, pointer_delta(top_addr, bot_addr, 1), false,
                              "Unable to commit collection set bitmap: heap");

    // Commit the zero page, if not yet covered by heap cset map.
    if (bot_addr != _biased_cset_map) {
      os::commit_memory_or_exit(_biased_cset_map, page_size, false,
                                "Unable to commit collection set bitmap: zero page");
    }
  }

  Copy::zero_to_bytes(_cset_map, _map_size);
  Copy::zero_to_bytes(_biased_cset_map, page_size);
}

void ShenandoahCollectionSet::add_region(ShenandoahHeapRegion* r) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Must be VMThread");
  assert(!is_in(r), "Already in collection set");
  assert(!r->is_humongous(), "Only add regular regions to the collection set");

  _cset_map[r->index()] = 1;

  if (r->affiliation() == YOUNG_GENERATION) {
    _young_region_count++;
    _young_bytes_to_evacuate += r->get_live_data_bytes();
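    // Live data in regions at or above the tenuring threshold is expected to be
    // promoted into the old generation when it is evacuated, so track it separately.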
    if (r->age() >= InitialTenuringThreshold) {
      _young_bytes_to_promote += r->get_live_data_bytes();
    }
  } else if (r->affiliation() == OLD_GENERATION) {
    _old_region_count++;
    _old_bytes_to_evacuate += r->get_live_data_bytes();
    _old_garbage += r->garbage();
  }

  _region_count++;
  _has_old_regions |= r->is_old();
  _garbage += r->garbage();
  _used += r->used();
  _live += r->get_live_data_bytes();

  // Update the region status too. State transition would be checked internally.
  r->make_cset();
}

void ShenandoahCollectionSet::clear() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  Copy::zero_to_bytes(_cset_map, _map_size);

#ifdef ASSERT
  for (size_t index = 0; index < _heap->num_regions(); index++) {
    assert(!_heap->get_region(index)->is_cset(), "should have been cleared before");
  }
#endif

  _garbage = 0;
  _old_garbage = 0;
  _used = 0;
  _live = 0;

  _region_count = 0;
  _current_index = 0;

  _young_region_count = 0;
  _young_bytes_to_evacuate = 0;
  _young_bytes_to_promote = 0;

  _old_region_count = 0;
  _old_bytes_to_evacuate = 0;

  _has_old_regions = false;
}

ShenandoahHeapRegion* ShenandoahCollectionSet::claim_next() {
  // This code is optimized for the case when the collection set contains only
  // a few regions. In this case, it is more constructive to check for is_in
  // before hitting the (potentially contended) atomic index.
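  //
  // Note: the cset map itself is only written at a safepoint (see add_region() and
  // clear()), so workers can read it here without additional synchronization; only
  // the claim index is contended.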

  size_t max = _heap->num_regions();
  size_t old = Atomic::load(&_current_index);

  for (size_t index = old; index < max; index++) {
    if (is_in(index)) {
      size_t cur = Atomic::cmpxchg(&_current_index, old, index + 1, memory_order_relaxed);
      assert(cur >= old, "Always move forward");
      if (cur == old) {
        // Successfully moved the claim index, this is our region.
        return _heap->get_region(index);
      } else {
        // Somebody else moved the claim index, restart from there.
        index = cur - 1; // adjust for loop post-increment
        old = cur;
      }
    }
  }
  return nullptr;
}

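// Unlike claim_next(), this iteration is single-threaded: it runs on the VM thread
// at a safepoint, so the claim index is advanced with plain (non-atomic) updates.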
ShenandoahHeapRegion* ShenandoahCollectionSet::next() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Must be VMThread");

  size_t max = _heap->num_regions();
  for (size_t index = _current_index; index < max; index++) {
    if (is_in(index)) {
      _current_index = index + 1;
      return _heap->get_region(index);
    }
  }

  return nullptr;
}

void ShenandoahCollectionSet::print_on(outputStream* out) const {
  out->print_cr("Collection Set: Regions: "
                SIZE_FORMAT ", Garbage: " SIZE_FORMAT "%s, Live: " SIZE_FORMAT "%s, Used: " SIZE_FORMAT "%s", count(),
                byte_size_in_proper_unit(garbage()), proper_unit_for_byte_size(garbage()),
                byte_size_in_proper_unit(live()), proper_unit_for_byte_size(live()),
                byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));

  debug_only(size_t regions = 0;)
  for (size_t index = 0; index < _heap->num_regions(); index++) {
    if (is_in(index)) {
      _heap->get_region(index)->print_on(out);
      debug_only(regions++;)
    }
  }
  assert(regions == count(), "Must match");
}