1 /*
2 * Copyright (c) 2015, 2022, Red Hat, Inc. All rights reserved.
3 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
4 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 *
7 * This code is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 only, as
9 * published by the Free Software Foundation.
10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 *
25 */
26
27 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHMARK_INLINE_HPP
28 #define SHARE_GC_SHENANDOAH_SHENANDOAHMARK_INLINE_HPP
29
30 #include "gc/shenandoah/shenandoahMark.hpp"
31
32 #include "gc/shared/continuationGCSupport.inline.hpp"
33 #include "gc/shenandoah/shenandoahAgeCensus.hpp"
34 #include "gc/shenandoah/shenandoahAsserts.hpp"
35 #include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
36 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
37 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
38 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
39 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
40 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
41 #include "gc/shenandoah/shenandoahStringDedup.inline.hpp"
42 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
43 #include "gc/shenandoah/shenandoahUtils.hpp"
44 #include "memory/iterator.inline.hpp"
45 #include "oops/compressedOops.inline.hpp"
46 #include "oops/oop.inline.hpp"
47 #include "runtime/prefetch.inline.hpp"
48 #include "utilities/devirtualizer.inline.hpp"
49 #include "utilities/powerOfTwo.hpp"
50
51 template <StringDedupMode STRING_DEDUP>
52 void ShenandoahMark::dedup_string(oop obj, StringDedup::Requests* const req) {
53 if (STRING_DEDUP == ENQUEUE_DEDUP) {
54 if (ShenandoahStringDedup::is_candidate(obj)) {
55 req->add(obj);
56 }
57 } else if (STRING_DEDUP == ALWAYS_DEDUP) {
58 if (ShenandoahStringDedup::is_string_candidate(obj) &&
59 !ShenandoahStringDedup::dedup_requested(obj)) {
60 req->add(obj);
61 }
62 }
63 }
64
// Process one marking task: scan the references of the task's object with
// closure 'cl', pushing any newly discovered work onto queue 'q'. Object
// arrays are split into chunked subtasks to bound the size of each unit of
// work; liveness for the object is accumulated into the per-worker
// 'live_data' array. 'req' receives string-dedup candidates per STRING_DEDUP.
template <class T, ShenandoahGenerationType GENERATION, StringDedupMode STRING_DEDUP>
void ShenandoahMark::do_task(ShenandoahObjToScanQueue* q, T* cl, ShenandoahLiveData* live_data, StringDedup::Requests* const req, ShenandoahMarkTask* task, uint worker_id) {
  oop obj = task->obj();

  shenandoah_assert_not_forwarded(nullptr, obj);
  shenandoah_assert_marked(nullptr, obj);
  shenandoah_assert_not_in_cset_except(nullptr, obj, ShenandoahHeap::heap()->cancelled_gc());

  // Are we in weak subgraph scan?
  bool weak = task->is_weak();
  cl->set_weak(weak);

  if (task->is_not_chunked()) {
    if (obj->is_instance()) {
      // Case 1: Normal oop, process as usual.
      if (obj->is_stackChunk()) {
        // Loom doesn't support mixing of weak marking and strong marking of stack chunks.
        cl->set_weak(false);
      }

      obj->oop_iterate(cl);
      dedup_string<STRING_DEDUP>(obj, req);
    } else if (obj->is_refArray()) {
      // Case 2: Object array instance and no chunk is set. Must be the first
      // time we visit it, start the chunked processing.
      do_chunked_array_start<T>(q, cl, obj, weak);
    } else {
      // Case 3: Primitive array. Do nothing, no oops there. We use the same
      // performance tweak TypeArrayKlass::oop_oop_iterate_impl is using:
      // We skip iterating over the klass pointer since we know that
      // Universe::TypeArrayKlass never moves.
      assert (obj->is_typeArray(), "should be type array");
    }
    // Count liveness the last: push the outstanding work to the queues first
    // Avoid double-counting objects that are visited twice due to upgrade
    // from final- to strong mark.
    if (task->count_liveness()) {
      count_liveness<GENERATION>(live_data, obj, worker_id);
    }
  } else {
    // Case 4: Array chunk, has sensible chunk id. Process it.
    do_chunked_array<T>(q, cl, obj, task->chunk(), task->pow(), weak);
  }
}
109
// Account the live words of 'obj' against its containing region(s).
// Regular objects accumulate into the per-worker 'live_data' cache, which is
// flushed to the region when it would overflow; humongous objects flush the
// used size of every region in the chain directly. Young objects additionally
// feed the age census used for generational tenuring decisions.
template <ShenandoahGenerationType GENERATION>
inline void ShenandoahMark::count_liveness(ShenandoahLiveData* live_data, oop obj, uint worker_id) {
  const ShenandoahHeap* const heap = ShenandoahHeap::heap();
  const size_t region_idx = heap->heap_region_index_containing(obj);
  ShenandoahHeapRegion* const region = heap->get_region(region_idx);
  const size_t size = obj->size();

  // Age census for objects in the young generation
  if (GENERATION == YOUNG || (GENERATION == GLOBAL && region->is_young())) {
    assert(heap->mode()->is_generational(), "Only if generational");
    assert(region->is_young(), "Only for young objects");
    const uint age = ShenandoahHeap::get_object_age(obj);
    ShenandoahAgeCensus* const census = ShenandoahGenerationalHeap::heap()->age_census();
    // With census noise tracking, region age/youth are recorded alongside.
    CENSUS_NOISE(census->add(age, region->age(), region->youth(), size, worker_id);)
    NO_CENSUS_NOISE(census->add(age, region->age(), size, worker_id);)
  }

  if (!region->is_humongous_start()) {
    assert(!region->is_humongous(), "Cannot have continuations here");
    assert(region->is_affiliated(), "Do not count live data within Free Regular Region %zu", region_idx);
    ShenandoahLiveData cur = live_data[region_idx];
    size_t new_val = size + cur;
    if (new_val >= SHENANDOAH_LIVEDATA_MAX) {
      // overflow, flush to region data
      region->increase_live_data_gc_words(new_val);
      live_data[region_idx] = 0;
    } else {
      // still good, remember in locals
      live_data[region_idx] = (ShenandoahLiveData) new_val;
    }
  } else {
    shenandoah_assert_in_correct_region(nullptr, obj);
    size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);

    assert(region->is_affiliated(), "Do not count live data within FREE Humongous Start Region %zu", region_idx);
    // Humongous objects span whole regions: credit each region in the chain
    // with its full used size rather than going through the live_data cache.
    for (size_t i = region_idx; i < region_idx + num_regions; i++) {
      ShenandoahHeapRegion* chain_reg = heap->get_region(i);
      assert(chain_reg->is_humongous(), "Expecting a humongous region");
      assert(chain_reg->is_affiliated(), "Do not count live data within FREE Humongous Continuation Region %zu", i);
      chain_reg->increase_live_data_gc_words(chain_reg->used() >> LogHeapWordSize);
    }
  }
}
153
// First visit of an object array: either scan it directly (small arrays) or
// split it into power-of-two-sized chunk tasks pushed onto 'q'. Chunks are
// encoded as (chunk, pow) pairs where a chunk covers elements
// [(chunk-1) * 2^pow, chunk * 2^pow); only full chunks go on the queue, and
// the irregular tail past the last full chunk is scanned inline here.
template <class T>
inline void ShenandoahMark::do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop obj, bool weak) {
  assert(obj->is_refArray(), "expect object array");
  objArrayOop array = objArrayOop(obj);
  int len = array->length();

  // Mark objArray klass metadata
  if (Devirtualizer::do_metadata(cl)) {
    Devirtualizer::do_klass(cl, array->klass());
  }

  if (len <= (int) ObjArrayMarkingStride*2) {
    // A few slices only, process directly
    array->oop_iterate_elements_range(cl, 0, len);
  } else {
    int bits = log2i_graceful(len);
    // Compensate for non-power-of-two arrays, cover the array in excess:
    if (len != (1 << bits)) bits++;

    // Only allow full chunks on the queue. This frees do_chunked_array() from checking from/to
    // boundaries against array->length(), touching the array header on every chunk.
    //
    // To do this, we cut the prefix in full-sized chunks, and submit them on the queue.
    // If the array is not divided in chunk sizes, then there would be an irregular tail,
    // which we will process separately.

    int last_idx = 0;

    int chunk = 1;
    int pow = bits;

    // Handle overflow
    if (pow >= 31) {
      assert (pow == 31, "sanity");
      // 1 << 31 would overflow int: drop to pow 30 and cover the upper half
      // of the index space with an explicit second chunk.
      pow--;
      chunk = 2;
      last_idx = (1 << pow);
      bool pushed = q->push(ShenandoahMarkTask(array, true, weak, 1, pow));
      assert(pushed, "overflow queue should always succeed pushing");
    }

    // Split out tasks, as suggested in ShenandoahMarkTask docs. Record the last
    // successful right boundary to figure out the irregular tail.
    while ((1 << pow) > (int)ObjArrayMarkingStride &&
           (chunk*2 < ShenandoahMarkTask::chunk_size())) {
      pow--;
      int left_chunk = chunk*2 - 1;
      int right_chunk = chunk*2;
      int left_chunk_end = left_chunk * (1 << pow);
      if (left_chunk_end < len) {
        // Left half is fully inside the array: queue it, keep splitting right.
        bool pushed = q->push(ShenandoahMarkTask(array, true, weak, left_chunk, pow));
        assert(pushed, "overflow queue should always succeed pushing");
        chunk = right_chunk;
        last_idx = left_chunk_end;
      } else {
        // Left half already spills past the end: keep splitting it instead.
        chunk = left_chunk;
      }
    }

    // Process the irregular tail, if present
    int from = last_idx;
    if (from < len) {
      array->oop_iterate_elements_range(cl, from, len);
    }
  }
}
220
// Process one previously queued array chunk. While the chunk is still larger
// than the marking stride and chunk ids remain encodable, split it in half:
// push the left half back on the queue and continue with the right half.
// Chunks pushed by do_chunked_array_start() are guaranteed to be full, so no
// bounds check against array->length() is needed outside of assertions.
template <class T>
inline void ShenandoahMark::do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop obj, int chunk, int pow, bool weak) {
  assert(obj->is_refArray(), "expect object array");
  objArrayOop array = objArrayOop(obj);

  // Split out tasks, as suggested in ShenandoahMarkTask docs. Avoid pushing tasks that
  // are known to start beyond the array.
  while ((1 << pow) > (int)ObjArrayMarkingStride && (chunk*2 < ShenandoahMarkTask::chunk_size())) {
    pow--;
    chunk *= 2;
    bool pushed = q->push(ShenandoahMarkTask(array, true, weak, chunk - 1, pow));
    assert(pushed, "overflow queue should always succeed pushing");
  }

  // Chunk 'chunk' at power 'pow' covers [(chunk-1) * 2^pow, chunk * 2^pow).
  int chunk_size = 1 << pow;

  int from = (chunk - 1) * chunk_size;
  int to = chunk * chunk_size;

#ifdef ASSERT
  int len = array->length();
  assert (0 <= from && from < len, "from is sane: %d/%d", from, len);
  assert (0 < to && to <= len, "to is sane: %d/%d", to, len);
#endif

  array->oop_iterate_elements_range(cl, from, to);
}
248
249 template <ShenandoahGenerationType GENERATION>
250 class ShenandoahSATBBufferClosure : public SATBBufferClosure {
251 private:
252 ShenandoahObjToScanQueue* _queue;
253 ShenandoahObjToScanQueue* _old_queue;
254 ShenandoahHeap* _heap;
255 ShenandoahMarkingContext* const _mark_context;
256 public:
257 ShenandoahSATBBufferClosure(ShenandoahObjToScanQueue* q, ShenandoahObjToScanQueue* old_q) :
258 _queue(q),
259 _old_queue(old_q),
260 _heap(ShenandoahHeap::heap()),
261 _mark_context(_heap->marking_context())
262 {
263 }
264
265 void do_buffer(void **buffer, size_t size) {
266 assert(size == 0 || !_heap->has_forwarded_objects() || _heap->is_concurrent_old_mark_in_progress(), "Forwarded objects are not expected here");
267 for (size_t i = 0; i < size; ++i) {
268 oop *p = (oop *) &buffer[i];
269 ShenandoahMark::mark_through_ref<oop, GENERATION>(p, _queue, _old_queue, _mark_context, false);
270 }
271 }
272 };
273
274 template<ShenandoahGenerationType GENERATION>
275 bool ShenandoahMark::in_generation(ShenandoahHeap* const heap, oop obj) {
276 // Each in-line expansion of in_generation() resolves GENERATION at compile time.
277 if (GENERATION == YOUNG) {
278 return heap->is_in_young(obj);
279 }
280
281 if (GENERATION == OLD) {
282 return heap->is_in_old(obj);
283 }
284
285 assert((GENERATION == GLOBAL || GENERATION == NON_GEN), "Unexpected generation type");
286 assert(heap->is_in(obj), "Object must be in heap");
287 return true;
288 }
289
// Mark the object referenced by slot 'p' for generational marking modes.
// Objects inside the generation being marked go to 'q'; objects outside it
// are either pushed to 'old_q' (when provided) or, for OLD marking that found
// a young pointee, recorded via a dirty card so remembered-set scanning will
// revisit the slot. Cards are also dirtied for old->young pointers discovered
// by YOUNG and GLOBAL marking.
template<class T, ShenandoahGenerationType GENERATION>
inline void ShenandoahMark::mark_through_ref(T *p, ShenandoahObjToScanQueue* q, ShenandoahObjToScanQueue* old_q, ShenandoahMarkingContext* const mark_context, bool weak) {
  // Note: This is a very hot code path, so the code should be conditional on GENERATION template
  // parameter where possible, in order to generate the most efficient code.

  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);

    ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
    shenandoah_assert_not_forwarded(p, obj);
    shenandoah_assert_not_in_cset_except(p, obj, heap->cancelled_gc());
    if (in_generation<GENERATION>(heap, obj)) {
      mark_ref(q, mark_context, weak, obj);
      shenandoah_assert_marked(p, obj);
      if (GENERATION == YOUNG && heap->is_in_old(p)) {
        // Mark card as dirty because remembered set scanning still finds interesting pointer.
        heap->old_generation()->mark_card_as_dirty((HeapWord*)p);
      } else if (GENERATION == GLOBAL && heap->is_in_old(p) && heap->is_in_young(obj)) {
        // Mark card as dirty because GLOBAL marking finds interesting pointer.
        heap->old_generation()->mark_card_as_dirty((HeapWord*)p);
      }
    } else if (old_q != nullptr) {
      // Young mark, bootstrapping old_q or concurrent with old_q marking.
      mark_ref(old_q, mark_context, weak, obj);
      shenandoah_assert_marked(p, obj);
    } else if (GENERATION == OLD) {
      // Old mark, found a young pointer.
      if (heap->is_in(p)) {
        assert(heap->is_in_young(obj), "Expected young object.");
        heap->old_generation()->mark_card_as_dirty(p);
      }
    }
  }
}
325
// Non-generational marking (oop slots): bypass the generational dispatch
// above and mark straight into the single queue.
template<>
inline void ShenandoahMark::mark_through_ref<oop, ShenandoahGenerationType::NON_GEN>(oop *p, ShenandoahObjToScanQueue* q, ShenandoahObjToScanQueue* old_q, ShenandoahMarkingContext* const mark_context, bool weak) {
  mark_non_generational_ref(p, q, mark_context, weak);
}
330
// Non-generational marking (narrowOop slots): bypass the generational
// dispatch above and mark straight into the single queue.
template<>
inline void ShenandoahMark::mark_through_ref<narrowOop, ShenandoahGenerationType::NON_GEN>(narrowOop *p, ShenandoahObjToScanQueue* q, ShenandoahObjToScanQueue* old_q, ShenandoahMarkingContext* const mark_context, bool weak) {
  mark_non_generational_ref(p, q, mark_context, weak);
}
335
336 template<class T>
337 inline void ShenandoahMark::mark_non_generational_ref(T* p, ShenandoahObjToScanQueue* q,
338 ShenandoahMarkingContext* const mark_context, bool weak) {
339 oop o = RawAccess<>::oop_load(p);
340 if (!CompressedOops::is_null(o)) {
341 oop obj = CompressedOops::decode_not_null(o);
342
343 shenandoah_assert_not_forwarded(p, obj);
344 shenandoah_assert_not_in_cset_except(p, obj, ShenandoahHeap::heap()->cancelled_gc());
345
346 mark_ref(q, mark_context, weak, obj);
347
348 shenandoah_assert_marked(p, obj);
349 }
350 }
351
352 inline void ShenandoahMark::mark_ref(ShenandoahObjToScanQueue* q,
353 ShenandoahMarkingContext* const mark_context,
354 bool weak, oop obj) {
355 bool skip_live = false;
356 bool marked;
357 if (weak) {
358 marked = mark_context->mark_weak(obj);
359 } else {
360 marked = mark_context->mark_strong(obj, /* was_upgraded = */ skip_live);
361 }
362 if (marked) {
363 bool pushed = q->push(ShenandoahMarkTask(obj, skip_live, weak));
364 assert(pushed, "overflow queue should always succeed pushing");
365 }
366 }
367
// Accessor for the full set of marking queues owned by this marking phase.
ShenandoahObjToScanQueueSet* ShenandoahMark::task_queues() const {
  return _task_queues;
}
371
// Return the marking queue assigned to the worker with the given index.
ShenandoahObjToScanQueue* ShenandoahMark::get_queue(uint index) const {
  return _task_queues->queue(index);
}
375
376 ShenandoahObjToScanQueue* ShenandoahMark::get_old_queue(uint index) const {
377 if (_old_gen_task_queues != nullptr) {
378 return _old_gen_task_queues->queue(index);
379 }
380 return nullptr;
381 }
382
383 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHMARK_INLINE_HPP