/*
 * Copyright (c) 2015, 2022, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHMARK_INLINE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHMARK_INLINE_HPP

#include "gc/shenandoah/shenandoahMark.hpp"

#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahStringDedup.inline.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/devirtualizer.inline.hpp"
#include "utilities/powerOfTwo.hpp"

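// Enqueue the object for string deduplication, depending on the dedup mode the caller
// was instantiated with: ENQUEUE_DEDUP queues candidate strings as they are marked;
// ALWAYS_DEDUP queues any string candidate that has not already been requested.
// For any other mode this compiles down to a no-op.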
template <StringDedupMode STRING_DEDUP>
void ShenandoahMark::dedup_string(oop obj, StringDedup::Requests* const req) {
  if (STRING_DEDUP == ENQUEUE_DEDUP) {
    if (ShenandoahStringDedup::is_candidate(obj)) {
      req->add(obj);
    }
  } else if (STRING_DEDUP == ALWAYS_DEDUP) {
    if (ShenandoahStringDedup::is_string_candidate(obj) &&
        !ShenandoahStringDedup::dedup_requested(obj)) {
      req->add(obj);
    }
  }
}

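// Process one marking task: scan the object's reference fields with the closure, which
// marks and enqueues whatever it discovers. Regular instances are iterated directly,
// object arrays are split into chunked subtasks, and primitive arrays carry no oops.
// Liveness is counted only after the outstanding work has been pushed to the queues.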
template <class T, ShenandoahGenerationType GENERATION, StringDedupMode STRING_DEDUP>
void ShenandoahMark::do_task(ShenandoahObjToScanQueue* q, T* cl, ShenandoahLiveData* live_data, StringDedup::Requests* const req, ShenandoahMarkTask* task) {
  oop obj = task->obj();

  shenandoah_assert_not_forwarded(nullptr, obj);
  shenandoah_assert_marked(nullptr, obj);
  shenandoah_assert_not_in_cset_except(nullptr, obj, ShenandoahHeap::heap()->cancelled_gc());

  // Are we in weak subgraph scan?
  bool weak = task->is_weak();
  cl->set_weak(weak);

  if (task->is_not_chunked()) {
    if (obj->is_instance()) {
      // Case 1: Normal oop, process as usual.
      if (ContinuationGCSupport::relativize_stack_chunk(obj)) {
        // Loom doesn't support mixing of weak marking and strong marking of
        // stack chunks.
        cl->set_weak(false);
      }

      obj->oop_iterate(cl);
      dedup_string<STRING_DEDUP>(obj, req);
    } else if (obj->is_objArray()) {
      // Case 2: Object array instance and no chunk is set. Must be the first
      // time we visit it; start the chunked processing.
      do_chunked_array_start<T>(q, cl, obj, weak);
    } else {
      // Case 3: Primitive array. Do nothing, no oops there. We use the same
      // performance tweak that TypeArrayKlass::oop_oop_iterate_impl uses:
      // we skip iterating over the klass pointer since we know that
      // Universe::TypeArrayKlass never moves.
      assert (obj->is_typeArray(), "should be type array");
    }
    // Count liveness last: push the outstanding work to the queues first.
    // Avoid double-counting objects that are visited twice due to the upgrade
    // from final- to strong mark.
    if (task->count_liveness()) {
      count_liveness<GENERATION>(live_data, obj);
    }
  } else {
    // Case 4: Array chunk, has a sensible chunk id. Process it.
    do_chunked_array<T>(q, cl, obj, task->chunk(), task->pow(), weak);
  }
}

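// Accumulate the object's size into the calling worker's per-region live-data buffer,
// flushing to the region's own counter when the buffered value would overflow. A live
// humongous object is instead credited immediately to every region in its chain.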
template <ShenandoahGenerationType GENERATION>
inline void ShenandoahMark::count_liveness(ShenandoahLiveData* live_data, oop obj) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  size_t region_idx = heap->heap_region_index_containing(obj);
  ShenandoahHeapRegion* region = heap->get_region(region_idx);
  size_t size = obj->size();

  if (!region->is_humongous_start()) {
    assert(!region->is_humongous(), "Cannot have continuations here");
    ShenandoahLiveData cur = live_data[region_idx];
    size_t new_val = size + cur;
    if (new_val >= SHENANDOAH_LIVEDATA_MAX) {
      // overflow, flush to region data
      region->increase_live_data_gc_words(new_val);
      live_data[region_idx] = 0;
    } else {
      // still good, remember in locals
      live_data[region_idx] = (ShenandoahLiveData) new_val;
    }
  } else {
    shenandoah_assert_in_correct_region(nullptr, obj);
    size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);

    for (size_t i = region_idx; i < region_idx + num_regions; i++) {
      ShenandoahHeapRegion* chain_reg = heap->get_region(i);
      assert(chain_reg->is_humongous(), "Expecting a humongous region");
      chain_reg->increase_live_data_gc_words(chain_reg->used() >> LogHeapWordSize);
    }
  }
}

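// First visit of an object array: mark its klass metadata, then either scan the whole
// array inline (small arrays) or cut it into power-of-two chunk tasks that can be
// processed, and stolen, independently.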
template <class T>
inline void ShenandoahMark::do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop obj, bool weak) {
  assert(obj->is_objArray(), "expect object array");
  objArrayOop array = objArrayOop(obj);
  int len = array->length();

  // Mark objArray klass metadata
  if (Devirtualizer::do_metadata(cl)) {
    Devirtualizer::do_klass(cl, array->klass());
  }

  if (len <= (int) ObjArrayMarkingStride*2) {
    // A few slices only, process directly
    array->oop_iterate_range(cl, 0, len);
  } else {
    int bits = log2i_graceful(len);
    // Compensate for non-power-of-two arrays, cover the array in excess:
    if (len != (1 << bits)) bits++;

    // Only allow full chunks on the queue. This frees do_chunked_array() from checking from/to
    // boundaries against the array length, touching the array header on every chunk.
    //
    // To do this, we cut the prefix in full-sized chunks, and submit them on the queue.
    // If the array is not divided in chunk sizes, then there would be an irregular tail,
    // which we will process separately.

    int last_idx = 0;

    int chunk = 1;
    int pow = bits;

    // Handle overflow
    if (pow >= 31) {
      assert (pow == 31, "sanity");
      pow--;
      chunk = 2;
      last_idx = (1 << pow);
      bool pushed = q->push(ShenandoahMarkTask(array, true, weak, 1, pow));
      assert(pushed, "overflow queue should always succeed pushing");
    }

    // Split out tasks, as suggested in ShenandoahMarkTask docs. Record the last
    // successful right boundary to figure out the irregular tail.
    while ((1 << pow) > (int) ObjArrayMarkingStride &&
           (chunk*2 < ShenandoahMarkTask::chunk_size())) {
      pow--;
      int left_chunk = chunk*2 - 1;
      int right_chunk = chunk*2;
      int left_chunk_end = left_chunk * (1 << pow);
      if (left_chunk_end < len) {
        bool pushed = q->push(ShenandoahMarkTask(array, true, weak, left_chunk, pow));
        assert(pushed, "overflow queue should always succeed pushing");
        chunk = right_chunk;
        last_idx = left_chunk_end;
      } else {
        chunk = left_chunk;
      }
    }

    // Process the irregular tail, if present
    int from = last_idx;
    if (from < len) {
      array->oop_iterate_range(cl, from, len);
    }
  }
}

template <class T>
inline void ShenandoahMark::do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop obj, int chunk, int pow, bool weak) {
  assert(obj->is_objArray(), "expect object array");
  objArrayOop array = objArrayOop(obj);

  assert (ObjArrayMarkingStride > 0, "sanity");

  // Split out tasks, as suggested in ShenandoahMarkTask docs. Avoid pushing tasks that
  // are known to start beyond the array.
  while ((1 << pow) > (int) ObjArrayMarkingStride && (chunk*2 < ShenandoahMarkTask::chunk_size())) {
    pow--;
    chunk *= 2;
    bool pushed = q->push(ShenandoahMarkTask(array, true, weak, chunk - 1, pow));
    assert(pushed, "overflow queue should always succeed pushing");
  }

  int chunk_size = 1 << pow;

  int from = (chunk - 1) * chunk_size;
  int to = chunk * chunk_size;

#ifdef ASSERT
  int len = array->length();
  assert (0 <= from && from < len, "from is sane: %d/%d", from, len);
  assert (0 < to && to <= len, "to is sane: %d/%d", to, len);
#endif

  array->oop_iterate_range(cl, from, to);
}

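// Closure for draining SATB buffers: every reference recorded by the pre-write barrier
// is marked through onto this worker's scan queue.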
template <ShenandoahGenerationType GENERATION>
class ShenandoahSATBBufferClosure : public SATBBufferClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;
public:
  ShenandoahSATBBufferClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context())
  {
  }

  void do_buffer(void **buffer, size_t size) {
    assert(size == 0 || !_heap->has_forwarded_objects(), "Forwarded objects are not expected here");
    for (size_t i = 0; i < size; ++i) {
      oop *p = (oop *) &buffer[i];
      ShenandoahMark::mark_through_ref<oop, GENERATION>(p, _queue, _mark_context, false);
    }
  }
};

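// Marking step for a single reference location: load the oop, mark it weakly or
// strongly in the marking context, and push a task for it if this thread won the race
// to mark it first.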
template<class T, ShenandoahGenerationType GENERATION>
inline void ShenandoahMark::mark_through_ref(T* p, ShenandoahObjToScanQueue* q, ShenandoahMarkingContext* const mark_context, bool weak) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);

    shenandoah_assert_not_forwarded(p, obj);
    shenandoah_assert_not_in_cset_except(p, obj, ShenandoahHeap::heap()->cancelled_gc());

    bool skip_live = false;
    bool marked;
    if (weak) {
      marked = mark_context->mark_weak(obj);
    } else {
      marked = mark_context->mark_strong(obj, /* was_upgraded = */ skip_live);
    }
    if (marked) {
      bool pushed = q->push(ShenandoahMarkTask(obj, skip_live, weak));
      assert(pushed, "overflow queue should always succeed pushing");
    }

    shenandoah_assert_marked(p, obj);
  }
}

ShenandoahObjToScanQueueSet* ShenandoahMark::task_queues() const {
  return _task_queues;
}

ShenandoahObjToScanQueue* ShenandoahMark::get_queue(uint index) const {
  return _task_queues->queue(index);
}
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHMARK_INLINE_HPP
/*
 * Copyright (c) 2015, 2022, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHMARK_INLINE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHMARK_INLINE_HPP

#include "gc/shenandoah/shenandoahMark.hpp"

#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shenandoah/shenandoahAgeCensus.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahStringDedup.inline.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/devirtualizer.inline.hpp"
#include "utilities/powerOfTwo.hpp"

template <StringDedupMode STRING_DEDUP>
void ShenandoahMark::dedup_string(oop obj, StringDedup::Requests* const req) {
  if (STRING_DEDUP == ENQUEUE_DEDUP) {
    if (ShenandoahStringDedup::is_candidate(obj)) {
      req->add(obj);
    }
  } else if (STRING_DEDUP == ALWAYS_DEDUP) {
    if (ShenandoahStringDedup::is_string_candidate(obj) &&
        !ShenandoahStringDedup::dedup_requested(obj)) {
      req->add(obj);
    }
  }
}

template <class T, ShenandoahGenerationType GENERATION, StringDedupMode STRING_DEDUP>
void ShenandoahMark::do_task(ShenandoahObjToScanQueue* q, T* cl, ShenandoahLiveData* live_data, StringDedup::Requests* const req, ShenandoahMarkTask* task, uint worker_id) {
  oop obj = task->obj();

  shenandoah_assert_not_forwarded(nullptr, obj);
  shenandoah_assert_marked(nullptr, obj);
  shenandoah_assert_not_in_cset_except(nullptr, obj, ShenandoahHeap::heap()->cancelled_gc());

  // Are we in weak subgraph scan?
  bool weak = task->is_weak();
  cl->set_weak(weak);

  if (task->is_not_chunked()) {
    if (obj->is_instance()) {
      // Case 1: Normal oop, process as usual.
      if (ContinuationGCSupport::relativize_stack_chunk(obj)) {
        // Loom doesn't support mixing of weak marking and strong marking of
        // stack chunks.
        cl->set_weak(false);
      }

      obj->oop_iterate(cl);
      dedup_string<STRING_DEDUP>(obj, req);
    } else if (obj->is_objArray()) {
      // Case 2: Object array instance and no chunk is set. Must be the first
      // time we visit it; start the chunked processing.
      do_chunked_array_start<T>(q, cl, obj, weak);
    } else {
      // Case 3: Primitive array. Do nothing, no oops there. We use the same
      // performance tweak that TypeArrayKlass::oop_oop_iterate_impl uses:
      // we skip iterating over the klass pointer since we know that
      // Universe::TypeArrayKlass never moves.
      assert (obj->is_typeArray(), "should be type array");
    }
    // Count liveness last: push the outstanding work to the queues first.
    // Avoid double-counting objects that are visited twice due to the upgrade
    // from final- to strong mark.
    if (task->count_liveness()) {
      count_liveness<GENERATION>(live_data, obj, worker_id);
    }
  } else {
    // Case 4: Array chunk, has a sensible chunk id. Process it.
    do_chunked_array<T>(q, cl, obj, task->chunk(), task->pow(), weak);
  }
}

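// Generational variant of the liveness counter: besides accumulating live data per
// region, young objects are sampled into the age census when adaptive tenuring is
// enabled and the census is taken during marking rather than at evacuation time.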
template <ShenandoahGenerationType GENERATION>
inline void ShenandoahMark::count_liveness(ShenandoahLiveData* live_data, oop obj, uint worker_id) {
  const ShenandoahHeap* const heap = ShenandoahHeap::heap();
  const size_t region_idx = heap->heap_region_index_containing(obj);
  ShenandoahHeapRegion* const region = heap->get_region(region_idx);
  const size_t size = obj->size();

  // Age census for objects in the young generation
  if (GENERATION == YOUNG || (GENERATION == GLOBAL && region->is_young())) {
    assert(heap->mode()->is_generational(), "Only if generational");
    if (ShenandoahGenerationalAdaptiveTenuring && !ShenandoahGenerationalCensusAtEvac) {
      assert(region->is_young(), "Only for young objects");
      uint age = ShenandoahHeap::get_object_age(obj);
      ShenandoahAgeCensus* const census = ShenandoahGenerationalHeap::heap()->age_census();
      CENSUS_NOISE(census->add(age, region->age(), region->youth(), size, worker_id);)
      NO_CENSUS_NOISE(census->add(age, region->age(), size, worker_id);)
    }
  }

  if (!region->is_humongous_start()) {
    assert(!region->is_humongous(), "Cannot have continuations here");
    assert(region->is_affiliated(), "Do not count live data within Free Regular Region " SIZE_FORMAT, region_idx);
    ShenandoahLiveData cur = live_data[region_idx];
    size_t new_val = size + cur;
    if (new_val >= SHENANDOAH_LIVEDATA_MAX) {
      // overflow, flush to region data
      region->increase_live_data_gc_words(new_val);
      live_data[region_idx] = 0;
    } else {
      // still good, remember in locals
      live_data[region_idx] = (ShenandoahLiveData) new_val;
    }
  } else {
    shenandoah_assert_in_correct_region(nullptr, obj);
    size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);

    assert(region->is_affiliated(), "Do not count live data within FREE Humongous Start Region " SIZE_FORMAT, region_idx);
    for (size_t i = region_idx; i < region_idx + num_regions; i++) {
      ShenandoahHeapRegion* chain_reg = heap->get_region(i);
      assert(chain_reg->is_humongous(), "Expecting a humongous region");
      assert(chain_reg->is_affiliated(), "Do not count live data within FREE Humongous Continuation Region " SIZE_FORMAT, i);
      chain_reg->increase_live_data_gc_words(chain_reg->used() >> LogHeapWordSize);
    }
  }
}

template <class T>
inline void ShenandoahMark::do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop obj, bool weak) {
  assert(obj->is_objArray(), "expect object array");
  objArrayOop array = objArrayOop(obj);
  int len = array->length();

  // Mark objArray klass metadata
  if (Devirtualizer::do_metadata(cl)) {
    Devirtualizer::do_klass(cl, array->klass());
  }

  if (len <= (int) ObjArrayMarkingStride*2) {
    // A few slices only, process directly
    array->oop_iterate_range(cl, 0, len);
  } else {
    int bits = log2i_graceful(len);
    // Compensate for non-power-of-two arrays, cover the array in excess:
    if (len != (1 << bits)) bits++;

    // Only allow full chunks on the queue. This frees do_chunked_array() from checking from/to
    // boundaries against the array length, touching the array header on every chunk.
    //
    // To do this, we cut the prefix in full-sized chunks, and submit them on the queue.
    // If the array is not divided in chunk sizes, then there would be an irregular tail,
    // which we will process separately.

    int last_idx = 0;

    int chunk = 1;
    int pow = bits;

    // Handle overflow
    if (pow >= 31) {
      assert (pow == 31, "sanity");
      pow--;
      chunk = 2;
      last_idx = (1 << pow);
      bool pushed = q->push(ShenandoahMarkTask(array, true, weak, 1, pow));
      assert(pushed, "overflow queue should always succeed pushing");
    }

    // Split out tasks, as suggested in ShenandoahMarkTask docs. Record the last
    // successful right boundary to figure out the irregular tail.
    while ((1 << pow) > (int) ObjArrayMarkingStride &&
           (chunk*2 < ShenandoahMarkTask::chunk_size())) {
      pow--;
      int left_chunk = chunk*2 - 1;
      int right_chunk = chunk*2;
      int left_chunk_end = left_chunk * (1 << pow);
      if (left_chunk_end < len) {
        bool pushed = q->push(ShenandoahMarkTask(array, true, weak, left_chunk, pow));
        assert(pushed, "overflow queue should always succeed pushing");
        chunk = right_chunk;
        last_idx = left_chunk_end;
      } else {
        chunk = left_chunk;
      }
    }

    // Process the irregular tail, if present
    int from = last_idx;
    if (from < len) {
      array->oop_iterate_range(cl, from, len);
    }
  }
}

template <class T>
inline void ShenandoahMark::do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop obj, int chunk, int pow, bool weak) {
  assert(obj->is_objArray(), "expect object array");
  objArrayOop array = objArrayOop(obj);

  assert (ObjArrayMarkingStride > 0, "sanity");

  // Split out tasks, as suggested in ShenandoahMarkTask docs. Avoid pushing tasks that
  // are known to start beyond the array.
  while ((1 << pow) > (int) ObjArrayMarkingStride && (chunk*2 < ShenandoahMarkTask::chunk_size())) {
    pow--;
    chunk *= 2;
    bool pushed = q->push(ShenandoahMarkTask(array, true, weak, chunk - 1, pow));
    assert(pushed, "overflow queue should always succeed pushing");
  }

  int chunk_size = 1 << pow;

  int from = (chunk - 1) * chunk_size;
  int to = chunk * chunk_size;

#ifdef ASSERT
  int len = array->length();
  assert (0 <= from && from < len, "from is sane: %d/%d", from, len);
  assert (0 < to && to <= len, "to is sane: %d/%d", to, len);
#endif

  array->oop_iterate_range(cl, from, to);
}

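// SATB draining closure for generational mode: alongside the worker's own queue it
// carries the old-generation queue, so references that fall outside the generation
// being marked can still be marked through while old marking is in progress.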
template <ShenandoahGenerationType GENERATION>
class ShenandoahSATBBufferClosure : public SATBBufferClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahObjToScanQueue* _old_queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;
public:
  ShenandoahSATBBufferClosure(ShenandoahObjToScanQueue* q, ShenandoahObjToScanQueue* old_q) :
    _queue(q),
    _old_queue(old_q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context())
  {
  }

  void do_buffer(void **buffer, size_t size) {
    assert(size == 0 || !_heap->has_forwarded_objects() || _heap->is_concurrent_old_mark_in_progress(), "Forwarded objects are not expected here");
    for (size_t i = 0; i < size; ++i) {
      oop *p = (oop *) &buffer[i];
      ShenandoahMark::mark_through_ref<oop, GENERATION>(p, _queue, _old_queue, _mark_context, false);
    }
  }
};

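// Check whether the object belongs to the generation currently being marked; the
// GENERATION template parameter resolves this test at compile time.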
template<ShenandoahGenerationType GENERATION>
bool ShenandoahMark::in_generation(ShenandoahHeap* const heap, oop obj) {
  // Each in-line expansion of in_generation() resolves GENERATION at compile time.
  if (GENERATION == YOUNG) {
    return heap->is_in_young(obj);
  }

  if (GENERATION == OLD) {
    return heap->is_in_old(obj);
  }

  assert((GENERATION == GLOBAL || GENERATION == NON_GEN), "Unexpected generation type");
  assert(heap->is_in(obj), "Object must be in heap");
  return true;
}

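// Generational marking step: only objects inside the generation being marked are
// marked and pushed here. Young or global marking that finds an old-to-young pointer
// dirties the corresponding remembered-set card; when an old queue is available
// (bootstrap or concurrent old marking), objects outside the marked generation are
// marked onto that queue instead; old marking that finds a young object only dirties
// the card holding the reference.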
template<class T, ShenandoahGenerationType GENERATION>
inline void ShenandoahMark::mark_through_ref(T *p, ShenandoahObjToScanQueue* q, ShenandoahObjToScanQueue* old_q, ShenandoahMarkingContext* const mark_context, bool weak) {
  // Note: This is a very hot code path, so the code should be conditional on GENERATION template
  // parameter where possible, in order to generate the most efficient code.

  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);

    ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
    shenandoah_assert_not_forwarded(p, obj);
    shenandoah_assert_not_in_cset_except(p, obj, heap->cancelled_gc());
    if (in_generation<GENERATION>(heap, obj)) {
      mark_ref(q, mark_context, weak, obj);
      shenandoah_assert_marked(p, obj);
      if (GENERATION == YOUNG && heap->is_in_old(p)) {
        // Mark the card as dirty because remembered set scanning still finds an interesting pointer here.
        heap->old_generation()->mark_card_as_dirty((HeapWord*)p);
      } else if (GENERATION == GLOBAL && heap->is_in_old(p) && heap->is_in_young(obj)) {
        // Mark the card as dirty because GLOBAL marking finds an interesting pointer here.
        heap->old_generation()->mark_card_as_dirty((HeapWord*)p);
      }
    } else if (old_q != nullptr) {
      // Young mark, bootstrapping old marking or concurrent with old marking.
      mark_ref(old_q, mark_context, weak, obj);
      shenandoah_assert_marked(p, obj);
    } else if (GENERATION == OLD) {
      // Old mark, found a young pointer.
      if (heap->is_in(p)) {
        assert(heap->is_in_young(obj), "Expected young object.");
        heap->old_generation()->mark_card_as_dirty(p);
      }
    }
  }
}

template<>
inline void ShenandoahMark::mark_through_ref<oop, ShenandoahGenerationType::NON_GEN>(oop *p, ShenandoahObjToScanQueue* q, ShenandoahObjToScanQueue* old_q, ShenandoahMarkingContext* const mark_context, bool weak) {
  mark_non_generational_ref(p, q, mark_context, weak);
}

template<>
inline void ShenandoahMark::mark_through_ref<narrowOop, ShenandoahGenerationType::NON_GEN>(narrowOop *p, ShenandoahObjToScanQueue* q, ShenandoahObjToScanQueue* old_q, ShenandoahMarkingContext* const mark_context, bool weak) {
  mark_non_generational_ref(p, q, mark_context, weak);
}

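// Single-generation path used by the NON_GEN specializations above: no generation
// filtering and no remembered-set maintenance, just mark the object and push it.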
template<class T>
inline void ShenandoahMark::mark_non_generational_ref(T* p, ShenandoahObjToScanQueue* q,
                                                      ShenandoahMarkingContext* const mark_context, bool weak) {
  oop o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);

    shenandoah_assert_not_forwarded(p, obj);
    shenandoah_assert_not_in_cset_except(p, obj, ShenandoahHeap::heap()->cancelled_gc());

    mark_ref(q, mark_context, weak, obj);

    shenandoah_assert_marked(p, obj);
  }
}

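// Try to mark the object, weakly or strongly. mark_strong() reports an upgrade from an
// earlier weak mark through skip_live, which the task carries so that liveness is not
// counted twice. A successful mark pushes a new task onto the queue.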
inline void ShenandoahMark::mark_ref(ShenandoahObjToScanQueue* q,
                                     ShenandoahMarkingContext* const mark_context,
                                     bool weak, oop obj) {
  bool skip_live = false;
  bool marked;
  if (weak) {
    marked = mark_context->mark_weak(obj);
  } else {
    marked = mark_context->mark_strong(obj, /* was_upgraded = */ skip_live);
  }
  if (marked) {
    bool pushed = q->push(ShenandoahMarkTask(obj, skip_live, weak));
    assert(pushed, "overflow queue should always succeed pushing");
  }
}

ShenandoahObjToScanQueueSet* ShenandoahMark::task_queues() const {
  return _task_queues;
}

ShenandoahObjToScanQueue* ShenandoahMark::get_queue(uint index) const {
  return _task_queues->queue(index);
}

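// Old-generation queues are only present while old marking has task queues set up
// (see mark_through_ref above); otherwise nullptr tells callers to skip old-queue work.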
ShenandoahObjToScanQueue* ShenandoahMark::get_old_queue(uint index) const {
  if (_old_gen_task_queues != nullptr) {
    return _old_gen_task_queues->queue(index);
  }
  return nullptr;
}

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHMARK_INLINE_HPP