/*
 * Copyright (c) 2015, 2022, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHMARK_INLINE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHMARK_INLINE_HPP

#include "gc/shenandoah/shenandoahMark.hpp"

#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahStringDedup.inline.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/devirtualizer.inline.hpp"
#include "utilities/powerOfTwo.hpp"

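// Submit the object for string deduplication, depending on the STRING_DEDUP mode:
// ENQUEUE_DEDUP enqueues regular deduplication candidates, ALWAYS_DEDUP enqueues any
// String that has not yet been requested for deduplication; other modes do nothing.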
template <StringDedupMode STRING_DEDUP>
void ShenandoahMark::dedup_string(oop obj, StringDedup::Requests* const req) {
  if (STRING_DEDUP == ENQUEUE_DEDUP) {
    if (ShenandoahStringDedup::is_candidate(obj)) {
      req->add(obj);
    }
  } else if (STRING_DEDUP == ALWAYS_DEDUP) {
    if (ShenandoahStringDedup::is_string_candidate(obj) &&
        !ShenandoahStringDedup::dedup_requested(obj)) {
      req->add(obj);
    }
  }
}

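// Process a single marking task: scan the references of the already-marked object with
// the given closure, pushing newly marked objects onto the queue, and account the object
// size in the per-region liveness data. Large object arrays are split into chunk tasks
// instead of being scanned in one pass.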
template <class T, StringDedupMode STRING_DEDUP>
void ShenandoahMark::do_task(ShenandoahObjToScanQueue* q, T* cl, ShenandoahLiveData* live_data, StringDedup::Requests* const req, ShenandoahMarkTask* task) {
  oop obj = task->obj();

  shenandoah_assert_not_forwarded(nullptr, obj);
  shenandoah_assert_marked(nullptr, obj);
  shenandoah_assert_not_in_cset_except(nullptr, obj, ShenandoahHeap::heap()->cancelled_gc());

  // Are we in weak subgraph scan?
  bool weak = task->is_weak();
  cl->set_weak(weak);

  if (task->is_not_chunked()) {
    if (obj->is_instance()) {
      // Case 1: Normal oop, process as usual.
      if (ContinuationGCSupport::relativize_stack_chunk(obj)) {
        // Loom doesn't support mixing of weak marking and strong marking of
        // stack chunks.
        cl->set_weak(false);
      }

      obj->oop_iterate(cl);
      dedup_string<STRING_DEDUP>(obj, req);
    } else if (obj->is_objArray()) {
      // Case 2: Object array instance and no chunk is set. Must be the first
      // time we visit it, start the chunked processing.
      do_chunked_array_start<T>(q, cl, obj, weak);
    } else {
      // Case 3: Primitive array. Do nothing, no oops there. We use the same
      // performance tweak TypeArrayKlass::oop_oop_iterate_impl is using:
      // We skip iterating over the klass pointer since we know that
      // Universe::TypeArrayKlass never moves.
      assert (obj->is_typeArray(), "should be type array");
    }
    // Count liveness last: push the outstanding work to the queues first.
    // Avoid double-counting objects that are visited twice due to upgrade
    // from final- to strong mark.
    if (task->count_liveness()) {
      count_liveness(live_data, obj);
    }
  } else {
    // Case 4: Array chunk, has sensible chunk id. Process it.
    do_chunked_array<T>(q, cl, obj, task->chunk(), task->pow(), weak);
  }
}

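// Add the object's size to the liveness counter of its region. Counts are accumulated
// in the per-worker live_data array and flushed to the region on overflow; humongous
// objects update their start and continuation regions directly.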
inline void ShenandoahMark::count_liveness(ShenandoahLiveData* live_data, oop obj) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  size_t region_idx = heap->heap_region_index_containing(obj);
  ShenandoahHeapRegion* region = heap->get_region(region_idx);
  size_t size = obj->size();

  if (!region->is_humongous_start()) {
    assert(!region->is_humongous(), "Cannot have continuations here");
    ShenandoahLiveData cur = live_data[region_idx];
    size_t new_val = size + cur;
    if (new_val >= SHENANDOAH_LIVEDATA_MAX) {
      // overflow, flush to region data
      region->increase_live_data_gc_words(new_val);
      live_data[region_idx] = 0;
    } else {
      // still good, remember in locals
      live_data[region_idx] = (ShenandoahLiveData) new_val;
    }
  } else {
    shenandoah_assert_in_correct_region(nullptr, obj);
    size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);

    for (size_t i = region_idx; i < region_idx + num_regions; i++) {
      ShenandoahHeapRegion* chain_reg = heap->get_region(i);
      assert(chain_reg->is_humongous(), "Expecting a humongous region");
      chain_reg->increase_live_data_gc_words(chain_reg->used() >> LogHeapWordSize);
    }
  }
}

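// First visit of an object array: small arrays are scanned directly, large arrays are
// split into chunk tasks that are pushed onto the queue for parallel processing.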
template <class T>
inline void ShenandoahMark::do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop obj, bool weak) {
  assert(obj->is_objArray(), "expect object array");
  objArrayOop array = objArrayOop(obj);
  int len = array->length();

  // Mark objArray klass metadata
  if (Devirtualizer::do_metadata(cl)) {
    Devirtualizer::do_klass(cl, array->klass());
  }

  if (len <= (int) ObjArrayMarkingStride*2) {
    // A few slices only, process directly
    array->oop_iterate_range(cl, 0, len);
  } else {
    int bits = log2i_graceful(len);
    // Compensate for non-power-of-two arrays, cover the array in excess:
    if (len != (1 << bits)) bits++;

    // Only allow full chunks on the queue. This frees do_chunked_array() from checking
    // from/to boundaries against array->length(), touching the array header on every chunk.
    //
    // To do this, we cut the prefix in full-sized chunks, and submit them on the queue.
    // If the array is not divided in chunk sizes, then there would be an irregular tail,
    // which we will process first.

    int last_idx = 0;

    int chunk = 1;
    int pow = bits;

    // Handle overflow
    if (pow >= 31) {
      assert (pow == 31, "sanity");
      pow--;
      chunk = 2;
      last_idx = (1 << pow);
      bool pushed = q->push(ShenandoahMarkTask(array, true, weak, 1, pow));
      assert(pushed, "overflow queue should always succeed pushing");
    }

    // Split out tasks, as suggested in ShenandoahMarkTask docs. Record the last
    // successful right boundary to figure out the irregular tail.
    while ((1 << pow) > (int) ObjArrayMarkingStride &&
           (chunk*2 < ShenandoahMarkTask::chunk_size())) {
      pow--;
      int left_chunk = chunk*2 - 1;
      int right_chunk = chunk*2;
      int left_chunk_end = left_chunk * (1 << pow);
      if (left_chunk_end < len) {
        bool pushed = q->push(ShenandoahMarkTask(array, true, weak, left_chunk, pow));
        assert(pushed, "overflow queue should always succeed pushing");
        chunk = right_chunk;
        last_idx = left_chunk_end;
      } else {
        chunk = left_chunk;
      }
    }

    // Process the irregular tail, if present
    int from = last_idx;
    if (from < len) {
      array->oop_iterate_range(cl, from, len);
    }
  }
}

template <class T>
inline void ShenandoahMark::do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop obj, int chunk, int pow, bool weak) {
  assert(obj->is_objArray(), "expect object array");
  objArrayOop array = objArrayOop(obj);

  assert (ObjArrayMarkingStride > 0, "sanity");

  // Split out tasks, as suggested in ShenandoahMarkTask docs. Avoid pushing tasks that
  // are known to start beyond the array.
  while ((1 << pow) > (int) ObjArrayMarkingStride && (chunk*2 < ShenandoahMarkTask::chunk_size())) {
    pow--;
    chunk *= 2;
    bool pushed = q->push(ShenandoahMarkTask(array, true, weak, chunk - 1, pow));
    assert(pushed, "overflow queue should always succeed pushing");
  }

  int chunk_size = 1 << pow;

  int from = (chunk - 1) * chunk_size;
  int to = chunk * chunk_size;

#ifdef ASSERT
  int len = array->length();
  assert (0 <= from && from < len, "from is sane: %d/%d", from, len);
  assert (0 < to && to <= len, "to is sane: %d/%d", to, len);
#endif

  array->oop_iterate_range(cl, from, to);
}

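// Drains SATB buffers: every reference recorded by the write barrier is marked through
// the worker's task queue.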
class ShenandoahSATBBufferClosure : public SATBBufferClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;
public:
  ShenandoahSATBBufferClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context())
  {
  }

  void do_buffer(void **buffer, size_t size) {
    assert(size == 0 || !_heap->has_forwarded_objects(), "Forwarded objects are not expected here");
    for (size_t i = 0; i < size; ++i) {
      oop *p = (oop *) &buffer[i];
      ShenandoahMark::mark_through_ref<oop>(p, _queue, _mark_context, false);
    }
  }
};

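// Mark the object referenced by p, if it is not already marked, and push a task for it
// onto the queue for subsequent scanning.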
template<class T>
inline void ShenandoahMark::mark_through_ref(T* p, ShenandoahObjToScanQueue* q, ShenandoahMarkingContext* const mark_context, bool weak) {
  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);

    shenandoah_assert_not_forwarded(p, obj);
    shenandoah_assert_not_in_cset_except(p, obj, ShenandoahHeap::heap()->cancelled_gc());

    bool skip_live = false;
    bool marked;
    if (weak) {
      marked = mark_context->mark_weak(obj);
    } else {
      marked = mark_context->mark_strong(obj, /* was_upgraded = */ skip_live);
    }
    if (marked) {
      bool pushed = q->push(ShenandoahMarkTask(obj, skip_live, weak));
      assert(pushed, "overflow queue should always succeed pushing");
    }

    shenandoah_assert_marked(p, obj);
  }
}

ShenandoahObjToScanQueueSet* ShenandoahMark::task_queues() const {
  return _task_queues;
}

ShenandoahObjToScanQueue* ShenandoahMark::get_queue(uint index) const {
  return _task_queues->queue(index);
}
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHMARK_INLINE_HPP
|
/*
 * Copyright (c) 2015, 2022, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHMARK_INLINE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHMARK_INLINE_HPP

#include "gc/shenandoah/shenandoahMark.hpp"

#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shenandoah/shenandoahAgeCensus.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp" // for ShenandoahGenerationalHeap::heap() used below
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahOldGeneration.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
#include "gc/shenandoah/shenandoahStringDedup.inline.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/devirtualizer.inline.hpp"
#include "utilities/powerOfTwo.hpp"

template <StringDedupMode STRING_DEDUP>
void ShenandoahMark::dedup_string(oop obj, StringDedup::Requests* const req) {
  if (STRING_DEDUP == ENQUEUE_DEDUP) {
    if (ShenandoahStringDedup::is_candidate(obj)) {
      req->add(obj);
    }
  } else if (STRING_DEDUP == ALWAYS_DEDUP) {
    if (ShenandoahStringDedup::is_string_candidate(obj) &&
        !ShenandoahStringDedup::dedup_requested(obj)) {
      req->add(obj);
    }
  }
}

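// Process a single marking task: scan the references of the already-marked object with
// the given closure, pushing newly marked objects onto the queue, and account the object
// size in the per-region liveness data. GENERATION selects the generation being marked
// at compile time; worker_id identifies the worker for the per-worker age census.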
template <class T, ShenandoahGenerationType GENERATION, StringDedupMode STRING_DEDUP>
void ShenandoahMark::do_task(ShenandoahObjToScanQueue* q, T* cl, ShenandoahLiveData* live_data, StringDedup::Requests* const req, ShenandoahMarkTask* task, uint worker_id) {
  oop obj = task->obj();

  shenandoah_assert_not_forwarded(nullptr, obj);
  shenandoah_assert_marked(nullptr, obj);
  shenandoah_assert_not_in_cset_except(nullptr, obj, ShenandoahHeap::heap()->cancelled_gc());

  // Are we in weak subgraph scan?
  bool weak = task->is_weak();
  cl->set_weak(weak);

  if (task->is_not_chunked()) {
    if (obj->is_instance()) {
      // Case 1: Normal oop, process as usual.
      if (ContinuationGCSupport::relativize_stack_chunk(obj)) {
        // Loom doesn't support mixing of weak marking and strong marking of
        // stack chunks.
        cl->set_weak(false);
      }

      obj->oop_iterate(cl);
      dedup_string<STRING_DEDUP>(obj, req);
    } else if (obj->is_objArray()) {
      // Case 2: Object array instance and no chunk is set. Must be the first
      // time we visit it, start the chunked processing.
      do_chunked_array_start<T>(q, cl, obj, weak);
    } else {
      // Case 3: Primitive array. Do nothing, no oops there. We use the same
      // performance tweak TypeArrayKlass::oop_oop_iterate_impl is using:
      // We skip iterating over the klass pointer since we know that
      // Universe::TypeArrayKlass never moves.
      assert (obj->is_typeArray(), "should be type array");
    }
    // Count liveness last: push the outstanding work to the queues first.
    // Avoid double-counting objects that are visited twice due to upgrade
    // from final- to strong mark.
    if (task->count_liveness()) {
      count_liveness<GENERATION>(live_data, obj, worker_id);
    }
  } else {
    // Case 4: Array chunk, has sensible chunk id. Process it.
    do_chunked_array<T>(q, cl, obj, task->chunk(), task->pow(), weak);
  }
}

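// Add the object's size to the liveness counter of its region, and record the object's
// age in the census when marking young (or young regions under global marking) with
// adaptive tenuring enabled. Counts are accumulated in the per-worker live_data array
// and flushed to the region on overflow; humongous objects update their regions directly.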
template <ShenandoahGenerationType GENERATION>
inline void ShenandoahMark::count_liveness(ShenandoahLiveData* live_data, oop obj, uint worker_id) {
  const ShenandoahHeap* const heap = ShenandoahHeap::heap();
  const size_t region_idx = heap->heap_region_index_containing(obj);
  ShenandoahHeapRegion* const region = heap->get_region(region_idx);
  const size_t size = obj->size();

  // Age census for objects in the young generation
  if (GENERATION == YOUNG || (GENERATION == GLOBAL && region->is_young())) {
    assert(heap->mode()->is_generational(), "Only if generational");
    if (ShenandoahGenerationalAdaptiveTenuring && !ShenandoahGenerationalCensusAtEvac) {
      assert(region->is_young(), "Only for young objects");
      uint age = ShenandoahHeap::get_object_age(obj);
      ShenandoahAgeCensus* const census = ShenandoahGenerationalHeap::heap()->age_census();
      CENSUS_NOISE(census->add(age, region->age(), region->youth(), size, worker_id);)
      NO_CENSUS_NOISE(census->add(age, region->age(), size, worker_id);)
    }
  }

  if (!region->is_humongous_start()) {
    assert(!region->is_humongous(), "Cannot have continuations here");
    assert(region->is_affiliated(), "Do not count live data within Free Regular Region " SIZE_FORMAT, region_idx);
    ShenandoahLiveData cur = live_data[region_idx];
    size_t new_val = size + cur;
    if (new_val >= SHENANDOAH_LIVEDATA_MAX) {
      // overflow, flush to region data
      region->increase_live_data_gc_words(new_val);
      live_data[region_idx] = 0;
    } else {
      // still good, remember in locals
      live_data[region_idx] = (ShenandoahLiveData) new_val;
    }
  } else {
    shenandoah_assert_in_correct_region(nullptr, obj);
    size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);

    assert(region->is_affiliated(), "Do not count live data within FREE Humongous Start Region " SIZE_FORMAT, region_idx);
    for (size_t i = region_idx; i < region_idx + num_regions; i++) {
      ShenandoahHeapRegion* chain_reg = heap->get_region(i);
      assert(chain_reg->is_humongous(), "Expecting a humongous region");
      assert(chain_reg->is_affiliated(), "Do not count live data within FREE Humongous Continuation Region " SIZE_FORMAT, i);
      chain_reg->increase_live_data_gc_words(chain_reg->used() >> LogHeapWordSize);
    }
  }
}

template <class T>
inline void ShenandoahMark::do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop obj, bool weak) {
  assert(obj->is_objArray(), "expect object array");
  objArrayOop array = objArrayOop(obj);
  int len = array->length();

  // Mark objArray klass metadata
  if (Devirtualizer::do_metadata(cl)) {
    Devirtualizer::do_klass(cl, array->klass());
  }

  if (len <= (int) ObjArrayMarkingStride*2) {
    // A few slices only, process directly
    array->oop_iterate_range(cl, 0, len);
  } else {
    int bits = log2i_graceful(len);
    // Compensate for non-power-of-two arrays, cover the array in excess:
    if (len != (1 << bits)) bits++;

    // Only allow full chunks on the queue. This frees do_chunked_array() from checking
    // from/to boundaries against array->length(), touching the array header on every chunk.
    //
    // To do this, we cut the prefix in full-sized chunks, and submit them on the queue.
    // If the array is not divided in chunk sizes, then there would be an irregular tail,
    // which we will process first.

    int last_idx = 0;

    int chunk = 1;
    int pow = bits;

    // Handle overflow
    if (pow >= 31) {
      assert (pow == 31, "sanity");
      pow--;
      chunk = 2;
      last_idx = (1 << pow);
      bool pushed = q->push(ShenandoahMarkTask(array, true, weak, 1, pow));
      assert(pushed, "overflow queue should always succeed pushing");
    }

    // Split out tasks, as suggested in ShenandoahMarkTask docs. Record the last
    // successful right boundary to figure out the irregular tail.
    while ((1 << pow) > (int) ObjArrayMarkingStride &&
           (chunk*2 < ShenandoahMarkTask::chunk_size())) {
      pow--;
      int left_chunk = chunk*2 - 1;
      int right_chunk = chunk*2;
      int left_chunk_end = left_chunk * (1 << pow);
      if (left_chunk_end < len) {
        bool pushed = q->push(ShenandoahMarkTask(array, true, weak, left_chunk, pow));
        assert(pushed, "overflow queue should always succeed pushing");
        chunk = right_chunk;
        last_idx = left_chunk_end;
      } else {
        chunk = left_chunk;
      }
    }

    // Process the irregular tail, if present
    int from = last_idx;
    if (from < len) {
      array->oop_iterate_range(cl, from, len);
    }
  }
}

template <class T>
inline void ShenandoahMark::do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop obj, int chunk, int pow, bool weak) {
  assert(obj->is_objArray(), "expect object array");
  objArrayOop array = objArrayOop(obj);

  assert (ObjArrayMarkingStride > 0, "sanity");

  // Split out tasks, as suggested in ShenandoahMarkTask docs. Avoid pushing tasks that
  // are known to start beyond the array.
  while ((1 << pow) > (int) ObjArrayMarkingStride && (chunk*2 < ShenandoahMarkTask::chunk_size())) {
    pow--;
    chunk *= 2;
    bool pushed = q->push(ShenandoahMarkTask(array, true, weak, chunk - 1, pow));
    assert(pushed, "overflow queue should always succeed pushing");
  }

  int chunk_size = 1 << pow;

  int from = (chunk - 1) * chunk_size;
  int to = chunk * chunk_size;

#ifdef ASSERT
  int len = array->length();
  assert (0 <= from && from < len, "from is sane: %d/%d", from, len);
  assert (0 < to && to <= len, "to is sane: %d/%d", to, len);
#endif

  array->oop_iterate_range(cl, from, to);
}

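// Drains SATB buffers: every reference recorded by the write barrier is marked through
// the worker's queue, or through the old-generation queue for objects outside the
// generation currently being marked.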
template <ShenandoahGenerationType GENERATION>
class ShenandoahSATBBufferClosure : public SATBBufferClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahObjToScanQueue* _old_queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;
public:
  ShenandoahSATBBufferClosure(ShenandoahObjToScanQueue* q, ShenandoahObjToScanQueue* old_q) :
    _queue(q),
    _old_queue(old_q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context())
  {
  }

  void do_buffer(void **buffer, size_t size) {
    assert(size == 0 || !_heap->has_forwarded_objects() || _heap->is_concurrent_old_mark_in_progress(), "Forwarded objects are not expected here");
    for (size_t i = 0; i < size; ++i) {
      oop *p = (oop *) &buffer[i];
      ShenandoahMark::mark_through_ref<oop, GENERATION>(p, _queue, _old_queue, _mark_context, false);
    }
  }
};

template<ShenandoahGenerationType GENERATION>
bool ShenandoahMark::in_generation(ShenandoahHeap* const heap, oop obj) {
  // Each in-line expansion of in_generation() resolves GENERATION at compile time.
  if (GENERATION == YOUNG) {
    return heap->is_in_young(obj);
  }

  if (GENERATION == OLD) {
    return heap->is_in_old(obj);
  }

  assert((GENERATION == GLOBAL || GENERATION == NON_GEN), "Unexpected generation type");
  assert(heap->is_in(obj), "Object must be in heap");
  return true;
}

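// Mark the object referenced by p if it belongs to the generation being marked; objects
// outside that generation go to the old-generation queue when one is available. Pointers
// from old memory to young objects also dirty the corresponding remembered set card.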
template<class T, ShenandoahGenerationType GENERATION>
inline void ShenandoahMark::mark_through_ref(T *p, ShenandoahObjToScanQueue* q, ShenandoahObjToScanQueue* old_q, ShenandoahMarkingContext* const mark_context, bool weak) {
  // Note: This is a very hot code path, so the code should be conditional on the GENERATION
  // template parameter where possible, in order to generate the most efficient code.

  T o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);

    ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
    shenandoah_assert_not_forwarded(p, obj);
    shenandoah_assert_not_in_cset_except(p, obj, heap->cancelled_gc());
    if (in_generation<GENERATION>(heap, obj)) {
      mark_ref(q, mark_context, weak, obj);
      shenandoah_assert_marked(p, obj);
      if (GENERATION == YOUNG && heap->is_in_old(p)) {
        // Mark the card as dirty, because remembered set scanning still needs to find
        // this interesting pointer afterwards.
        heap->old_generation()->mark_card_as_dirty((HeapWord*)p);
      } else if (GENERATION == GLOBAL && heap->is_in_old(p) && heap->is_in_young(obj)) {
        // Mark the card as dirty, because GLOBAL marking found an interesting old-to-young pointer.
        heap->old_generation()->mark_card_as_dirty((HeapWord*)p);
      }
    } else if (old_q != nullptr) {
      // Young mark, bootstrapping old_q or concurrent with old_q marking.
      mark_ref(old_q, mark_context, weak, obj);
      shenandoah_assert_marked(p, obj);
    } else if (GENERATION == OLD) {
      // Old mark, found a young pointer.
      if (heap->is_in(p)) {
        assert(heap->is_in_young(obj), "Expected young object.");
        heap->old_generation()->mark_card_as_dirty(p);
      }
    }
  }
}

template<>
inline void ShenandoahMark::mark_through_ref<oop, ShenandoahGenerationType::NON_GEN>(oop *p, ShenandoahObjToScanQueue* q, ShenandoahObjToScanQueue* old_q, ShenandoahMarkingContext* const mark_context, bool weak) {
  mark_non_generational_ref(p, q, mark_context, weak);
}

template<>
inline void ShenandoahMark::mark_through_ref<narrowOop, ShenandoahGenerationType::NON_GEN>(narrowOop *p, ShenandoahObjToScanQueue* q, ShenandoahObjToScanQueue* old_q, ShenandoahMarkingContext* const mark_context, bool weak) {
  mark_non_generational_ref(p, q, mark_context, weak);
}

template<class T>
inline void ShenandoahMark::mark_non_generational_ref(T* p, ShenandoahObjToScanQueue* q,
                                                      ShenandoahMarkingContext* const mark_context, bool weak) {
  oop o = RawAccess<>::oop_load(p);
  if (!CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);

    shenandoah_assert_not_forwarded(p, obj);
    shenandoah_assert_not_in_cset_except(p, obj, ShenandoahHeap::heap()->cancelled_gc());

    mark_ref(q, mark_context, weak, obj);

    shenandoah_assert_marked(p, obj);
  }
}

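// Mark the object, weakly or strongly, and push a task for it if this thread was the
// first to mark it.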
inline void ShenandoahMark::mark_ref(ShenandoahObjToScanQueue* q,
                                     ShenandoahMarkingContext* const mark_context,
                                     bool weak, oop obj) {
  bool skip_live = false;
  bool marked;
  if (weak) {
    marked = mark_context->mark_weak(obj);
  } else {
    marked = mark_context->mark_strong(obj, /* was_upgraded = */ skip_live);
  }
  if (marked) {
    bool pushed = q->push(ShenandoahMarkTask(obj, skip_live, weak));
    assert(pushed, "overflow queue should always succeed pushing");
  }
}

ShenandoahObjToScanQueueSet* ShenandoahMark::task_queues() const {
  return _task_queues;
}

ShenandoahObjToScanQueue* ShenandoahMark::get_queue(uint index) const {
  return _task_queues->queue(index);
}

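// Returns the old-generation queue for the given worker, or nullptr when old-generation
// marking queues are not in use.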
ShenandoahObjToScanQueue* ShenandoahMark::get_old_queue(uint index) const {
  if (_old_gen_task_queues != nullptr) {
    return _old_gen_task_queues->queue(index);
  }
  return nullptr;
}

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHMARK_INLINE_HPP
|