1 /*
2 * Copyright (c) 2015, 2022, Red Hat, Inc. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
39 #include "oops/compressedOops.inline.hpp"
40 #include "oops/oop.inline.hpp"
41 #include "runtime/prefetch.inline.hpp"
42 #include "utilities/devirtualizer.inline.hpp"
43 #include "utilities/powerOfTwo.hpp"
44
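// Request string deduplication for obj according to the selected mode: ENQUEUE_DEDUP enqueues
// any dedup candidate, ALWAYS_DEDUP enqueues candidate strings that have not been requested yet.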
45 template <StringDedupMode STRING_DEDUP>
46 void ShenandoahMark::dedup_string(oop obj, StringDedup::Requests* const req) {
47 if (STRING_DEDUP == ENQUEUE_DEDUP) {
48 if (ShenandoahStringDedup::is_candidate(obj)) {
49 req->add(obj);
50 }
51 } else if (STRING_DEDUP == ALWAYS_DEDUP) {
52 if (ShenandoahStringDedup::is_string_candidate(obj) &&
53 !ShenandoahStringDedup::dedup_requested(obj)) {
54 req->add(obj);
55 }
56 }
57 }
58
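// Process a single mark task: scan regular instances directly, start chunked scanning for
// object arrays, skip primitive arrays, and continue previously split array chunks.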
59 template <class T, StringDedupMode STRING_DEDUP>
60 void ShenandoahMark::do_task(ShenandoahObjToScanQueue* q, T* cl, ShenandoahLiveData* live_data, StringDedup::Requests* const req, ShenandoahMarkTask* task) {
61 oop obj = task->obj();
62
63 shenandoah_assert_not_forwarded(nullptr, obj);
64 shenandoah_assert_marked(nullptr, obj);
65 shenandoah_assert_not_in_cset_except(nullptr, obj, ShenandoahHeap::heap()->cancelled_gc());
66
67 // Are we in weak subgraph scan?
68 bool weak = task->is_weak();
69 cl->set_weak(weak);
70
71 if (task->is_not_chunked()) {
72 if (obj->is_instance()) {
73 // Case 1: Normal oop, process as usual.
74 if (ContinuationGCSupport::relativize_stack_chunk(obj)) {
75 // Loom doesn't support mixing of weak marking and strong marking of
76 // stack chunks.
77 cl->set_weak(false);
78 }
79
80 obj->oop_iterate(cl);
81 dedup_string<STRING_DEDUP>(obj, req);
82 } else if (obj->is_objArray()) {
83 // Case 2: Object array instance and no chunk is set. This must be the first
84 // time we visit it, so start the chunked processing.
85 do_chunked_array_start<T>(q, cl, obj, weak);
86 } else {
87 // Case 3: Primitive array. Do nothing, there are no oops there. We use the
88 // same performance tweak that TypeArrayKlass::oop_oop_iterate_impl uses:
89 // we skip iterating over the klass pointer since we know that
90 // Universe::TypeArrayKlass never moves.
91 assert (obj->is_typeArray(), "should be type array");
92 }
93 // Count liveness last: push the outstanding work to the queues first.
94 // Avoid double-counting objects that are visited twice due to the upgrade
95 // from final- to strong mark.
96 if (task->count_liveness()) {
97 count_liveness(live_data, obj);
98 }
99 } else {
100 // Case 4: Array chunk, has sensible chunk id. Process it.
101 do_chunked_array<T>(q, cl, obj, task->chunk(), task->pow(), weak);
102 }
103 }
104
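// Accumulate the object's size into the worker-local liveness counter for its region,
// flushing to the region itself on overflow. Humongous objects are accounted directly
// against every region in their chain.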
105 inline void ShenandoahMark::count_liveness(ShenandoahLiveData* live_data, oop obj) {
106 ShenandoahHeap* const heap = ShenandoahHeap::heap();
107 size_t region_idx = heap->heap_region_index_containing(obj);
108 ShenandoahHeapRegion* region = heap->get_region(region_idx);
109 size_t size = obj->size();
110
111 if (!region->is_humongous_start()) {
112 assert(!region->is_humongous(), "Cannot have continuations here");
113 ShenandoahLiveData cur = live_data[region_idx];
114 size_t new_val = size + cur;
115 if (new_val >= SHENANDOAH_LIVEDATA_MAX) {
116 // overflow, flush to region data
117 region->increase_live_data_gc_words(new_val);
118 live_data[region_idx] = 0;
119 } else {
120 // still good, remember in locals
121 live_data[region_idx] = (ShenandoahLiveData) new_val;
122 }
123 } else {
124 shenandoah_assert_in_correct_region(nullptr, obj);
125 size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
126
127 for (size_t i = region_idx; i < region_idx + num_regions; i++) {
128 ShenandoahHeapRegion* chain_reg = heap->get_region(i);
129 assert(chain_reg->is_humongous(), "Expecting a humongous region");
130 chain_reg->increase_live_data_gc_words(chain_reg->used() >> LogHeapWordSize);
131 }
132 }
133 }
134
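// First visit of an object array: small arrays (up to 2 * ObjArrayMarkingStride elements)
// are scanned in place, larger ones are split into chunk tasks for do_chunked_array().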
135 template <class T>
136 inline void ShenandoahMark::do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop obj, bool weak) {
137 assert(obj->is_objArray(), "expect object array");
138 objArrayOop array = objArrayOop(obj);
139 int len = array->length();
140
141 // Mark objArray klass metadata
142 if (Devirtualizer::do_metadata(cl)) {
143 Devirtualizer::do_klass(cl, array->klass());
144 }
145
146 if (len <= (int) ObjArrayMarkingStride*2) {
147 // A few slices only, process directly
148 array->oop_iterate_range(cl, 0, len);
149 } else {
212 pow--;
213 chunk *= 2;
214 bool pushed = q->push(ShenandoahMarkTask(array, true, weak, chunk - 1, pow));
215 assert(pushed, "overflow queue should always succeed pushing");
216 }
217
218 int chunk_size = 1 << pow;
219
220 int from = (chunk - 1) * chunk_size;
221 int to = chunk * chunk_size;
222
223 #ifdef ASSERT
224 int len = array->length();
225 assert (0 <= from && from < len, "from is sane: %d/%d", from, len);
226 assert (0 < to && to <= len, "to is sane: %d/%d", to, len);
227 #endif
228
229 array->oop_iterate_range(cl, from, to);
230 }
231
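// Drains SATB buffers by marking every recorded reference through the regular marking path.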
232 class ShenandoahSATBBufferClosure : public SATBBufferClosure {
233 private:
234 ShenandoahObjToScanQueue* _queue;
235 ShenandoahHeap* _heap;
236 ShenandoahMarkingContext* const _mark_context;
237 public:
238 ShenandoahSATBBufferClosure(ShenandoahObjToScanQueue* q) :
239 _queue(q),
240 _heap(ShenandoahHeap::heap()),
241 _mark_context(_heap->marking_context())
242 {
243 }
244
245 void do_buffer(void **buffer, size_t size) {
246 assert(size == 0 || !_heap->has_forwarded_objects(), "Forwarded objects are not expected here");
247 for (size_t i = 0; i < size; ++i) {
248 oop *p = (oop *) &buffer[i];
249 ShenandoahMark::mark_through_ref<oop>(p, _queue, _mark_context, false);
250 }
251 }
252 };
253
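// Mark the object referenced from p (weakly or strongly) and push a scan task for it when the
// mark was newly set by this call.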
254 template<class T>
255 inline void ShenandoahMark::mark_through_ref(T* p, ShenandoahObjToScanQueue* q, ShenandoahMarkingContext* const mark_context, bool weak) {
256 T o = RawAccess<>::oop_load(p);
257 if (!CompressedOops::is_null(o)) {
258 oop obj = CompressedOops::decode_not_null(o);
259
260 shenandoah_assert_not_forwarded(p, obj);
261 shenandoah_assert_not_in_cset_except(p, obj, ShenandoahHeap::heap()->cancelled_gc());
262
263 bool skip_live = false;
264 bool marked;
265 if (weak) {
266 marked = mark_context->mark_weak(obj);
267 } else {
268 marked = mark_context->mark_strong(obj, /* was_upgraded = */ skip_live);
269 }
270 if (marked) {
271 bool pushed = q->push(ShenandoahMarkTask(obj, skip_live, weak));
272 assert(pushed, "overflow queue should always succeed pushing");
273 }
274
275 shenandoah_assert_marked(p, obj);
276 }
277 }
278
279 ShenandoahObjToScanQueueSet* ShenandoahMark::task_queues() const {
280 return _task_queues;
281 }
282
283 ShenandoahObjToScanQueue* ShenandoahMark::get_queue(uint index) const {
284 return _task_queues->queue(index);
285 }
286 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHMARK_INLINE_HPP
|
1 /*
2 * Copyright (c) 2015, 2022, Red Hat, Inc. All rights reserved.
3 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
40 #include "oops/compressedOops.inline.hpp"
41 #include "oops/oop.inline.hpp"
42 #include "runtime/prefetch.inline.hpp"
43 #include "utilities/devirtualizer.inline.hpp"
44 #include "utilities/powerOfTwo.hpp"
45
46 template <StringDedupMode STRING_DEDUP>
47 void ShenandoahMark::dedup_string(oop obj, StringDedup::Requests* const req) {
48 if (STRING_DEDUP == ENQUEUE_DEDUP) {
49 if (ShenandoahStringDedup::is_candidate(obj)) {
50 req->add(obj);
51 }
52 } else if (STRING_DEDUP == ALWAYS_DEDUP) {
53 if (ShenandoahStringDedup::is_string_candidate(obj) &&
54 !ShenandoahStringDedup::dedup_requested(obj)) {
55 req->add(obj);
56 }
57 }
58 }
59
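// Generational variant of do_task(): GENERATION selects the marking mode at compile time, and
// worker_id attributes age-census updates to the marking worker.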
60 template <class T, ShenandoahGenerationType GENERATION, StringDedupMode STRING_DEDUP>
61 void ShenandoahMark::do_task(ShenandoahObjToScanQueue* q, T* cl, ShenandoahLiveData* live_data, StringDedup::Requests* const req, ShenandoahMarkTask* task, uint worker_id) {
62 oop obj = task->obj();
63
64 // TODO: This will push array chunks into the mark queue with no regard for
65 // generations. I don't think it will break anything, but the young generation
66 // scan might end up processing some old generation array chunks.
67
68 shenandoah_assert_not_forwarded(nullptr, obj);
69 shenandoah_assert_marked(nullptr, obj);
70 shenandoah_assert_not_in_cset_except(nullptr, obj, ShenandoahHeap::heap()->cancelled_gc());
71
72 // Are we in weak subgraph scan?
73 bool weak = task->is_weak();
74 cl->set_weak(weak);
75
76 if (task->is_not_chunked()) {
77 if (obj->is_instance()) {
78 // Case 1: Normal oop, process as usual.
79 if (ContinuationGCSupport::relativize_stack_chunk(obj)) {
80 // Loom doesn't support mixing of weak marking and strong marking of
81 // stack chunks.
82 cl->set_weak(false);
83 }
84
85 obj->oop_iterate(cl);
86 dedup_string<STRING_DEDUP>(obj, req);
87 } else if (obj->is_objArray()) {
88 // Case 2: Object array instance and no chunk is set. This must be the first
89 // time we visit it, so start the chunked processing.
90 do_chunked_array_start<T>(q, cl, obj, weak);
91 } else {
92 // Case 3: Primitive array. Do nothing, there are no oops there. We use the
93 // same performance tweak that TypeArrayKlass::oop_oop_iterate_impl uses:
94 // we skip iterating over the klass pointer since we know that
95 // Universe::TypeArrayKlass never moves.
96 assert (obj->is_typeArray(), "should be type array");
97 }
98 // Count liveness last: push the outstanding work to the queues first.
99 // Avoid double-counting objects that are visited twice due to the upgrade
100 // from final- to strong mark.
101 if (task->count_liveness()) {
102 count_liveness<GENERATION>(live_data, obj, worker_id);
103 }
104 } else {
105 // Case 4: Array chunk, has sensible chunk id. Process it.
106 do_chunked_array<T>(q, cl, obj, task->chunk(), task->pow(), weak);
107 }
108 }
109
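// Liveness is accumulated per region as in the non-generational path; in addition, young
// objects feed the age census when adaptive tenuring is enabled and the census is not taken
// at evacuation time.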
110 template <ShenandoahGenerationType GENERATION>
111 inline void ShenandoahMark::count_liveness(ShenandoahLiveData* live_data, oop obj, uint worker_id) {
112 const ShenandoahHeap* const heap = ShenandoahHeap::heap();
113 const size_t region_idx = heap->heap_region_index_containing(obj);
114 ShenandoahHeapRegion* const region = heap->get_region(region_idx);
115 const size_t size = obj->size();
116
117 // Age census for objects in the young generation
118 if (GENERATION == YOUNG || (GENERATION == GLOBAL_GEN && region->is_young())) {
119 assert(heap->mode()->is_generational(), "Only if generational");
120 if (ShenandoahGenerationalAdaptiveTenuring && !ShenandoahGenerationalCensusAtEvac) {
121 assert(region->is_young(), "Only for young objects");
122 uint age = ShenandoahHeap::get_object_age_concurrent(obj);
123 CENSUS_NOISE(heap->age_census()->add(age, region->age(), region->youth(), size, worker_id);)
124 NO_CENSUS_NOISE(heap->age_census()->add(age, region->age(), size, worker_id);)
125 }
126 }
127
128 if (!region->is_humongous_start()) {
129 assert(!region->is_humongous(), "Cannot have continuations here");
130 assert(region->is_affiliated(), "Do not count live data within Free Regular Region " SIZE_FORMAT, region_idx);
131 ShenandoahLiveData cur = live_data[region_idx];
132 size_t new_val = size + cur;
133 if (new_val >= SHENANDOAH_LIVEDATA_MAX) {
134 // overflow, flush to region data
135 region->increase_live_data_gc_words(new_val);
136 live_data[region_idx] = 0;
137 } else {
138 // still good, remember in locals
139 live_data[region_idx] = (ShenandoahLiveData) new_val;
140 }
141 } else {
142 shenandoah_assert_in_correct_region(nullptr, obj);
143 size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
144
145 assert(region->is_affiliated(), "Do not count live data within FREE Humongous Start Region " SIZE_FORMAT, region_idx);
146 for (size_t i = region_idx; i < region_idx + num_regions; i++) {
147 ShenandoahHeapRegion* chain_reg = heap->get_region(i);
148 assert(chain_reg->is_humongous(), "Expecting a humongous region");
149 assert(chain_reg->is_affiliated(), "Do not count live data within FREE Humongous Continuation Region " SIZE_FORMAT, i);
150 chain_reg->increase_live_data_gc_words(chain_reg->used() >> LogHeapWordSize);
151 }
152 }
153 }
154
155 template <class T>
156 inline void ShenandoahMark::do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop obj, bool weak) {
157 assert(obj->is_objArray(), "expect object array");
158 objArrayOop array = objArrayOop(obj);
159 int len = array->length();
160
161 // Mark objArray klass metadata
162 if (Devirtualizer::do_metadata(cl)) {
163 Devirtualizer::do_klass(cl, array->klass());
164 }
165
166 if (len <= (int) ObjArrayMarkingStride*2) {
167 // A few slices only, process directly
168 array->oop_iterate_range(cl, 0, len);
169 } else {
232 pow--;
233 chunk *= 2;
234 bool pushed = q->push(ShenandoahMarkTask(array, true, weak, chunk - 1, pow));
235 assert(pushed, "overflow queue should always succeed pushing");
236 }
237
238 int chunk_size = 1 << pow;
239
240 int from = (chunk - 1) * chunk_size;
241 int to = chunk * chunk_size;
242
243 #ifdef ASSERT
244 int len = array->length();
245 assert (0 <= from && from < len, "from is sane: %d/%d", from, len);
246 assert (0 < to && to <= len, "to is sane: %d/%d", to, len);
247 #endif
248
249 array->oop_iterate_range(cl, from, to);
250 }
251
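// Drains SATB buffers for the given GENERATION, forwarding each recorded reference to
// mark_through_ref() with the worker's regular and old-generation queues.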
252 template <ShenandoahGenerationType GENERATION>
253 class ShenandoahSATBBufferClosure : public SATBBufferClosure {
254 private:
255 ShenandoahObjToScanQueue* _queue;
256 ShenandoahObjToScanQueue* _old_queue;
257 ShenandoahHeap* _heap;
258 ShenandoahMarkingContext* const _mark_context;
259 public:
260 ShenandoahSATBBufferClosure(ShenandoahObjToScanQueue* q, ShenandoahObjToScanQueue* old_q) :
261 _queue(q),
262 _old_queue(old_q),
263 _heap(ShenandoahHeap::heap()),
264 _mark_context(_heap->marking_context())
265 {
266 }
267
268 void do_buffer(void **buffer, size_t size) {
269 assert(size == 0 || !_heap->has_forwarded_objects() || _heap->is_concurrent_old_mark_in_progress(), "Forwarded objects are not expected here");
270 for (size_t i = 0; i < size; ++i) {
271 oop *p = (oop *) &buffer[i];
272 ShenandoahMark::mark_through_ref<oop, GENERATION>(p, _queue, _old_queue, _mark_context, false);
273 }
274 }
275 };
276
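// Compile-time dispatch on GENERATION: young objects for YOUNG, old objects for OLD, and
// every object for the global modes.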
277 template<ShenandoahGenerationType GENERATION>
278 bool ShenandoahMark::in_generation(ShenandoahHeap* const heap, oop obj) {
279 // Each in-line expansion of in_generation() resolves GENERATION at compile time.
280 if (GENERATION == YOUNG) {
281 return heap->is_in_young(obj);
282 } else if (GENERATION == OLD) {
283 return heap->is_in_old(obj);
284 } else if (GENERATION == GLOBAL_GEN || GENERATION == GLOBAL_NON_GEN) {
285 return true;
286 } else {
287 return false;
288 }
289 }
290
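// Mark the reference according to its generation: objects in the generation being marked are
// marked on the regular queue (dirtying cards for old-to-young pointers as needed); objects
// outside it go to the old-generation queue when one is available, or, during OLD marking,
// have the holding card dirtied instead.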
291 template<class T, ShenandoahGenerationType GENERATION>
292 inline void ShenandoahMark::mark_through_ref(T *p, ShenandoahObjToScanQueue* q, ShenandoahObjToScanQueue* old_q, ShenandoahMarkingContext* const mark_context, bool weak) {
293 // Note: This is a very hot code path, so the code should be conditional on GENERATION template
294 // parameter where possible, in order to generate the most efficient code.
295
296 T o = RawAccess<>::oop_load(p);
297 if (!CompressedOops::is_null(o)) {
298 oop obj = CompressedOops::decode_not_null(o);
299
300 ShenandoahHeap* heap = ShenandoahHeap::heap();
301 shenandoah_assert_not_forwarded(p, obj);
302 shenandoah_assert_not_in_cset_except(p, obj, heap->cancelled_gc());
303 if (in_generation<GENERATION>(heap, obj)) {
304 mark_ref(q, mark_context, weak, obj);
305 shenandoah_assert_marked(p, obj);
306 // TODO: As implemented herein, GLOBAL_GEN collections reconstruct the card table during GLOBAL_GEN concurrent
307 // marking. Note that the card table is cleaned at init_mark time so it needs to be reconstructed to support
308 // future young-gen collections. It might be better to reconstruct card table in
309 // ShenandoahHeapRegion::global_oop_iterate_and_fill_dead. We could either mark all live memory as dirty, or could
310 // use the GLOBAL update-refs scanning of pointers to determine precisely which cards to flag as dirty.
311 if (GENERATION == YOUNG && heap->is_in_old(p)) {
312 // Mark the card as dirty because remembered set scanning still finds an interesting pointer here.
313 heap->mark_card_as_dirty((HeapWord*)p);
314 } else if (GENERATION == GLOBAL_GEN && heap->is_in_old(p) && heap->is_in_young(obj)) {
315 // Mark the card as dirty because GLOBAL marking finds an interesting pointer here.
316 heap->mark_card_as_dirty((HeapWord*)p);
317 }
318 } else if (old_q != nullptr) {
319 // Young mark, bootstrapping old_q or concurrent with old_q marking.
320 mark_ref(old_q, mark_context, weak, obj);
321 shenandoah_assert_marked(p, obj);
322 } else if (GENERATION == OLD) {
323 // Old mark, found a young pointer.
324 // TODO: Rethink this: it may be redundant with the dirtying of cards identified during young-gen remembered
325 // set scanning and by mutator write barriers.
326 if (heap->is_in(p)) {
327 assert(heap->is_in_young(obj), "Expected young object.");
328 heap->mark_card_as_dirty(p);
329 }
330 }
331 }
332 }
333
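// Mark obj weakly or strongly and push a new mark task when the mark was newly set by this call.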
334 inline void ShenandoahMark::mark_ref(ShenandoahObjToScanQueue* q,
335 ShenandoahMarkingContext* const mark_context,
336 bool weak, oop obj) {
337 bool skip_live = false;
338 bool marked;
339 if (weak) {
340 marked = mark_context->mark_weak(obj);
341 } else {
342 marked = mark_context->mark_strong(obj, /* was_upgraded = */ skip_live);
343 }
344 if (marked) {
345 bool pushed = q->push(ShenandoahMarkTask(obj, skip_live, weak));
346 assert(pushed, "overflow queue should always succeed pushing");
347 }
348 }
349
350 ShenandoahObjToScanQueueSet* ShenandoahMark::task_queues() const {
351 return _task_queues;
352 }
353
354 ShenandoahObjToScanQueue* ShenandoahMark::get_queue(uint index) const {
355 return _task_queues->queue(index);
356 }
357
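// Returns the old-generation queue for this worker, or nullptr when no old-generation queue
// set is installed.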
358 ShenandoahObjToScanQueue* ShenandoahMark::get_old_queue(uint index) const {
359 if (_old_gen_task_queues != nullptr) {
360 return _old_gen_task_queues->queue(index);
361 }
362 return nullptr;
363 }
364
365 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHMARK_INLINE_HPP