// NOTE(review): interior fragment of the per-task marking dispatch (likely
// ShenandoahMark::do_task) -- the enclosing signature and locals (task, cl,
// q, req, live_data, worker_id) are declared outside this view.  The numeric
// prefixes below are extraction artifacts, not code.
67 oop obj = task->obj();
68
// Sanity: a dequeued mark task must reference an object that is already
// marked and not forwarded; residency in the collection set is tolerated
// only when the GC cycle has been cancelled.
69 shenandoah_assert_not_forwarded(nullptr, obj);
70 shenandoah_assert_marked(nullptr, obj);
71 shenandoah_assert_not_in_cset_except(nullptr, obj, ShenandoahHeap::heap()->cancelled_gc());
72
73 // Are we in weak subgraph scan?
74 bool weak = task->is_weak();
75 cl->set_weak(weak);
76
77 if (task->is_not_chunked()) {
78 if (obj->is_instance()) {
79 // Case 1: Normal oop, process as usual.
80 if (obj->is_stackChunk()) {
81 // Loom doesn't support mixing of weak marking and strong marking of stack chunks.
82 cl->set_weak(false);
83 }
84
85 obj->oop_iterate(cl);
// String-dedup handling is compiled in/out via the STRING_DEDUP template flag.
86 dedup_string<STRING_DEDUP>(obj, req);
87 } else if (obj->is_objArray()) {
88 // Case 2: Object array instance and no chunk is set. Must be the first
89 // time we visit it, start the chunked processing.
90 do_chunked_array_start<T>(q, cl, obj, weak);
91 } else {
92 // Case 3: Primitive array. Do nothing, no oops there. We use the same
93 // performance tweak TypeArrayKlass::oop_oop_iterate_impl is using:
94 // We skip iterating over the klass pointer since we know that
95 // Universe::TypeArrayKlass never moves.
96 assert (obj->is_typeArray(), "should be type array");
97 }
98 // Count liveness the last: push the outstanding work to the queues first
99 // Avoid double-counting objects that are visited twice due to upgrade
100 // from final- to strong mark.
101 if (task->count_liveness()) {
102 count_liveness<GENERATION>(live_data, obj, worker_id);
103 }
104 } else {
105 // Case 4: Array chunk, has sensible chunk id. Process it.
106 do_chunked_array<T>(q, cl, obj, task->chunk(), task->pow(), weak);
107 }
// NOTE(review): tail fragment of a liveness-accounting routine (likely
// ShenandoahMark::count_liveness).  The branch head and the declarations of
// region_idx, new_val, size, region, heap and live_data are outside this view.
136 } else {
137 // still good, remember in locals
138 live_data[region_idx] = (ShenandoahLiveData) new_val;
139 }
140 } else {
// Humongous-object path: liveness is attributed directly to every region in
// the humongous chain (each region's full used() size), bypassing the
// per-worker live_data cache used above.
141 shenandoah_assert_in_correct_region(nullptr, obj);
142 size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
143
144 assert(region->is_affiliated(), "Do not count live data within FREE Humongous Start Region %zu", region_idx);
145 for (size_t i = region_idx; i < region_idx + num_regions; i++) {
146 ShenandoahHeapRegion* chain_reg = heap->get_region(i);
147 assert(chain_reg->is_humongous(), "Expecting a humongous region");
148 assert(chain_reg->is_affiliated(), "Do not count live data within FREE Humongous Continuation Region %zu", i);
// used() is in bytes; convert to heap words before accumulating.
149 chain_reg->increase_live_data_gc_words(chain_reg->used() >> LogHeapWordSize);
150 }
151 }
152 }
153
// Begin chunked scanning of an object array.  Small arrays (at most
// 2*ObjArrayMarkingStride elements) are scanned directly; larger arrays are
// covered by power-of-two chunks submitted as ShenandoahMarkTask entries on
// queue q, with `weak` propagated so chunk tasks stay in the weak subgraph scan.
154 template <class T>
155 inline void ShenandoahMark::do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop obj, bool weak) {
156 assert(obj->is_objArray(), "expect object array");
157 objArrayOop array = objArrayOop(obj);
158 int len = array->length();
159
160 // Mark objArray klass metadata
161 if (Devirtualizer::do_metadata(cl)) {
162 Devirtualizer::do_klass(cl, array->klass());
163 }
164
165 if (len <= (int) ObjArrayMarkingStride*2) {
166 // A few slices only, process directly
167 array->oop_iterate_elements_range(cl, 0, len);
168 } else {
// bits = ceil(log2(len)): the chunk cover may extend past the array end;
// do_chunked_array() relies on only full in-bounds chunks being queued.
169 int bits = log2i_graceful(len);
170 // Compensate for non-power-of-two arrays, cover the array in excess:
171 if (len != (1 << bits)) bits++;
172
173 // Only allow full chunks on the queue. This frees do_chunked_array() from checking from/to
174 // boundaries against array->length(), touching the array header on every chunk.
175 //
176 // To do this, we cut the prefix in full-sized chunks, and submit them on the queue.
// NOTE(review): original lines 177-202 are missing from this extraction --
// the chunk-splitting loop that defines pow, chunk, last_idx, left_chunk,
// right_chunk and left_chunk_end begins there.  Do not edit past this gap
// without the full file.
203 if (left_chunk_end < len) {
204 bool pushed = q->push(ShenandoahMarkTask(array, true, weak, left_chunk, pow));
205 assert(pushed, "overflow queue should always succeed pushing");
206 chunk = right_chunk;
207 last_idx = left_chunk_end;
208 } else {
209 chunk = left_chunk;
210 }
211 }
212
213 // Process the irregular tail, if present
214 int from = last_idx;
215 if (from < len) {
216 array->oop_iterate_elements_range(cl, from, len);
217 }
218 }
219 }
220
// Process one chunk of an object array identified by (chunk, pow), as encoded
// in a ShenandoahMarkTask.  Oversized chunks are first split in half (halves
// pushed back on queue q) until each piece is at most ObjArrayMarkingStride
// elements, then the remaining [from, to) element range is scanned.
221 template <class T>
222 inline void ShenandoahMark::do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop obj, int chunk, int pow, bool weak) {
223 assert(obj->is_objArray(), "expect object array");
224 objArrayOop array = objArrayOop(obj);
225
226 // Split out tasks, as suggested in ShenandoahMarkTask docs. Avoid pushing tasks that
227 // are known to start beyond the array.
228 while ((1 << pow) > (int)ObjArrayMarkingStride && (chunk*2 < ShenandoahMarkTask::chunk_size())) {
// Halve the chunk size (pow--), keep the right half (chunk*2) for ourselves
// and push the left half (chunk*2 - 1) for another worker to steal.
229 pow--;
230 chunk *= 2;
231 bool pushed = q->push(ShenandoahMarkTask(array, true, weak, chunk - 1, pow));
232 assert(pushed, "overflow queue should always succeed pushing");
233 }
234
235 int chunk_size = 1 << pow;
236
// Chunk ids are 1-based, so chunk N covers elements [(N-1)*size, N*size).
237 int from = (chunk - 1) * chunk_size;
238 int to = chunk * chunk_size;
239
240 #ifdef ASSERT
241 int len = array->length();
242 assert (0 <= from && from < len, "from is sane: %d/%d", from, len);
// NOTE(review): fragment ends inside the ASSERT block; the #endif and the
// actual element scan are beyond this view.
243 assert (0 < to && to <= len, "to is sane: %d/%d", to, len);
|
// NOTE(review): second copy of the same task-dispatch fragment (this chunk
// appears twice in the extraction); this copy differs from the first only in
// using is_refArray() instead of is_objArray() -- presumably the "after" side
// of a rename.  Enclosing signature and locals are outside this view.
67 oop obj = task->obj();
68
// Sanity: a dequeued mark task must reference an object that is already
// marked and not forwarded; residency in the collection set is tolerated
// only when the GC cycle has been cancelled.
69 shenandoah_assert_not_forwarded(nullptr, obj);
70 shenandoah_assert_marked(nullptr, obj);
71 shenandoah_assert_not_in_cset_except(nullptr, obj, ShenandoahHeap::heap()->cancelled_gc());
72
73 // Are we in weak subgraph scan?
74 bool weak = task->is_weak();
75 cl->set_weak(weak);
76
77 if (task->is_not_chunked()) {
78 if (obj->is_instance()) {
79 // Case 1: Normal oop, process as usual.
80 if (obj->is_stackChunk()) {
81 // Loom doesn't support mixing of weak marking and strong marking of stack chunks.
82 cl->set_weak(false);
83 }
84
85 obj->oop_iterate(cl);
// String-dedup handling is compiled in/out via the STRING_DEDUP template flag.
86 dedup_string<STRING_DEDUP>(obj, req);
87 } else if (obj->is_refArray()) {
88 // Case 2: Object array instance and no chunk is set. Must be the first
89 // time we visit it, start the chunked processing.
90 do_chunked_array_start<T>(q, cl, obj, weak);
91 } else {
92 // Case 3: Primitive array. Do nothing, no oops there. We use the same
93 // performance tweak TypeArrayKlass::oop_oop_iterate_impl is using:
94 // We skip iterating over the klass pointer since we know that
95 // Universe::TypeArrayKlass never moves.
96 assert (obj->is_typeArray(), "should be type array");
97 }
98 // Count liveness the last: push the outstanding work to the queues first
99 // Avoid double-counting objects that are visited twice due to upgrade
100 // from final- to strong mark.
101 if (task->count_liveness()) {
102 count_liveness<GENERATION>(live_data, obj, worker_id);
103 }
104 } else {
105 // Case 4: Array chunk, has sensible chunk id. Process it.
106 do_chunked_array<T>(q, cl, obj, task->chunk(), task->pow(), weak);
107 }
// NOTE(review): second copy of the count_liveness tail fragment (identical to
// the first copy).  The branch head and the declarations of region_idx,
// new_val, size, region, heap and live_data are outside this view.
136 } else {
137 // still good, remember in locals
138 live_data[region_idx] = (ShenandoahLiveData) new_val;
139 }
140 } else {
// Humongous-object path: liveness is attributed directly to every region in
// the humongous chain (each region's full used() size), bypassing the
// per-worker live_data cache used above.
141 shenandoah_assert_in_correct_region(nullptr, obj);
142 size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
143
144 assert(region->is_affiliated(), "Do not count live data within FREE Humongous Start Region %zu", region_idx);
145 for (size_t i = region_idx; i < region_idx + num_regions; i++) {
146 ShenandoahHeapRegion* chain_reg = heap->get_region(i);
147 assert(chain_reg->is_humongous(), "Expecting a humongous region");
148 assert(chain_reg->is_affiliated(), "Do not count live data within FREE Humongous Continuation Region %zu", i);
// used() is in bytes; convert to heap words before accumulating.
149 chain_reg->increase_live_data_gc_words(chain_reg->used() >> LogHeapWordSize);
150 }
151 }
152 }
153
// Begin chunked scanning of an object array (second copy of this chunk; this
// version checks is_refArray() where the first copy checks is_objArray()).
// Small arrays (at most 2*ObjArrayMarkingStride elements) are scanned
// directly; larger arrays are covered by power-of-two chunks submitted as
// ShenandoahMarkTask entries on queue q, with `weak` propagated to the tasks.
154 template <class T>
155 inline void ShenandoahMark::do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop obj, bool weak) {
156 assert(obj->is_refArray(), "expect object array");
157 objArrayOop array = objArrayOop(obj);
158 int len = array->length();
159
160 // Mark objArray klass metadata
161 if (Devirtualizer::do_metadata(cl)) {
162 Devirtualizer::do_klass(cl, array->klass());
163 }
164
165 if (len <= (int) ObjArrayMarkingStride*2) {
166 // A few slices only, process directly
167 array->oop_iterate_elements_range(cl, 0, len);
168 } else {
// bits = ceil(log2(len)): the chunk cover may extend past the array end;
// do_chunked_array() relies on only full in-bounds chunks being queued.
169 int bits = log2i_graceful(len);
170 // Compensate for non-power-of-two arrays, cover the array in excess:
171 if (len != (1 << bits)) bits++;
172
173 // Only allow full chunks on the queue. This frees do_chunked_array() from checking from/to
174 // boundaries against array->length(), touching the array header on every chunk.
175 //
176 // To do this, we cut the prefix in full-sized chunks, and submit them on the queue.
// NOTE(review): original lines 177-202 are missing from this extraction --
// the chunk-splitting loop that defines pow, chunk, last_idx, left_chunk,
// right_chunk and left_chunk_end begins there.  Do not edit past this gap
// without the full file.
203 if (left_chunk_end < len) {
204 bool pushed = q->push(ShenandoahMarkTask(array, true, weak, left_chunk, pow));
205 assert(pushed, "overflow queue should always succeed pushing");
206 chunk = right_chunk;
207 last_idx = left_chunk_end;
208 } else {
209 chunk = left_chunk;
210 }
211 }
212
213 // Process the irregular tail, if present
214 int from = last_idx;
215 if (from < len) {
216 array->oop_iterate_elements_range(cl, from, len);
217 }
218 }
219 }
220
// Process one object-array chunk identified by (chunk, pow), as encoded in a
// ShenandoahMarkTask (second copy; this version asserts is_refArray()).
// Oversized chunks are split in half (halves pushed back on queue q) until
// each piece is at most ObjArrayMarkingStride elements, then the remaining
// [from, to) element range is scanned.
221 template <class T>
222 inline void ShenandoahMark::do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop obj, int chunk, int pow, bool weak) {
223 assert(obj->is_refArray(), "expect object array");
224 objArrayOop array = objArrayOop(obj);
225
226 // Split out tasks, as suggested in ShenandoahMarkTask docs. Avoid pushing tasks that
227 // are known to start beyond the array.
228 while ((1 << pow) > (int)ObjArrayMarkingStride && (chunk*2 < ShenandoahMarkTask::chunk_size())) {
// Halve the chunk size (pow--), keep the right half (chunk*2) for ourselves
// and push the left half (chunk*2 - 1) for another worker to steal.
229 pow--;
230 chunk *= 2;
231 bool pushed = q->push(ShenandoahMarkTask(array, true, weak, chunk - 1, pow));
232 assert(pushed, "overflow queue should always succeed pushing");
233 }
234
235 int chunk_size = 1 << pow;
236
// Chunk ids are 1-based, so chunk N covers elements [(N-1)*size, N*size).
237 int from = (chunk - 1) * chunk_size;
238 int to = chunk * chunk_size;
239
240 #ifdef ASSERT
241 int len = array->length();
242 assert (0 <= from && from < len, "from is sane: %d/%d", from, len);
// NOTE(review): fragment ends inside the ASSERT block; the #endif and the
// actual element scan are beyond this view.
243 assert (0 < to && to <= len, "to is sane: %d/%d", to, len);
|