src/hotspot/share/gc/shenandoah/shenandoahMark.inline.hpp

 68 
 69   shenandoah_assert_not_forwarded(nullptr, obj);
 70   shenandoah_assert_marked(nullptr, obj);
 71   shenandoah_assert_not_in_cset_except(nullptr, obj, ShenandoahHeap::heap()->cancelled_gc());
 72 
 73   // Are we in weak subgraph scan?
 74   bool weak = task->is_weak();
 75   cl->set_weak(weak);
 76 
 77   if (task->is_not_chunked()) {
 78     if (obj->is_instance()) {
 79       // Case 1: Normal oop, process as usual.
 80       if (ContinuationGCSupport::relativize_stack_chunk(obj)) {
 81           // Loom doesn't support mixing of weak marking and strong marking of
 82           // stack chunks.
 83           cl->set_weak(false);
 84       }
 85 
 86       obj->oop_iterate(cl);
 87       dedup_string<STRING_DEDUP>(obj, req);
 88     } else if (obj->is_objArray()) {
 89       // Case 2: Object array instance and no chunk is set. Must be the first
 90       // time we visit it, start the chunked processing.
 91       do_chunked_array_start<T>(q, cl, obj, weak);
 92     } else {
 93       // Case 3: Primitive array. Do nothing, no oops there. We use the same
 94       // performance tweak TypeArrayKlass::oop_oop_iterate_impl is using:
 95       // We skip iterating over the klass pointer since we know that
 96       // Universe::TypeArrayKlass never moves.
 97       assert (obj->is_typeArray(), "should be type array");
 98     }
 99     // Count liveness last: push the outstanding work to the queues first.
100     // Avoid double-counting objects that are visited twice due to the upgrade
101     // from final- to strong mark.
102     if (task->count_liveness()) {
103       count_liveness<GENERATION>(live_data, obj, worker_id);
104     }
105   } else {
106     // Case 4: Array chunk, has sensible chunk id. Process it.
107     do_chunked_array<T>(q, cl, obj, task->chunk(), task->pow(), weak);
108   }
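
Case 2 seeds chunked processing for a large object array and Case 4 later consumes individual chunks; both travel through the same ShenandoahObjToScanQueue. The sketch below is a minimal, stand-alone illustration of that cooperation, not Shenandoah code: the task fields, the fixed-size chunking, and the zero-chunk-id convention are assumptions made for the example (the real code uses the power-of-two (chunk, pow) encoding shown further down).

// Minimal sketch (not part of this file): one queue, two kinds of tasks.
// All names and the fixed-size chunking are assumptions for illustration only.
#include <deque>
#include <cstdio>

struct MiniTask {
  int  obj_id;   // stands in for the oop
  int  len;      // array length, 0 for a plain instance
  bool weak;     // weak subgraph scan?
  int  chunk;    // 0 = whole object (assumed convention), else 1-based chunk id
};

static const int kChunk = 4;   // toy chunk size; the real code uses 2^pow chunks

static void do_task(std::deque<MiniTask>& q, const MiniTask& t) {
  if (t.chunk == 0) {
    if (t.len > kChunk) {
      // "Case 2" in miniature: first visit of a large array, seed chunk tasks.
      for (int c = 1; (c - 1) * kChunk < t.len; c++) {
        q.push_back({t.obj_id, t.len, t.weak, c});
      }
    } else {
      // "Case 1/3" in miniature: small enough to scan directly.
      printf("obj %d: scan whole object (weak=%d)\n", t.obj_id, t.weak);
    }
  } else {
    // "Case 4" in miniature: scan only this chunk's element range.
    int from = (t.chunk - 1) * kChunk;
    int to   = from + kChunk < t.len ? from + kChunk : t.len;
    printf("obj %d: scan elements [%d, %d)\n", t.obj_id, from, to);
  }
}

int main() {
  std::deque<MiniTask> q = { {1, 0, false, 0}, {2, 10, true, 0} };
  while (!q.empty()) {
    MiniTask t = q.front();
    q.pop_front();
    do_task(q, t);
  }
  return 0;
}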

139     } else {
140       // still good, remember in locals
141       live_data[region_idx] = (ShenandoahLiveData) new_val;
142     }
143   } else {
144     shenandoah_assert_in_correct_region(nullptr, obj);
145     size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
146 
147     assert(region->is_affiliated(), "Do not count live data within FREE Humongous Start Region %zu", region_idx);
148     for (size_t i = region_idx; i < region_idx + num_regions; i++) {
149       ShenandoahHeapRegion* chain_reg = heap->get_region(i);
150       assert(chain_reg->is_humongous(), "Expecting a humongous region");
151       assert(chain_reg->is_affiliated(), "Do not count live data within FREE Humongous Continuation Region %zu", i);
152       chain_reg->increase_live_data_gc_words(chain_reg->used() >> LogHeapWordSize);
153     }
154   }
155 }
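
For a humongous object, count_liveness() attributes live data region by region: required_regions() determines how many regions the object spans, and every region in the chain reports its own used() words. The sketch below replays that accounting outside the VM; the region size, HeapWordSize, and the assumption that the object is the sole occupant starting at the first region's base are made up for the example, and required_regions() is modeled as plain ceiling division.

// Minimal sketch (not part of this file): live-data accounting for a humongous
// object spanning several regions. All constants here are assumptions.
#include <cstdio>
#include <cstddef>

static const size_t region_bytes   = 4u * 1024 * 1024;  // assumed 4M regions
static const size_t heap_word_size = 8;                 // 64-bit heap words

static size_t required_regions(size_t bytes) {
  // Ceiling division, mirroring the intent of ShenandoahHeapRegion::required_regions().
  return (bytes + region_bytes - 1) / region_bytes;
}

int main() {
  size_t obj_words = 1500000;                          // humongous object, in heap words
  size_t obj_bytes = obj_words * heap_word_size;       // 12,000,000 bytes -> 3 regions
  size_t num_regions = required_regions(obj_bytes);

  // Each region in the humongous chain adds its own used() as live data:
  // start/continuation regions are fully used, the last one only partially.
  size_t live_words = 0;
  for (size_t i = 0; i < num_regions; i++) {
    size_t used_bytes = (i == num_regions - 1)
        ? obj_bytes - i * region_bytes                 // tail region: the remainder
        : region_bytes;                                // fully used region
    live_words += used_bytes / heap_word_size;         // used() >> LogHeapWordSize
  }

  // With the assumptions above the total comes back to the object size in words.
  printf("regions=%zu live_words=%zu (obj_words=%zu)\n",
         num_regions, live_words, obj_words);
  return 0;
}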
156 
157 template <class T>
158 inline void ShenandoahMark::do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop obj, bool weak) {
159   assert(obj->is_objArray(), "expect object array");
160   objArrayOop array = objArrayOop(obj);
161   int len = array->length();
162 
163   // Mark objArray klass metadata
164   if (Devirtualizer::do_metadata(cl)) {
165     Devirtualizer::do_klass(cl, array->klass());
166   }
167 
168   if (len <= (int) ObjArrayMarkingStride*2) {
169     // A few slices only, process directly
170     array->oop_iterate_range(cl, 0, len);
171   } else {
172     int bits = log2i_graceful(len);
173     // Compensate for non-power-of-two arrays, cover the array in excess:
174     if (len != (1 << bits)) bits++;
175 
176     // Only allow full chunks on the queue. This frees do_chunked_array() from checking from/to
177     // boundaries against array->length(), touching the array header on every chunk.
178     //
179     // To do this, we cut the prefix in full-sized chunks, and submit them on the queue.

206       if (left_chunk_end < len) {
207         bool pushed = q->push(ShenandoahMarkTask(array, true, weak, left_chunk, pow));
208         assert(pushed, "overflow queue should always succeed pushing");
209         chunk = right_chunk;
210         last_idx = left_chunk_end;
211       } else {
212         chunk = left_chunk;
213       }
214     }
215 
216     // Process the irregular tail, if present
217     int from = last_idx;
218     if (from < len) {
219       array->oop_iterate_range(cl, from, len);
220     }
221   }
222 }
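
do_chunked_array_start() only queues full power-of-two chunks: the array length is rounded up to the next power of two so the chunk arithmetic can cover the array "in excess", full chunks that end inside the array are pushed, and the irregular tail is iterated directly. The sketch below isolates just the rounding step; log2i_graceful() is modeled with a shift loop and the stride value is only an assumed default.

// Minimal sketch (not part of this file): the power-of-two rounding used when
// chunked processing of an object array starts. The stride value is an assumption.
#include <cstdio>

static int floor_log2(int n) {       // stands in for log2i_graceful(), n > 0
  int bits = 0;
  while ((n >> 1) != 0) { n >>= 1; bits++; }
  return bits;
}

int main() {
  const int stride = 2048;           // assumed ObjArrayMarkingStride value
  int len = 5000;                    // array length, not a power of two

  if (len <= stride * 2) {
    // Small array: no chunking, iterate [0, len) directly.
    printf("iterate [0, %d) directly\n", len);
    return 0;
  }

  int bits = floor_log2(len);        // 12 for len == 5000
  if (len != (1 << bits)) bits++;    // round up, cover the array "in excess"
  int covered = 1 << bits;           // 8192 >= len

  // Only full chunks that end inside [0, len) are pushed; the irregular tail
  // between the last full chunk and len is scanned directly by the caller.
  printf("len=%d covered=%d excess=%d\n", len, covered, covered - len);
  return 0;
}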
223 
224 template <class T>
225 inline void ShenandoahMark::do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop obj, int chunk, int pow, bool weak) {
226   assert(obj->is_objArray(), "expect object array");
227   objArrayOop array = objArrayOop(obj);
228 
229   assert (ObjArrayMarkingStride > 0, "sanity");
230 
231   // Split out tasks, as suggested in ShenandoahMarkTask docs. Avoid pushing tasks that
232   // are known to start beyond the array.
233   while ((1 << pow) > (int)ObjArrayMarkingStride && (chunk*2 < ShenandoahMarkTask::chunk_size())) {
234     pow--;
235     chunk *= 2;
236     bool pushed = q->push(ShenandoahMarkTask(array, true, weak, chunk - 1, pow));
237     assert(pushed, "overflow queue should always succeed pushing");
238   }
239 
240   int chunk_size = 1 << pow;
241 
242   int from = (chunk - 1) * chunk_size;
243   int to = chunk * chunk_size;
244 
245 #ifdef ASSERT
246   int len = array->length();
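
The (chunk, pow) pair on a ShenandoahMarkTask encodes an element range: with 1-based chunk ids, a task covers [ (chunk - 1) << pow, chunk << pow ), as the from/to computation above shows. The splitting loop halves that range by decrementing pow and doubling chunk, pushing the left half and keeping the right half locally. The sketch below only replays this arithmetic for one split; the concrete numbers are arbitrary.

// Minimal sketch (not part of this file): decoding the (chunk, pow) pair the way
// do_chunked_array() does, and one step of the halving performed by its loop.
#include <cstdio>

struct Range { int from; int to; };

static Range decode(int chunk, int pow) {
  // Same arithmetic as above: 1-based chunk ids, each chunk spans 2^pow elements.
  int chunk_size = 1 << pow;
  return { (chunk - 1) * chunk_size, chunk * chunk_size };
}

int main() {
  // A task (chunk = 3, pow = 10) covers elements [2048, 3072).
  Range r = decode(3, 10);
  printf("task      [%d, %d)\n", r.from, r.to);

  // One split step: pow-- and chunk *= 2 keeps the right half locally,
  // while (chunk - 1, pow) -- the left half -- is pushed on the queue.
  int pow = 10, chunk = 3;
  pow--; chunk *= 2;                       // now chunk = 6, pow = 9
  Range pushed = decode(chunk - 1, pow);   // left half  [2048, 2560)
  Range kept   = decode(chunk, pow);       // right half [2560, 3072)
  printf("pushed    [%d, %d)\n", pushed.from, pushed.to);
  printf("kept      [%d, %d)\n", kept.from, kept.to);
  return 0;
}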

 68 
 69   shenandoah_assert_not_forwarded(nullptr, obj);
 70   shenandoah_assert_marked(nullptr, obj);
 71   shenandoah_assert_not_in_cset_except(nullptr, obj, ShenandoahHeap::heap()->cancelled_gc());
 72 
 73   // Are we in weak subgraph scan?
 74   bool weak = task->is_weak();
 75   cl->set_weak(weak);
 76 
 77   if (task->is_not_chunked()) {
 78     if (obj->is_instance()) {
 79       // Case 1: Normal oop, process as usual.
 80       if (ContinuationGCSupport::relativize_stack_chunk(obj)) {
 81           // Loom doesn't support mixing of weak marking and strong marking of
 82           // stack chunks.
 83           cl->set_weak(false);
 84       }
 85 
 86       obj->oop_iterate(cl);
 87       dedup_string<STRING_DEDUP>(obj, req);
 88     } else if (obj->is_refArray()) {
 89       // Case 2: Object array instance and no chunk is set. Must be the first
 90       // time we visit it, start the chunked processing.
 91       do_chunked_array_start<T>(q, cl, obj, weak);
 92     } else {
 93       // Case 3: Primitive array. Do nothing, no oops there. We use the same
 94       // performance tweak TypeArrayKlass::oop_oop_iterate_impl is using:
 95       // We skip iterating over the klass pointer since we know that
 96       // Universe::TypeArrayKlass never moves.
 97       assert (obj->is_typeArray(), "should be type array");
 98     }
 99     // Count liveness last: push the outstanding work to the queues first.
100     // Avoid double-counting objects that are visited twice due to the upgrade
101     // from final- to strong mark.
102     if (task->count_liveness()) {
103       count_liveness<GENERATION>(live_data, obj, worker_id);
104     }
105   } else {
106     // Case 4: Array chunk, has sensible chunk id. Process it.
107     do_chunked_array<T>(q, cl, obj, task->chunk(), task->pow(), weak);
108   }

139     } else {
140       // still good, remember in locals
141       live_data[region_idx] = (ShenandoahLiveData) new_val;
142     }
143   } else {
144     shenandoah_assert_in_correct_region(nullptr, obj);
145     size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
146 
147     assert(region->is_affiliated(), "Do not count live data within FREE Humongous Start Region %zu", region_idx);
148     for (size_t i = region_idx; i < region_idx + num_regions; i++) {
149       ShenandoahHeapRegion* chain_reg = heap->get_region(i);
150       assert(chain_reg->is_humongous(), "Expecting a humongous region");
151       assert(chain_reg->is_affiliated(), "Do not count live data within FREE Humongous Continuation Region %zu", i);
152       chain_reg->increase_live_data_gc_words(chain_reg->used() >> LogHeapWordSize);
153     }
154   }
155 }
156 
157 template <class T>
158 inline void ShenandoahMark::do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop obj, bool weak) {
159   assert(obj->is_refArray(), "expect object array");
160   objArrayOop array = objArrayOop(obj);
161   int len = array->length();
162 
163   // Mark objArray klass metadata
164   if (Devirtualizer::do_metadata(cl)) {
165     Devirtualizer::do_klass(cl, array->klass());
166   }
167 
168   if (len <= (int) ObjArrayMarkingStride*2) {
169     // A few slices only, process directly
170     array->oop_iterate_range(cl, 0, len);
171   } else {
172     int bits = log2i_graceful(len);
173     // Compensate for non-power-of-two arrays, cover the array in excess:
174     if (len != (1 << bits)) bits++;
175 
176     // Only allow full chunks on the queue. This frees do_chunked_array() from checking from/to
177     // boundaries against array->length(), touching the array header on every chunk.
178     //
179     // To do this, we cut the prefix in full-sized chunks, and submit them on the queue.

206       if (left_chunk_end < len) {
207         bool pushed = q->push(ShenandoahMarkTask(array, true, weak, left_chunk, pow));
208         assert(pushed, "overflow queue should always succeed pushing");
209         chunk = right_chunk;
210         last_idx = left_chunk_end;
211       } else {
212         chunk = left_chunk;
213       }
214     }
215 
216     // Process the irregular tail, if present
217     int from = last_idx;
218     if (from < len) {
219       array->oop_iterate_range(cl, from, len);
220     }
221   }
222 }
223 
224 template <class T>
225 inline void ShenandoahMark::do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop obj, int chunk, int pow, bool weak) {
226   assert(obj->is_refArray(), "expect object array");
227   objArrayOop array = objArrayOop(obj);
228 
229   assert (ObjArrayMarkingStride > 0, "sanity");
230 
231   // Split out tasks, as suggested in ShenandoahMarkTask docs. Avoid pushing tasks that
232   // are known to start beyond the array.
233   while ((1 << pow) > (int)ObjArrayMarkingStride && (chunk*2 < ShenandoahMarkTask::chunk_size())) {
234     pow--;
235     chunk *= 2;
236     bool pushed = q->push(ShenandoahMarkTask(array, true, weak, chunk - 1, pow));
237     assert(pushed, "overflow queue should always succeed pushing");
238   }
239 
240   int chunk_size = 1 << pow;
241 
242   int from = (chunk - 1) * chunk_size;
243   int to = chunk * chunk_size;
244 
245 #ifdef ASSERT
246   int len = array->length();