162 // [left, right) is a maximal right-open interval of dirty cards
163 HeapWord* left = _rs->addr_for_card_index(dirty_l); // inclusive
164 HeapWord* right = _rs->addr_for_card_index(dirty_r + 1); // exclusive
165 // Clip right to end_addr established above (still exclusive)
166 right = MIN2(right, end_addr);
167 assert(right <= region->top() && end_addr <= region->top(), "Busted bounds");
168 const MemRegion mr(left, right);
169
170 // NOTE: We'll not call block_start() repeatedly
171 // on a very large object if its head card is dirty. If not,
172 // (i.e. the head card is clean) we'll call it each time we
173 // process a new dirty range on the object. This is always
174 // the case for large object arrays, which are typically more
175 // common.
176 HeapWord* p = _scc->block_start(dirty_l);
177 oop obj = cast_to_oop(p);
178
179 // PREFIX: The object that straddles into this range of dirty cards
180 // from the left may be subject to special treatment unless
181 // it is an object array.
182 if (p < left && !obj->is_objArray()) {
183 // The mutator (both compiler and interpreter, but not JNI?)
184 // typically dirty imprecisely (i.e. only the head of an object),
185 // but GC closures typically dirty the object precisely. (It would
186 // be nice to have everything be precise for maximum efficiency.)
187 //
188 // To handle this, we check the head card of the object here and,
189 // if dirty, (arrange to) scan the object in its entirety. If we
190 // find the head card clean, we'll scan only the portion of the
191 // object lying in the dirty card range below, assuming this was
192 // the result of precise marking by GC closures.
193
194 // index of the "head card" for p
195 const size_t hc_index = _rs->card_index_for_addr(p);
196 if (ctbm[hc_index] == CardTable::dirty_card_val()) {
197 // Scan or skip the object, depending on location of its
198 // head card, and remember that we'll have processed all
199 // the objects back up to p, which is thus an upper bound
200 // for the next iteration of a dirty card loop.
201 upper_bound = p; // remember upper bound for next chunk
202 if (p < start_addr) {
236 // apply the closure to the oops in the portion of
237 // the object within mr.
238 p += obj->oop_iterate_size(cl, mr);
239 NOT_PRODUCT(i++);
240 } else {
241 // forget the last object pointer we remembered
242 last_p = nullptr;
243 assert(p < tams, "Tams and above are implicitly marked in ctx");
244 // object under tams isn't marked: skip to next live object
245 p = ctx->get_next_marked_addr(p, tams);
246 assert(p <= tams, "Error 3 in ctx/marking/tams logic");
247 }
248 }
249
250 // SUFFIX: Fix up a possible incomplete scan at right end of window
251 // by scanning the portion of a non-objArray that wasn't done.
252 if (p > right && last_p != nullptr) {
253 assert(last_p < right, "Error");
254 // check if last_p suffix needs scanning
255 const oop last_obj = cast_to_oop(last_p);
256 if (!last_obj->is_objArray()) {
257 // scan the remaining suffix of the object
258 const MemRegion last_mr(right, p);
259 assert(p == last_p + last_obj->size(), "Would miss portion of last_obj");
260 last_obj->oop_iterate(cl, last_mr);
261 log_develop_debug(gc, remset)("Fixed up non-objArray suffix scan in [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
262 p2i(last_mr.start()), p2i(last_mr.end()));
263 } else {
264 log_develop_debug(gc, remset)("Skipped suffix scan of objArray in [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
265 p2i(right), p2i(p));
266 }
267 }
268 NOT_PRODUCT(stats.record_scan_obj_cnt(i);)
269
270 // ==== END DIRTY card range processing ====
271 } else {
272 // ==== BEGIN CLEAN card range processing ====
273
274 // If we are using the write table (during update refs, e.g.), a mutator may dirty
275 // a card at any time. This is fine for the algorithm below because it is only
276 // counting contiguous runs of clean cards (and only for non-product builds).
|
162 // [left, right) is a maximal right-open interval of dirty cards
163 HeapWord* left = _rs->addr_for_card_index(dirty_l); // inclusive
164 HeapWord* right = _rs->addr_for_card_index(dirty_r + 1); // exclusive
165 // Clip right to end_addr established above (still exclusive)
166 right = MIN2(right, end_addr);
167 assert(right <= region->top() && end_addr <= region->top(), "Busted bounds");
168 const MemRegion mr(left, right);
169
170 // NOTE: We'll not call block_start() repeatedly
171 // on a very large object if its head card is dirty. If not,
172 // (i.e. the head card is clean) we'll call it each time we
173 // process a new dirty range on the object. This is always
174 // the case for large object arrays, which are typically more
175 // common.
176 HeapWord* p = _scc->block_start(dirty_l);
177 oop obj = cast_to_oop(p);
178
179 // PREFIX: The object that straddles into this range of dirty cards
180 // from the left may be subject to special treatment unless
181 // it is a reference array (refArray).
182 if (p < left && !obj->is_refArray()) {
183 // The mutator (both compiler and interpreter, but not JNI?)
184 // typically dirty imprecisely (i.e. only the head of an object),
185 // but GC closures typically dirty the object precisely. (It would
186 // be nice to have everything be precise for maximum efficiency.)
187 //
188 // To handle this, we check the head card of the object here and,
189 // if dirty, (arrange to) scan the object in its entirety. If we
190 // find the head card clean, we'll scan only the portion of the
191 // object lying in the dirty card range below, assuming this was
192 // the result of precise marking by GC closures.
193
194 // index of the "head card" for p
195 const size_t hc_index = _rs->card_index_for_addr(p);
196 if (ctbm[hc_index] == CardTable::dirty_card_val()) {
197 // Scan or skip the object, depending on location of its
198 // head card, and remember that we'll have processed all
199 // the objects back up to p, which is thus an upper bound
200 // for the next iteration of a dirty card loop.
201 upper_bound = p; // remember upper bound for next chunk
202 if (p < start_addr) {
236 // apply the closure to the oops in the portion of
237 // the object within mr.
238 p += obj->oop_iterate_size(cl, mr);
239 NOT_PRODUCT(i++);
240 } else {
241 // forget the last object pointer we remembered
242 last_p = nullptr;
243 assert(p < tams, "Tams and above are implicitly marked in ctx");
244 // object under tams isn't marked: skip to next live object
245 p = ctx->get_next_marked_addr(p, tams);
246 assert(p <= tams, "Error 3 in ctx/marking/tams logic");
247 }
248 }
249
250 // SUFFIX: Fix up a possible incomplete scan at right end of window
251 // by scanning the portion of a non-refArray that wasn't done.
252 if (p > right && last_p != nullptr) {
253 assert(last_p < right, "Error");
254 // check if last_p suffix needs scanning
255 const oop last_obj = cast_to_oop(last_p);
256 if (!last_obj->is_refArray()) {
257 // scan the remaining suffix of the object
258 const MemRegion last_mr(right, p);
259 assert(p == last_p + last_obj->size(), "Would miss portion of last_obj");
260 last_obj->oop_iterate(cl, last_mr);
261 log_develop_debug(gc, remset)("Fixed up non-objArray suffix scan in [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
262 p2i(last_mr.start()), p2i(last_mr.end()));
263 } else {
264 log_develop_debug(gc, remset)("Skipped suffix scan of objArray in [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
265 p2i(right), p2i(p));
266 }
267 }
268 NOT_PRODUCT(stats.record_scan_obj_cnt(i);)
269
270 // ==== END DIRTY card range processing ====
271 } else {
272 // ==== BEGIN CLEAN card range processing ====
273
274 // If we are using the write table (during update refs, e.g.), a mutator may dirty
275 // a card at any time. This is fine for the algorithm below because it is only
276 // counting contiguous runs of clean cards (and only for non-product builds).
|