void ShenandoahPrepareForGenerationalCompactionObjectClosure::finish_young_region() {
  if (_young_to_region != nullptr) {
    log_debug(gc)("Worker %u planned compaction into Young Region %zu, used: %zu",
                  _worker_id, _young_to_region->index(), _young_compact_point - _young_to_region->bottom());
    _young_to_region->set_new_top(_young_compact_point);
    _young_to_region = nullptr;
  }
}

bool ShenandoahPrepareForGenerationalCompactionObjectClosure::is_compact_same_region() {
  return (_from_region == _old_to_region) || (_from_region == _young_to_region);
}

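// Plan the post-compaction location of one live object from _from_region: objects in OLD regions and
// tenurable YOUNG objects are compacted into OLD to-regions (promotion is deferred if no OLD to-region
// is available), while other YOUNG objects are compacted into YOUNG to-regions.  Objects that move have
// their new address recorded via FullGCForwarding.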
void ShenandoahPrepareForGenerationalCompactionObjectClosure::do_object(oop p) {
  assert(_from_region != nullptr, "must set before work");
  assert((_from_region->bottom() <= cast_from_oop<HeapWord*>(p)) && (cast_from_oop<HeapWord*>(p) < _from_region->top()),
         "Object must reside in _from_region");
  assert(_heap->global_generation()->complete_marking_context()->is_marked(p), "must be marked");
  assert(!_heap->global_generation()->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");

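  // old_size is the object's current footprint.  copy_size() may report a larger size for a copy of the
  // object (for example, when a moved object needs extra room to preserve its identity hash with compact
  // object headers), so an object that stays in place uses old_size while one that moves uses new_size.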
  size_t old_size = p->size();
  size_t new_size = p->copy_size(old_size, p->mark());
  uint from_region_age = _from_region->age();
  uint object_age = p->age();

  bool promote_object = false;
  if ((_from_affiliation == ShenandoahAffiliation::YOUNG_GENERATION) &&
      _heap->age_census()->is_tenurable(from_region_age + object_age)) {
    if ((_old_to_region != nullptr) && (_old_compact_point + new_size > _old_to_region->end())) {
      finish_old_region();
      _old_to_region = nullptr;
    }
    if (_old_to_region == nullptr) {
      if (_empty_regions_pos < _empty_regions.length()) {
        ShenandoahHeapRegion* new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
        new_to_region->set_affiliation(OLD_GENERATION);
        _old_to_region = new_to_region;
        _old_compact_point = _old_to_region->bottom();
        promote_object = true;
      }
      // Else this worker thread does not yet have any empty regions into which this aged object can be promoted so
      // we leave promote_object as false, deferring the promotion.
    } else {
      promote_object = true;
    }
  }

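  // A tenurable YOUNG object is promoted only if an OLD to-region has been installed in _old_to_region by
  // this point; otherwise promote_object remains false and the object is compacted within YOUNG for now.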
  if (promote_object || (_from_affiliation == ShenandoahAffiliation::OLD_GENERATION)) {
    assert(_old_to_region != nullptr, "_old_to_region should not be nullptr when evacuating to OLD region");
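    // An object whose new location is its current location is not copied, so it keeps its current size;
    // an object that moves must reserve the (possibly larger) copy size.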
    size_t obj_size = _old_compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
    if (_old_compact_point + obj_size > _old_to_region->end()) {
      ShenandoahHeapRegion* new_to_region;

      log_debug(gc)("Worker %u finishing old region %zu, compact_point: " PTR_FORMAT ", obj_size: %zu"
                    ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT, _worker_id, _old_to_region->index(),
                    p2i(_old_compact_point), obj_size, p2i(_old_compact_point + obj_size), p2i(_old_to_region->end()));

      // Object does not fit. Get a new _old_to_region.
      finish_old_region();
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
        new_to_region->set_affiliation(OLD_GENERATION);
      } else {
        // If we've exhausted the previously selected _old_to_region, we know that the _old_to_region is distinct
        // from _from_region. That's because there is always room for _from_region to be compacted into itself.
        // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction.
        new_to_region = _from_region;
      }

      assert(new_to_region != _old_to_region, "must not reuse same OLD to-region");
      assert(new_to_region != nullptr, "must not be nullptr");
      _old_to_region = new_to_region;
      _old_compact_point = _old_to_region->bottom();
      obj_size = _old_compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
    }

    // The object fits in the current OLD to-region.  Record its new location if it moves:
    assert(_old_compact_point + obj_size <= _old_to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(nullptr, p);
    if (_old_compact_point != cast_from_oop<HeapWord*>(p)) {
      _preserved_marks->push_if_necessary(p, p->mark());
      FullGCForwarding::forward_to(p, cast_to_oop(_old_compact_point));
    }
    _old_compact_point += obj_size;
  } else {
    assert(_from_affiliation == ShenandoahAffiliation::YOUNG_GENERATION,
           "_from_region must be OLD_GENERATION or YOUNG_GENERATION");
    assert(_young_to_region != nullptr, "_young_to_region should not be nullptr when compacting YOUNG _from_region");

    // After full gc compaction, all regions have age 0. Embed the region's age into the object's age in order to preserve
    // tenuring progress.
    if (_heap->is_aging_cycle()) {
      ShenandoahHeap::increase_object_age(p, from_region_age + 1);
    } else {
      ShenandoahHeap::increase_object_age(p, from_region_age);
    }

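    // As in the OLD path: a stationary object keeps its current size, a moving object reserves its copy size.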
    size_t obj_size = _young_compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
    if (_young_compact_point + obj_size > _young_to_region->end()) {
      ShenandoahHeapRegion* new_to_region;

      log_debug(gc)("Worker %u finishing young region %zu, compact_point: " PTR_FORMAT ", obj_size: %zu"
                    ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT, _worker_id, _young_to_region->index(),
                    p2i(_young_compact_point), obj_size, p2i(_young_compact_point + obj_size), p2i(_young_to_region->end()));

      // Object does not fit. Get a new _young_to_region.
      finish_young_region();
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
        new_to_region->set_affiliation(YOUNG_GENERATION);
      } else {
        // If we've exhausted the previously selected _young_to_region, we know that the _young_to_region is distinct
        // from _from_region. That's because there is always room for _from_region to be compacted into itself.
        // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction.
        new_to_region = _from_region;
      }

      assert(new_to_region != _young_to_region, "must not reuse same YOUNG to-region");
      assert(new_to_region != nullptr, "must not be nullptr");
      _young_to_region = new_to_region;
      _young_compact_point = _young_to_region->bottom();
      obj_size = _young_compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
    }

    // The object fits in the current YOUNG to-region.  Record its new location if it moves:
    assert(_young_compact_point + obj_size <= _young_to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(nullptr, p);

    if (_young_compact_point != cast_from_oop<HeapWord*>(p)) {
      _preserved_marks->push_if_necessary(p, p->mark());
      FullGCForwarding::forward_to(p, cast_to_oop(_young_compact_point));
    }
    _young_compact_point += obj_size;
  }
}
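
// Usage sketch (illustrative only): a per-worker prepare-for-compaction task typically points this closure
// at each region it owns and lets marked-object iteration invoke do_object() for every live object.  The
// helper names below are assumptions modeled on the non-generational closure, not guaranteed API:
//
//   closure.set_from_region(region);                // establish _from_region and _from_affiliation
//   heap->marked_object_iterate(region, &closure);  // plan a new location for each live object
//   closure.finish();                               // seal the current OLD and YOUNG to-regions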