void ShenandoahPrepareForGenerationalCompactionObjectClosure::finish_young_region() {
  if (_young_to_region != nullptr) {
    log_debug(gc)("Worker %u planned compaction into Young Region " SIZE_FORMAT ", used: " SIZE_FORMAT,
                  _worker_id, _young_to_region->index(), _young_compact_point - _young_to_region->bottom());
    _young_to_region->set_new_top(_young_compact_point);
    _young_to_region = nullptr;
  }
}

bool ShenandoahPrepareForGenerationalCompactionObjectClosure::is_compact_same_region() {
  return (_from_region == _old_to_region) || (_from_region == _young_to_region);
}

void ShenandoahPrepareForGenerationalCompactionObjectClosure::do_object(oop p) {
  assert(_from_region != nullptr, "must set before work");
  assert((_from_region->bottom() <= cast_from_oop<HeapWord*>(p)) && (cast_from_oop<HeapWord*>(p) < _from_region->top()),
         "Object must reside in _from_region");
  assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
  assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");

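  // old_size is the object's current size; new_size is the size its copy will need. The two can
  // differ when the object moves (for instance, with compact object headers a copied object may
  // need extra space for its identity hash), so the code below picks the size according to whether
  // the object stays in place.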
  size_t old_size = p->size();
  size_t new_size = p->copy_size(old_size, p->mark());
  uint from_region_age = _from_region->age();
  uint object_age = p->age();

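  // Decide whether to promote this object: an object in a YOUNG from-region whose combined region
  // age and object age has reached the tenuring threshold is planned into an OLD to-region,
  // provided this worker can supply one; otherwise the promotion is deferred.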
  bool promote_object = false;
  if ((_from_affiliation == ShenandoahAffiliation::YOUNG_GENERATION) &&
      (from_region_age + object_age >= _tenuring_threshold)) {
    if ((_old_to_region != nullptr) && (_old_compact_point + new_size > _old_to_region->end())) {
      finish_old_region();
      _old_to_region = nullptr;
    }
    if (_old_to_region == nullptr) {
      if (_empty_regions_pos < _empty_regions.length()) {
        ShenandoahHeapRegion* new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
        new_to_region->set_affiliation(OLD_GENERATION);
        _old_to_region = new_to_region;
        _old_compact_point = _old_to_region->bottom();
        promote_object = true;
      }
      // Else this worker thread does not yet have any empty regions into which this aged object can be promoted,
      // so we leave promote_object as false, deferring the promotion.
    } else {
      promote_object = true;
    }
  }

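  // Plan the object's new location: into an OLD to-region if it is being promoted or already lives
  // in an OLD from-region, otherwise into a YOUNG to-region.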
  if (promote_object || (_from_affiliation == ShenandoahAffiliation::OLD_GENERATION)) {
    assert(_old_to_region != nullptr, "_old_to_region should not be nullptr when evacuating to OLD region");
    size_t obj_size = _old_compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
    if (_old_compact_point + obj_size > _old_to_region->end()) {
      ShenandoahHeapRegion* new_to_region;

      log_debug(gc)("Worker %u finishing old region " SIZE_FORMAT ", compact_point: " PTR_FORMAT ", obj_size: " SIZE_FORMAT
                    ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT, _worker_id, _old_to_region->index(),
                    p2i(_old_compact_point), obj_size, p2i(_old_compact_point + obj_size), p2i(_old_to_region->end()));

      // Object does not fit. Get a new _old_to_region.
      finish_old_region();
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
        new_to_region->set_affiliation(OLD_GENERATION);
      } else {
        // If we've exhausted the previously selected _old_to_region, we know that the _old_to_region is distinct
        // from _from_region. That's because there is always room for _from_region to be compacted into itself.
        // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction.
        new_to_region = _from_region;
      }

      assert(new_to_region != _old_to_region, "must not reuse same OLD to-region");
      assert(new_to_region != nullptr, "must not be nullptr");
      _old_to_region = new_to_region;
      _old_compact_point = _old_to_region->bottom();
      obj_size = _old_compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
    }

    // The object fits in the current region. Record its new location unless it stays in place:
    assert(_old_compact_point + obj_size <= _old_to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(nullptr, p);
    if (_old_compact_point != cast_from_oop<HeapWord*>(p)) {
      _preserved_marks->push_if_necessary(p, p->mark());
      FullGCForwarding::forward_to(p, cast_to_oop(_old_compact_point));
    }
    _old_compact_point += obj_size;
  } else {
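    // The object remains in the YOUNG generation: fold the from-region's age into the object's age
    // (plus one on an aging cycle), then plan its new location in this worker's young to-region.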
    assert(_from_affiliation == ShenandoahAffiliation::YOUNG_GENERATION,
           "_from_region must be OLD_GENERATION or YOUNG_GENERATION");
    assert(_young_to_region != nullptr, "_young_to_region should not be nullptr when compacting YOUNG _from_region");

    // After full gc compaction, all regions have age 0. Embed the region's age into the object's age in order to preserve
    // tenuring progress.
    if (_heap->is_aging_cycle()) {
      ShenandoahHeap::increase_object_age(p, from_region_age + 1);
    } else {
      ShenandoahHeap::increase_object_age(p, from_region_age);
    }

    size_t obj_size = _young_compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
    if (_young_compact_point + obj_size > _young_to_region->end()) {
      ShenandoahHeapRegion* new_to_region;

      log_debug(gc)("Worker %u finishing young region " SIZE_FORMAT ", compact_point: " PTR_FORMAT ", obj_size: " SIZE_FORMAT
                    ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT, _worker_id, _young_to_region->index(),
                    p2i(_young_compact_point), obj_size, p2i(_young_compact_point + obj_size), p2i(_young_to_region->end()));

      // Object does not fit. Get a new _young_to_region.
      finish_young_region();
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
        new_to_region->set_affiliation(YOUNG_GENERATION);
      } else {
        // If we've exhausted the previously selected _young_to_region, we know that the _young_to_region is distinct
        // from _from_region. That's because there is always room for _from_region to be compacted into itself.
        // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction.
        new_to_region = _from_region;
      }

      assert(new_to_region != _young_to_region, "must not reuse same YOUNG to-region");
      assert(new_to_region != nullptr, "must not be nullptr");
      _young_to_region = new_to_region;
      _young_compact_point = _young_to_region->bottom();
      obj_size = _young_compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
    }

    // The object fits in the current region. Record its new location unless it stays in place:
    assert(_young_compact_point + obj_size <= _young_to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(nullptr, p);

    if (_young_compact_point != cast_from_oop<HeapWord*>(p)) {
      _preserved_marks->push_if_necessary(p, p->mark());
      FullGCForwarding::forward_to(p, cast_to_oop(_young_compact_point));
    }
    _young_compact_point += obj_size;
  }
}