ZRelocate::ZRelocate(ZGeneration* generation)
  : _generation(generation),
    _queue() {}

ZWorkers* ZRelocate::workers() const {
  return _generation->workers();
}

void ZRelocate::start() {
  _queue.activate(workers()->active_workers());
}

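// Remembered set entries are always registered through the young generation,
// regardless of which generation drives the relocation.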
void ZRelocate::add_remset(volatile zpointer* p) {
  ZGeneration::young()->remember(p);
}

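// Relocation slow path: allocate a copy in a to-space page of the appropriate
// age, copy the object, then race to publish the forwarding entry. Exactly one
// thread wins; a loser undoes its allocation and returns the winner's copy.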
static zaddress relocate_object_inner(ZForwarding* forwarding, zaddress from_addr, ZForwardingCursor* cursor) {
  assert(ZHeap::heap()->is_object_live(from_addr), "Should be live");

  // Allocate object
  const size_t old_size = ZUtils::object_size(from_addr);
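  // Note: the copy size may exceed the current object size, e.g. when space
  // must be reserved for data that is only materialized once the object
  // moves (see the initialize_hash_if_necessary call below).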
  const size_t size = ZUtils::copy_size(from_addr, old_size);

  ZAllocatorForRelocation* allocator = ZAllocator::relocation(forwarding->to_age());

  const zaddress to_addr = allocator->alloc_object(size);

  if (is_null(to_addr)) {
    // Allocation failed
    return zaddress::null;
  }
  assert(to_addr != from_addr, "addresses must be different");

  // Copy object
  ZUtils::object_copy_disjoint(from_addr, to_addr, old_size);
  ZUtils::initialize_hash_if_necessary(to_addr, from_addr);

  // Insert forwarding
  const zaddress to_addr_final = forwarding->insert(from_addr, to_addr, cursor);

  if (to_addr_final != to_addr) {
    // Already relocated, try undo allocation
    allocator->undo_alloc_object(to_addr, size);
  }

  return to_addr_final;
}

zaddress ZRelocate::relocate_object(ZForwarding* forwarding, zaddress_unsafe from_addr) {
  ZForwardingCursor cursor;

  // Lookup forwarding
  zaddress to_addr = forwarding->find(from_addr, &cursor);
  if (!is_null(to_addr)) {
    // Already relocated
    return to_addr;

...

  return _target[static_cast<uint>(age) - 1];
}

void set_target(ZPageAge age, ZPage* page) {
  _target[static_cast<uint>(age) - 1] = page;
}

size_t object_alignment() const {
  return (size_t)1 << _forwarding->object_alignment_shift();
}

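// Accounting for objects that were already relocated by some other thread,
// split by whether this forwarding promotes to old or compacts within the
// same generation.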
void increase_other_forwarded(size_t unaligned_object_size) {
  const size_t aligned_size = align_up(unaligned_object_size, object_alignment());
  if (_forwarding->is_promotion()) {
    _other_promoted += aligned_size;
  } else {
    _other_compacted += aligned_size;
  }
}

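// Attempts to relocate the object at from_addr into the current target page.
// Returns the to-address of the copy (ours, or a racing thread's existing
// copy), or a null address if the attempt failed and must be handled by the
// caller.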
zaddress try_relocate_object_inner(zaddress from_addr, size_t old_size) {
  ZForwardingCursor cursor;
  ZPage* const to_page = target(_forwarding->to_age());
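  // If the next allocation in the to-page would land exactly on the
  // from-object (the page top equals the from-offset), the object is about
  // to be copied onto its own address and cannot grow, so the old size is
  // used. Otherwise, size the copy for a potentially expanded object.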
  zoffset_end from_offset = to_zoffset_end(ZAddress::offset(from_addr));
  zoffset_end top = to_page != nullptr ? to_page->top() : to_zoffset_end(0);
  const size_t new_size = ZUtils::copy_size(from_addr, old_size);
  const size_t size = top == from_offset ? old_size : new_size;

  // Lookup forwarding
  {
    const zaddress to_addr = _forwarding->find(from_addr, &cursor);
    if (!is_null(to_addr)) {
      // Already relocated
      increase_other_forwarded(size);
      return to_addr;
    }
  }

  // Allocate object
  const zaddress allocated_addr = _allocator->alloc_object(to_page, size);
  if (is_null(allocated_addr)) {
    // Allocation failed
    return zaddress::null;
  }
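  // The copy size was chosen by predicting whether this would be an in-place
  // copy (top == from_offset). If the allocation outcome contradicts the
  // prediction (allocated_addr == from_addr), the chosen size may be wrong,
  // so undo the allocation and fail this attempt.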
  if (old_size != new_size && ((top == from_offset) != (allocated_addr == from_addr))) {
    _allocator->undo_alloc_object(to_page, allocated_addr, size);
    return zaddress::null;
  }

  // Copy object. Use conjoint copying if we are relocating
  // in-place and the new object overlaps with the old object.
  if (_forwarding->in_place_relocation() && allocated_addr + old_size > from_addr) {
    ZUtils::object_copy_conjoint(from_addr, allocated_addr, old_size);
  } else {
    ZUtils::object_copy_disjoint(from_addr, allocated_addr, old_size);
  }
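  // An in-place copy keeps its address; only an object that actually moved
  // may need its identity hash materialized in the new copy.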
  if (from_addr != allocated_addr) {
    ZUtils::initialize_hash_if_necessary(allocated_addr, from_addr);
  }

  // Insert forwarding
  const zaddress to_addr = _forwarding->insert(from_addr, allocated_addr, &cursor);
  if (to_addr != allocated_addr) {
    // Already relocated, undo our own allocation
    _allocator->undo_alloc_object(to_page, allocated_addr, size);
    increase_other_forwarded(size);
  }

  return to_addr;
}

void update_remset_old_to_old(zaddress from_addr, zaddress to_addr, size_t size) const {
  // Old-to-old relocation - move existing remset bits

  // If this is called for an in-place relocated page, then this code has the
  // responsibility to clear the old remset bits. Extra care is needed because:
  //
  // 1) The to-object copy can overlap with the from-object copy
  // 2) Remset bits of old objects need to be cleared
  //
  // A watermark is used to keep track of how far the old remset bits have been cleared.

  const bool in_place = _forwarding->in_place_relocation();
  ZPage* const from_page = _forwarding->page();
  const uintptr_t from_local_offset = from_page->local_offset(from_addr);

  // Note: even with in-place relocation, the to_page could be another page
  ZPage* const to_page = ZHeap::heap()->page(to_addr);

  // Use the _relaxed version to handle that in-place relocation resets _top
  assert(ZHeap::heap()->is_in_page_relaxed(from_page, from_addr), "Must be");
  assert(to_page->is_in(to_addr), "Must be");

  assert(size <= ZUtils::object_size(to_addr), "old size must be <= new size");
  assert(size > 0, "size must be set");

  // If a young generation collection started while the old generation
  // was relocating objects, the remembered set bits were flipped from
  // "current" to "previous".
  //
  // We need to select the correct remembered set bitmap to ensure that the
  // old remset bits are found.
  //
  // Note that if the young generation marking (remset scanning) finishes
  // before the old generation relocation has relocated this page, then the
  // young generation will visit this page's previous remembered set bits and
  // move them over to the current bitmap.
  //
  // If the young generation runs multiple cycles while the old generation is
  // relocating, then the first cycle will have consumed the old remset bits
  // and moved the associated objects to a new old page. The old relocation
  // could then find either of the two bitmaps. So, either it will find the
  // original remset bits for the page, or it will find an empty bitmap for
  // the page. It doesn't matter for correctness, because the young generation
  // marking has already taken care of the bits.

...

  if (!is_null(addr)) {
    // Object has already been relocated
    if (!add_remset_if_young(p, addr)) {
      // Not young - eagerly remap to skip adding a remset entry just to get deferred remapping
      ZBarrier::remap_young_relocated(p, ptr);
    }
    return;
  }

  // Object has not been relocated yet
  // Don't want to eagerly relocate objects, so just add a remset entry
  ZRelocate::add_remset(p);
  return;
}

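// A freshly promoted object may still hold pointers into the young
// generation; visit each of its fields to add remset entries where needed.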
void update_remset_promoted(zaddress to_addr) const {
  ZIterator::basic_oop_iterate(to_oop(to_addr), update_remset_promoted_filter_and_remap_per_field);
}

void update_remset_for_fields(zaddress from_addr, zaddress to_addr, size_t size) const {
  if (_forwarding->to_age() != ZPageAge::old) {
    // No remembered set in young pages
    return;
  }

  // Need to deal with remset when moving objects to the old generation
  if (_forwarding->from_age() == ZPageAge::old) {
    update_remset_old_to_old(from_addr, to_addr, size);
    return;
  }

  // Normal promotion
  update_remset_promoted(to_addr);
}
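
// Relocates a single live object. Returns false when the to-space allocation
// failed, in which case the caller falls back to in-place relocation.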
bool try_relocate_object(zaddress from_addr) {
  const size_t size = ZUtils::object_size(from_addr);
  const zaddress to_addr = try_relocate_object_inner(from_addr, size);

  if (is_null(to_addr)) {
    return false;
  }

  update_remset_for_fields(from_addr, to_addr, size);

  return true;
}

void start_in_place_relocation_prepare_remset(ZPage* from_page) {
  if (_forwarding->from_age() != ZPageAge::old) {
    // Only old pages use remset bits
    return;
  }

  if (ZGeneration::old()->active_remset_is_current()) {
    // We want to iterate over and clear the remset bits of the from-space page,
    // and insert current bits in the to-space page. However, with in-place
    // relocation, the from-space and to-space pages are the same. Clearing
    // is destructive, and is difficult to perform before or during the iteration.
    // However, clearing of the current bits has to be done before exposing the
    // to-space objects in the forwarding table.
    //
    // To solve this tricky dependency problem, we start by stashing away the
    // current bits in the previous bits, and clearing the current bits
...