ZRelocate::ZRelocate(ZGeneration* generation)
  : _generation(generation),
    _queue() {}

ZWorkers* ZRelocate::workers() const {
  return _generation->workers();
}

void ZRelocate::start() {
  _queue.activate(workers()->active_workers());
}

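// Record the pointer location p in the remembered set, so that the young
// generation will scan it.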
void ZRelocate::add_remset(volatile zpointer* p) {
  ZGeneration::young()->remember(p);
}

static zaddress relocate_object_inner(ZForwarding* forwarding, zaddress from_addr, ZForwardingCursor* cursor) {
  assert(ZHeap::heap()->is_object_live(from_addr), "Should be live");

  // Allocate object
  const size_t old_size = ZUtils::object_size(from_addr);
  const size_t size = ZUtils::copy_size(from_addr, old_size);
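  // Note: copy_size() may exceed object_size(), e.g. when the copy needs
  // room for an appended identity hash field.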

  ZAllocatorForRelocation* allocator = ZAllocator::relocation(forwarding->to_age());

  const zaddress to_addr = allocator->alloc_object(size);

  if (is_null(to_addr)) {
    // Allocation failed
    return zaddress::null;
  }
  assert(to_addr != from_addr, "addresses must be different");

  // Copy object
  ZUtils::object_copy_disjoint(from_addr, to_addr, old_size);
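  // A moved object can no longer derive its identity hash from its old
  // address, so materialize the hash value in the copy if needed.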
  ZUtils::initialize_hash_if_necessary(to_addr, from_addr);

  // Insert forwarding
  const zaddress to_addr_final = forwarding->insert(from_addr, to_addr, cursor);

  if (to_addr_final != to_addr) {
    // Already relocated, try undo allocation
    allocator->undo_alloc_object(to_addr, size);
  }

  return to_addr_final;
}

zaddress ZRelocate::relocate_object(ZForwarding* forwarding, zaddress_unsafe from_addr) {
  ZForwardingCursor cursor;

  // Lookup forwarding
  zaddress to_addr = forwarding->find(from_addr, &cursor);
  if (!is_null(to_addr)) {
    // Already relocated
    return to_addr;
// ...

  ZPage* target(ZPageAge age) {
    return _target[static_cast<uint>(age) - 1];
  }
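  // Note: _target is indexed by to-age minus one; eden (age 0) is never a
  // relocation target.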

  void set_target(ZPageAge age, ZPage* page) {
    _target[static_cast<uint>(age) - 1] = page;
  }

  size_t object_alignment() const {
    return (size_t)1 << _forwarding->object_alignment_shift();
  }

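  // Account for an object that was already relocated by someone else;
  // its size is attributed as promoted or compacted bytes.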
  void increase_other_forwarded(size_t unaligned_object_size) {
    const size_t aligned_size = align_up(unaligned_object_size, object_alignment());
    if (_forwarding->is_promotion()) {
      _other_promoted += aligned_size;
    } else {
      _other_compacted += aligned_size;
    }
  }

  zaddress try_relocate_object_inner(zaddress from_addr, size_t old_size) {
    ZForwardingCursor cursor;
    ZPage* const to_page = target(_forwarding->to_age());
    zoffset_end from_offset = to_zoffset_end(ZAddress::offset(from_addr));
    zoffset_end top = to_page != nullptr ? to_page->top() : to_zoffset_end(0);
    const size_t new_size = ZUtils::copy_size(from_addr, old_size);
    const size_t size = top == from_offset ? old_size : new_size;
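    // If the to-space top is exactly at the from-object, an in-place
    // relocation will keep the object at its current address; an object
    // that does not move keeps its old size, since its identity hash (if
    // set) can still be derived from the unchanged address.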

    // Lookup forwarding
    {
      const zaddress to_addr = _forwarding->find(from_addr, &cursor);
      if (!is_null(to_addr)) {
        // Already relocated
        increase_other_forwarded(size);
        return to_addr;
      }
    }

    // Allocate object
    const zaddress allocated_addr = _allocator->alloc_object(to_page, size);
    if (is_null(allocated_addr)) {
      // Allocation failed
      return zaddress::null;
    }
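    // The old (smaller) size is only valid if the object really stays at
    // its old address, and the expanded size only if it moves. If the
    // allocation disagrees with the size chosen above, undo and fail.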
    if (old_size != new_size && ((top == from_offset) != (allocated_addr == from_addr))) {
      _allocator->undo_alloc_object(to_page, allocated_addr, size);
      return zaddress::null;
    }

    // Copy object. Use conjoint copying if we are relocating
    // in-place and the new object overlaps with the old object.
    if (_forwarding->in_place_relocation() && allocated_addr + old_size > from_addr) {
      ZUtils::object_copy_conjoint(from_addr, allocated_addr, old_size);
    } else {
      ZUtils::object_copy_disjoint(from_addr, allocated_addr, old_size);
    }
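    // Only a moved object needs its identity hash materialized in the copy.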
    if (from_addr != allocated_addr) {
      ZUtils::initialize_hash_if_necessary(allocated_addr, from_addr);
    }

    // Insert forwarding
    const zaddress to_addr = _forwarding->insert(from_addr, allocated_addr, &cursor);
    if (to_addr != allocated_addr) {
      // Already relocated, undo allocation
      _allocator->undo_alloc_object(to_page, to_addr, size);
      increase_other_forwarded(size);
    }

    return to_addr;
  }

  void update_remset_old_to_old(zaddress from_addr, zaddress to_addr, size_t size) const {
    // Old-to-old relocation - move existing remset bits

    // If this is called for an in-place relocated page, then this code has the
    // responsibility to clear the old remset bits. Extra care is needed because:
    //
    // 1) The to-object copy can overlap with the from-object copy
    // 2) Remset bits of old objects need to be cleared
    //
    // A watermark is used to keep track of how far the old remset bits have been removed.

    const bool in_place = _forwarding->in_place_relocation();
    ZPage* const from_page = _forwarding->page();
    const uintptr_t from_local_offset = from_page->local_offset(from_addr);

    // Note: even with in-place relocation, the to_page could be another page
    ZPage* const to_page = ZHeap::heap()->page(to_addr);

    // Uses _relaxed version to handle that in-place relocation resets _top
    assert(ZHeap::heap()->is_in_page_relaxed(from_page, from_addr), "Must be");
    assert(to_page->is_in(to_addr), "Must be");

    assert(size <= ZUtils::object_size(to_addr), "old size must be <= new size");
    assert(size > 0, "size must be set");

    // If a young generation collection started while the old generation
    // relocated objects, the remembered set bits were flipped from "current"
    // to "previous".
    //
    // We need to select the correct remembered set bitmap to ensure that the
    // old remset bits are found.
    //
    // Note that if the young generation marking (remset scanning) finishes
    // before the old generation relocation has relocated this page, then the
    // young generation will visit this page's previous remembered set bits and
    // move them over to the current bitmap.
    //
    // If the young generation runs multiple cycles while the old generation is
    // relocating, then the first cycle will have consumed the old remset bits
    // and moved associated objects to a new old page. The old relocation could
    // find either of the two bitmaps. So, either it will find the original
    // remset bits for the page, or it will find an empty bitmap for the page.
    // It doesn't matter for correctness, because the young generation marking
    // has already taken care of the bits.

  // ...

    if (!is_null(addr)) {
      // Object has already been relocated
      if (!add_remset_if_young(p, addr)) {
        // Not young - eagerly remap to skip adding a remset entry just to get deferred remapping
        ZBarrier::remap_young_relocated(p, ptr);
      }
      return;
    }

    // Object has not been relocated yet.
    // Don't want to eagerly relocate objects, so just add a remset entry.
    ZRelocate::add_remset(p);
    return;
  }

  void update_remset_promoted(zaddress to_addr) const {
    ZIterator::basic_oop_iterate(to_oop(to_addr), update_remset_promoted_filter_and_remap_per_field);
  }

  void update_remset_for_fields(zaddress from_addr, zaddress to_addr, size_t size) const {
    if (_forwarding->to_age() != ZPageAge::old) {
      // No remembered set in young pages
      return;
    }

    // Need to deal with remset when moving objects to the old generation
    if (_forwarding->from_age() == ZPageAge::old) {
      update_remset_old_to_old(from_addr, to_addr, size);
      return;
    }

    // Normal promotion
    update_remset_promoted(to_addr);
  }

  bool try_relocate_object(zaddress from_addr) {
    const size_t size = ZUtils::object_size(from_addr);
    const zaddress to_addr = try_relocate_object_inner(from_addr, size);

    if (is_null(to_addr)) {
      return false;
    }

    update_remset_for_fields(from_addr, to_addr, size);

    return true;
  }

  void start_in_place_relocation_prepare_remset(ZPage* from_page) {
    if (_forwarding->from_age() != ZPageAge::old) {
      // Only old pages use remset bits
      return;
    }

    if (ZGeneration::old()->active_remset_is_current()) {
      // We want to iterate over and clear the remset bits of the from-space page,
      // and insert current bits in the to-space page. However, with in-place
      // relocation, the from-space and to-space pages are the same. Clearing
      // is destructive, and is difficult to perform before or during the iteration.
      // However, clearing of the current bits has to be done before exposing the
      // to-space objects in the forwarding table.
      //
      // To solve this tricky dependency problem, we start by stashing away the
      // current bits in the previous bits, and clearing the current bits