< prev index next >

src/hotspot/share/gc/z/zRelocate.cpp

Print this page

 338     _small_targets(),
 339     _medium_targets(),
 340     _shared_medium_targets() {}
 341 
// Returns the worker-thread pool of the generation this relocator serves.
ZWorkers* ZRelocate::workers() const {
  return _generation->workers();
}
 345 
// Starts a relocation phase: activates the relocation queue, sized for the
// number of currently active worker threads.
void ZRelocate::start() {
  _queue.activate(workers()->active_workers());
}
 349 
// Records the pointer location p in the young generation's remembered set.
void ZRelocate::add_remset(volatile zpointer* p) {
  ZGeneration::young()->remember(p);
}
 353 
// Relocates one live object out of the page covered by `forwarding`.
// Returns the address of the relocated copy, or zaddress::null if the
// to-space allocation failed. Thread-safe: several threads may race to
// relocate the same object; the forwarding-table insert decides the winner.
static zaddress relocate_object_inner(ZForwarding* forwarding, zaddress from_addr, ZForwardingCursor* cursor) {
  assert(ZHeap::heap()->is_object_live(from_addr), "Should be live");

  // Allocate object
  const size_t size = ZUtils::object_size(from_addr);
  const ZPageAge to_age = forwarding->to_age();

  const zaddress to_addr = ZHeap::heap()->alloc_object_for_relocation(size, to_age);

  if (is_null(to_addr)) {
    // Allocation failed
    return zaddress::null;
  }

  // Copy object
  ZUtils::object_copy_disjoint(from_addr, to_addr, size);

  // Insert forwarding
  const zaddress to_addr_final = forwarding->insert(from_addr, to_addr, cursor);

  if (to_addr_final != to_addr) {
    // Already relocated, try undo allocation.
    // Another thread won the race and installed its own copy; our copy is
    // unreachable, so hand the memory back (best effort).
    ZHeap::heap()->undo_alloc_object_for_relocation(to_addr, size);
  }

  return to_addr_final;
}
 381 
 382 zaddress ZRelocate::relocate_object(ZForwarding* forwarding, zaddress_unsafe from_addr) {
 383   ZForwardingCursor cursor;
 384 
 385   // Lookup forwarding
 386   zaddress to_addr = forwarding->find(from_addr, &cursor);
 387   if (!is_null(to_addr)) {
 388     // Already relocated
 389     return to_addr;

 590   ZForwarding*        _forwarding;
 591   ZRelocationTargets* _targets;
 592   ZGeneration* const  _generation;
 593   size_t              _other_promoted;
 594   size_t              _other_compacted;
 595   ZStringDedupContext _string_dedup_context;
 596 
 597   size_t object_alignment() const {
 598     return (size_t)1 << _forwarding->object_alignment_shift();
 599   }
 600 
 601   void increase_other_forwarded(size_t unaligned_object_size) {
 602     const size_t aligned_size = align_up(unaligned_object_size, object_alignment());
 603     if (_forwarding->is_promotion()) {
 604       _other_promoted += aligned_size;
 605     } else {
 606       _other_compacted += aligned_size;
 607     }
 608   }
 609 
  // Worker-side relocation of one object. Returns the to-space address of
  // the object (either our copy or a racing thread's), or zaddress::null if
  // the allocation failed and the caller must fall back.
  zaddress try_relocate_object_inner(zaddress from_addr, uint32_t partition_id) {
    ZForwardingCursor cursor;

    const size_t size = ZUtils::object_size(from_addr);
    ZPage* const to_page = _targets->get(partition_id, _forwarding->to_age());

    // Lookup forwarding
    {
      const zaddress to_addr = _forwarding->find(from_addr, &cursor);
      if (!is_null(to_addr)) {
        // Already relocated
        increase_other_forwarded(size);
        return to_addr;
      }
    }

    // Allocate object
    const zaddress allocated_addr = _allocator->alloc_object(to_page, size);
    if (is_null(allocated_addr)) {
      // Allocation failed
      return zaddress::null;
    }

    // Copy object. Use conjoint copying if we are relocating
    // in-place and the new object overlaps with the old object.
    if (_forwarding->in_place_relocation() && allocated_addr + size > from_addr) {
      ZUtils::object_copy_conjoint(from_addr, allocated_addr, size);
    } else {
      ZUtils::object_copy_disjoint(from_addr, allocated_addr, size);
    }

    // Insert forwarding
    const zaddress to_addr = _forwarding->insert(from_addr, allocated_addr, &cursor);
    if (to_addr != allocated_addr) {
      // Already relocated, undo allocation.
      // NOTE(review): the undo passes to_addr (the winning thread's copy),
      // not allocated_addr (the address we allocated above) - verify this is
      // intended behavior of undo_alloc_object.
      _allocator->undo_alloc_object(to_page, to_addr, size);
      increase_other_forwarded(size);
    }

    return to_addr;
  }
 651 
 652   void update_remset_old_to_old(zaddress from_addr, zaddress to_addr) const {
 653     // Old-to-old relocation - move existing remset bits
 654 
 655     // If this is called for an in-place relocated page, then this code has the
 656     // responsibility to clear the old remset bits. Extra care is needed because:
 657     //
 658     // 1) The to-object copy can overlap with the from-object copy
 659     // 2) Remset bits of old objects need to be cleared
 660     //
 661     // A watermark is used to keep track of how far the old remset bits have been removed.
 662 
 663     const bool in_place = _forwarding->in_place_relocation();
 664     ZPage* const from_page = _forwarding->page();
 665     const uintptr_t from_local_offset = from_page->local_offset(from_addr);
 666 
 667     // Note: even with in-place relocation, the to_page could be another page
 668     ZPage* const to_page = ZHeap::heap()->page(to_addr);
 669 
 670     // Uses _relaxed version to handle that in-place relocation resets _top
 671     assert(ZHeap::heap()->is_in_page_relaxed(from_page, from_addr), "Must be");
 672     assert(to_page->is_in(to_addr), "Must be");
 673 
 674     // Read the size from the to-object, since the from-object
 675     // could have been overwritten during in-place relocation.
 676     const size_t size = ZUtils::object_size(to_addr);
 677 
 678     // If a young generation collection started while the old generation
 679     // relocated objects, the remembered set bits were flipped from "current"
 680     // to "previous".
 681     //
 682     // We need to select the correct remembered sets bitmap to ensure that the
 683     // old remset bits are found.
 684     //
 685     // Note that if the young generation marking (remset scanning) finishes
 686     // before the old generation relocation has relocated this page, then the
 687     // young generation will visit this page's previous remembered set bits and
 688     // move them over to the current bitmap.
 689     //
 690     // If the young generation runs multiple cycles while the old generation is
 691     // relocating, then the first cycle will have consumed the old remset
 692     // bits and moved associated objects to a new old page. The old relocation
 693     // could find either of the two bitmaps. So, either it will find the original
 694     // remset bits for the page, or it will find an empty bitmap for the page. It
 695     // doesn't matter for correctness, because the young generation marking has
 696     // already taken care of the bits.

 781 
 782     if (!is_null(addr)) {
 783       // Object has already been relocated
 784       if (!add_remset_if_young(p, addr)) {
 785         // Not young - eagerly remap to skip adding a remset entry just to get deferred remapping
 786         ZBarrier::remap_young_relocated(p, ptr);
 787       }
 788       return;
 789     }
 790 
 791     // Object has not been relocated yet
 792     // Don't want to eagerly relocate objects, so just add a remset
 793     ZRelocate::add_remset(p);
 794     return;
 795   }
 796 
  // Visits every oop field of the just-promoted object and applies the
  // per-field remset filter/remap
  // (see update_remset_promoted_filter_and_remap_per_field).
  void update_remset_promoted(zaddress to_addr) const {
    ZIterator::basic_oop_iterate(to_oop(to_addr), update_remset_promoted_filter_and_remap_per_field);
  }
 800 
 801   void update_remset_for_fields(zaddress from_addr, zaddress to_addr) const {
 802     if (_forwarding->to_age() != ZPageAge::old) {
 803       // No remembered set in young pages
 804       return;
 805     }
 806 
 807     // Need to deal with remset when moving objects to the old generation
 808     if (_forwarding->from_age() == ZPageAge::old) {
 809       update_remset_old_to_old(from_addr, to_addr);
 810       return;
 811     }
 812 
 813     // Normal promotion
 814     update_remset_promoted(to_addr);
 815   }
 816 
 817   void maybe_string_dedup(zaddress to_addr) {
 818     if (_forwarding->is_promotion()) {
 819       // Only deduplicate promoted objects, and let short-lived strings simply die instead.
 820       _string_dedup_context.request(to_oop(to_addr));
 821     }
 822   }
 823 
 824   bool try_relocate_object(zaddress from_addr, uint32_t partition_id) {
 825     const zaddress to_addr = try_relocate_object_inner(from_addr, partition_id);

 826 
 827     if (is_null(to_addr)) {
 828       return false;
 829     }
 830 
 831     update_remset_for_fields(from_addr, to_addr);
 832 
 833     maybe_string_dedup(to_addr);
 834 
 835     return true;
 836   }
 837 
 838   void start_in_place_relocation_prepare_remset(ZPage* from_page) {
 839     if (_forwarding->from_age() != ZPageAge::old) {
 840       // Only old pages use remset bits
 841       return;
 842     }
 843 
 844     if (ZGeneration::old()->active_remset_is_current()) {
 845       // We want to iterate over and clear the remset bits of the from-space page,
 846       // and insert current bits in the to-space page. However, with in-place
 847       // relocation, the from-space and to-space pages are the same. Clearing
 848       // is destructive, and is difficult to perform before or during the iteration.
 849       // However, clearing of the current bits has to be done before exposing the
 850       // to-space objects in the forwarding table.
 851       //

 338     _small_targets(),
 339     _medium_targets(),
 340     _shared_medium_targets() {}
 341 
// Returns the worker-thread pool of the generation this relocator serves.
ZWorkers* ZRelocate::workers() const {
  return _generation->workers();
}
 345 
// Starts a relocation phase: activates the relocation queue, sized for the
// number of currently active worker threads.
void ZRelocate::start() {
  _queue.activate(workers()->active_workers());
}
 349 
// Records the pointer location p in the young generation's remembered set.
void ZRelocate::add_remset(volatile zpointer* p) {
  ZGeneration::young()->remember(p);
}
 353 
// Relocates one live object out of the page covered by `forwarding`.
// Returns the address of the relocated copy, or zaddress::null if the
// to-space allocation failed. Thread-safe: several threads may race to
// relocate the same object; the forwarding-table insert decides the winner.
static zaddress relocate_object_inner(ZForwarding* forwarding, zaddress from_addr, ZForwardingCursor* cursor) {
  assert(ZHeap::heap()->is_object_live(from_addr), "Should be live");

  // Allocate object
  const size_t old_size = ZUtils::object_size(from_addr);
  // The copy may need more space than the original - presumably extra room
  // for a late-materialized identity hash (see initialize_hash_if_necessary
  // below) - TODO confirm against ZUtils::copy_size
  const size_t size = ZUtils::copy_size(from_addr, old_size);
  const ZPageAge to_age = forwarding->to_age();

  const zaddress to_addr = ZHeap::heap()->alloc_object_for_relocation(size, to_age);

  if (is_null(to_addr)) {
    // Allocation failed
    return zaddress::null;
  }
  // This path never relocates in place, so the disjoint copy below is safe
  assert(to_addr != from_addr, "addresses must be different");

  // Copy object. Only old_size bytes are copied; any extra space in the
  // copy is handled by initialize_hash_if_necessary.
  ZUtils::object_copy_disjoint(from_addr, to_addr, old_size);
  ZUtils::initialize_hash_if_necessary(to_addr, from_addr);

  // Insert forwarding
  const zaddress to_addr_final = forwarding->insert(from_addr, to_addr, cursor);

  if (to_addr_final != to_addr) {
    // Already relocated, try undo allocation.
    // Another thread won the race and installed its own copy; our copy is
    // unreachable, so hand the memory back (best effort).
    ZHeap::heap()->undo_alloc_object_for_relocation(to_addr, size);
  }

  return to_addr_final;
}
 384 
 385 zaddress ZRelocate::relocate_object(ZForwarding* forwarding, zaddress_unsafe from_addr) {
 386   ZForwardingCursor cursor;
 387 
 388   // Lookup forwarding
 389   zaddress to_addr = forwarding->find(from_addr, &cursor);
 390   if (!is_null(to_addr)) {
 391     // Already relocated
 392     return to_addr;

 593   ZForwarding*        _forwarding;
 594   ZRelocationTargets* _targets;
 595   ZGeneration* const  _generation;
 596   size_t              _other_promoted;
 597   size_t              _other_compacted;
 598   ZStringDedupContext _string_dedup_context;
 599 
 600   size_t object_alignment() const {
 601     return (size_t)1 << _forwarding->object_alignment_shift();
 602   }
 603 
 604   void increase_other_forwarded(size_t unaligned_object_size) {
 605     const size_t aligned_size = align_up(unaligned_object_size, object_alignment());
 606     if (_forwarding->is_promotion()) {
 607       _other_promoted += aligned_size;
 608     } else {
 609       _other_compacted += aligned_size;
 610     }
 611   }
 612 
  // Worker-side relocation of one object. Returns the to-space address of
  // the object (either our copy or a racing thread's), or zaddress::null if
  // the allocation failed and the caller must fall back.
  // old_size is the from-object's size, measured by the caller before any
  // in-place copy can clobber the from-object.
  zaddress try_relocate_object_inner(zaddress from_addr, uint32_t partition_id, size_t old_size) {
    ZForwardingCursor cursor;

    ZPage* const to_page = _targets->get(partition_id, _forwarding->to_age());
    zoffset_end from_offset = to_zoffset_end(ZAddress::offset(from_addr));
    zoffset_end top = to_page != nullptr ? to_page->top() : to_zoffset_end(0);
    // copy_size may exceed old_size - presumably extra room for a
    // late-materialized identity hash (see initialize_hash_if_necessary
    // below) - TODO confirm. If the to-page's top coincides with the
    // from-object's offset, the allocation below is expected to return the
    // object's own address (an in-place copy), in which case the object
    // keeps its address and therefore its old size.
    const size_t new_size = ZUtils::copy_size(from_addr, old_size);
    const size_t size = top == from_offset ? old_size : new_size;

    // Lookup forwarding
    {
      const zaddress to_addr = _forwarding->find(from_addr, &cursor);
      if (!is_null(to_addr)) {
        // Already relocated
        increase_other_forwarded(size);
        return to_addr;
      }
    }

    // Allocate object
    const zaddress allocated_addr = _allocator->alloc_object(to_page, size);
    if (is_null(allocated_addr)) {
      // Allocation failed
      return zaddress::null;
    }
    // The size was chosen from a prediction of whether this allocation would
    // be in-place (top == from_offset). If the prediction disagrees with the
    // actual outcome (allocated_addr == from_addr), the chosen size is wrong;
    // undo the allocation and let the caller retry/fall back.
    if (old_size != new_size && ((top == from_offset) != (allocated_addr == from_addr))) {
      _allocator->undo_alloc_object(to_page, allocated_addr, size);
      return zaddress::null;
    }

    // Copy object. Use conjoint copying if we are relocating
    // in-place and the new object overlaps with the old object.
    if (_forwarding->in_place_relocation() && allocated_addr + old_size > from_addr) {
      ZUtils::object_copy_conjoint(from_addr, allocated_addr, old_size);
    } else {
      ZUtils::object_copy_disjoint(from_addr, allocated_addr, old_size);
    }
    if (from_addr != allocated_addr) {
      // The object moved - set up its identity hash in the copy if needed
      ZUtils::initialize_hash_if_necessary(allocated_addr, from_addr);
    }

    // Insert forwarding
    const zaddress to_addr = _forwarding->insert(from_addr, allocated_addr, &cursor);
    if (to_addr != allocated_addr) {
      // Already relocated, undo allocation.
      // NOTE(review): the undo passes to_addr (the winning thread's copy),
      // not allocated_addr (the address we allocated above) - verify this is
      // intended behavior of undo_alloc_object.
      _allocator->undo_alloc_object(to_page, to_addr, size);
      increase_other_forwarded(size);
    }

    return to_addr;
  }
 664 
 665   void update_remset_old_to_old(zaddress from_addr, zaddress to_addr, size_t size) const {
 666     // Old-to-old relocation - move existing remset bits
 667 
 668     // If this is called for an in-place relocated page, then this code has the
 669     // responsibility to clear the old remset bits. Extra care is needed because:
 670     //
 671     // 1) The to-object copy can overlap with the from-object copy
 672     // 2) Remset bits of old objects need to be cleared
 673     //
 674     // A watermark is used to keep track of how far the old remset bits have been removed.
 675 
 676     const bool in_place = _forwarding->in_place_relocation();
 677     ZPage* const from_page = _forwarding->page();
 678     const uintptr_t from_local_offset = from_page->local_offset(from_addr);
 679 
 680     // Note: even with in-place relocation, the to_page could be another page
 681     ZPage* const to_page = ZHeap::heap()->page(to_addr);
 682 
 683     // Uses _relaxed version to handle that in-place relocation resets _top
 684     assert(ZHeap::heap()->is_in_page_relaxed(from_page, from_addr), "Must be");
 685     assert(to_page->is_in(to_addr), "Must be");
 686 
 687     assert(size <= ZUtils::object_size(to_addr), "old size must be <= new size");
 688     assert(size > 0, "size must be set");

 689 
 690     // If a young generation collection started while the old generation
 691     // relocated objects, the remembered set bits were flipped from "current"
 692     // to "previous".
 693     //
 694     // We need to select the correct remembered sets bitmap to ensure that the
 695     // old remset bits are found.
 696     //
 697     // Note that if the young generation marking (remset scanning) finishes
 698     // before the old generation relocation has relocated this page, then the
 699     // young generation will visit this page's previous remembered set bits and
 700     // move them over to the current bitmap.
 701     //
 702     // If the young generation runs multiple cycles while the old generation is
 703     // relocating, then the first cycle will have consumed the old remset
 704     // bits and moved associated objects to a new old page. The old relocation
 705     // could find either of the two bitmaps. So, either it will find the original
 706     // remset bits for the page, or it will find an empty bitmap for the page. It
 707     // doesn't matter for correctness, because the young generation marking has
 708     // already taken care of the bits.

 793 
 794     if (!is_null(addr)) {
 795       // Object has already been relocated
 796       if (!add_remset_if_young(p, addr)) {
 797         // Not young - eagerly remap to skip adding a remset entry just to get deferred remapping
 798         ZBarrier::remap_young_relocated(p, ptr);
 799       }
 800       return;
 801     }
 802 
 803     // Object has not been relocated yet
 804     // Don't want to eagerly relocate objects, so just add a remset
 805     ZRelocate::add_remset(p);
 806     return;
 807   }
 808 
  // Visits every oop field of the just-promoted object and applies the
  // per-field remset filter/remap
  // (see update_remset_promoted_filter_and_remap_per_field).
  void update_remset_promoted(zaddress to_addr) const {
    ZIterator::basic_oop_iterate(to_oop(to_addr), update_remset_promoted_filter_and_remap_per_field);
  }
 812 
 813   void update_remset_for_fields(zaddress from_addr, zaddress to_addr, size_t size) const {
 814     if (_forwarding->to_age() != ZPageAge::old) {
 815       // No remembered set in young pages
 816       return;
 817     }
 818 
 819     // Need to deal with remset when moving objects to the old generation
 820     if (_forwarding->from_age() == ZPageAge::old) {
 821       update_remset_old_to_old(from_addr, to_addr, size);
 822       return;
 823     }
 824 
 825     // Normal promotion
 826     update_remset_promoted(to_addr);
 827   }
 828 
 829   void maybe_string_dedup(zaddress to_addr) {
 830     if (_forwarding->is_promotion()) {
 831       // Only deduplicate promoted objects, and let short-lived strings simply die instead.
 832       _string_dedup_context.request(to_oop(to_addr));
 833     }
 834   }
 835 
 836   bool try_relocate_object(zaddress from_addr, uint32_t partition_id) {
 837     size_t size = ZUtils::object_size(from_addr);
 838     const zaddress to_addr = try_relocate_object_inner(from_addr, partition_id, size);
 839 
 840     if (is_null(to_addr)) {
 841       return false;
 842     }
 843 
 844     update_remset_for_fields(from_addr, to_addr, size);
 845 
 846     maybe_string_dedup(to_addr);
 847 
 848     return true;
 849   }
 850 
 851   void start_in_place_relocation_prepare_remset(ZPage* from_page) {
 852     if (_forwarding->from_age() != ZPageAge::old) {
 853       // Only old pages use remset bits
 854       return;
 855     }
 856 
 857     if (ZGeneration::old()->active_remset_is_current()) {
 858       // We want to iterate over and clear the remset bits of the from-space page,
 859       // and insert current bits in the to-space page. However, with in-place
 860       // relocation, the from-space and to-space pages are the same. Clearing
 861       // is destructive, and is difficult to perform before or during the iteration.
 862       // However, clearing of the current bits has to be done before exposing the
 863       // to-space objects in the forwarding table.
 864       //
< prev index next >