< prev index next >

src/hotspot/share/gc/z/zRelocate.cpp

Print this page

// Sets up relocation for the given generation. The relocation queue starts
// out inactive and is activated later in ZRelocate::start().
ZRelocate::ZRelocate(ZGeneration* generation)
  : _generation(generation),
    _queue() {}
 310 
 311 ZWorkers* ZRelocate::workers() const {
 312   return _generation->workers();
 313 }
 314 
 315 void ZRelocate::start() {
 316   _queue.activate(workers()->active_workers());
 317 }
 318 
// Records the field at p in the young generation's remembered set.
void ZRelocate::add_remset(volatile zpointer* p) {
  ZGeneration::young()->remember(p);
}
 322 
// Relocates the live object at from_addr to a newly allocated copy, or
// returns the winning copy's address if another thread relocated it first.
// Returns zaddress::null if the relocation allocation failed.
static zaddress relocate_object_inner(ZForwarding* forwarding, zaddress from_addr, ZForwardingCursor* cursor) {
  assert(ZHeap::heap()->is_object_live(from_addr), "Should be live");

  // Allocate object
  const size_t size = ZUtils::object_size(from_addr);

  // Pick the allocator matching the age the object is relocated to
  ZAllocatorForRelocation* allocator = ZAllocator::relocation(forwarding->to_age());

  const zaddress to_addr = allocator->alloc_object(size);

  if (is_null(to_addr)) {
    // Allocation failed
    return zaddress::null;
  }

  // Copy object
  ZUtils::object_copy_disjoint(from_addr, to_addr, size);

  // Insert forwarding. Insertion can race with other threads relocating
  // the same object; the forwarding table arbitrates the winner.
  const zaddress to_addr_final = forwarding->insert(from_addr, to_addr, cursor);

  if (to_addr_final != to_addr) {
    // Already relocated (another thread won the race), try undo allocation
    allocator->undo_alloc_object(to_addr, size);
  }

  return to_addr_final;
}
 351 
 352 zaddress ZRelocate::relocate_object(ZForwarding* forwarding, zaddress_unsafe from_addr) {
 353   ZForwardingCursor cursor;
 354 
 355   // Lookup forwarding
 356   zaddress to_addr = forwarding->find(from_addr, &cursor);
 357   if (!is_null(to_addr)) {
 358     // Already relocated
 359     return to_addr;

 573     return _target[static_cast<uint>(age) - 1];
 574   }
 575 
  // Installs page as the current relocation target page for the given age.
  // The slot index is age - 1; presumably the youngest age has no slot in
  // _target - confirm against the declaration of _target.
  void set_target(ZPageAge age, ZPage* page) {
    _target[static_cast<uint>(age) - 1] = page;
  }
 579 
 580   size_t object_alignment() const {
 581     return (size_t)1 << _forwarding->object_alignment_shift();
 582   }
 583 
 584   void increase_other_forwarded(size_t unaligned_object_size) {
 585     const size_t aligned_size = align_up(unaligned_object_size, object_alignment());
 586     if (_forwarding->is_promotion()) {
 587       _other_promoted += aligned_size;
 588     } else {
 589       _other_compacted += aligned_size;
 590     }
 591   }
 592 
  // Attempts to relocate the object at from_addr into the current target
  // page. Returns the to-address (which may be another thread's winning
  // copy), or zaddress::null if allocation in the target page failed.
  zaddress try_relocate_object_inner(zaddress from_addr) {
    ZForwardingCursor cursor;

    const size_t size = ZUtils::object_size(from_addr);
    ZPage* const to_page = target(_forwarding->to_age());

    // Lookup forwarding
    {
      const zaddress to_addr = _forwarding->find(from_addr, &cursor);
      if (!is_null(to_addr)) {
        // Already relocated
        increase_other_forwarded(size);
        return to_addr;
      }
    }

    // Allocate object
    const zaddress allocated_addr = _allocator->alloc_object(to_page, size);
    if (is_null(allocated_addr)) {
      // Allocation failed
      return zaddress::null;
    }

    // Copy object. Use conjoint copying if we are relocating
    // in-place and the new object overlaps with the old object.
    if (_forwarding->in_place_relocation() && allocated_addr + size > from_addr) {
      ZUtils::object_copy_conjoint(from_addr, allocated_addr, size);
    } else {
      ZUtils::object_copy_disjoint(from_addr, allocated_addr, size);
    }

    // Insert forwarding. Another thread may have won the race to relocate
    // this object, in which case insert returns the winner's address.
    const zaddress to_addr = _forwarding->insert(from_addr, allocated_addr, &cursor);
    if (to_addr != allocated_addr) {
      // Already relocated, undo allocation
      // NOTE(review): the undo is passed to_addr (the winning copy's
      // address) rather than allocated_addr (the address allocated here) -
      // confirm against undo_alloc_object's contract.
      _allocator->undo_alloc_object(to_page, to_addr, size);
      increase_other_forwarded(size);
    }

    return to_addr;
  }
 634 
 635   void update_remset_old_to_old(zaddress from_addr, zaddress to_addr) const {
 636     // Old-to-old relocation - move existing remset bits
 637 
 638     // If this is called for an in-place relocated page, then this code has the
 639     // responsibility to clear the old remset bits. Extra care is needed because:
 640     //
 641     // 1) The to-object copy can overlap with the from-object copy
 642     // 2) Remset bits of old objects need to be cleared
 643     //
 644     // A watermark is used to keep track of how far the old remset bits have been removed.
 645 
 646     const bool in_place = _forwarding->in_place_relocation();
 647     ZPage* const from_page = _forwarding->page();
 648     const uintptr_t from_local_offset = from_page->local_offset(from_addr);
 649 
 650     // Note: even with in-place relocation, the to_page could be another page
 651     ZPage* const to_page = ZHeap::heap()->page(to_addr);
 652 
 653     // Uses _relaxed version to handle that in-place relocation resets _top
 654     assert(ZHeap::heap()->is_in_page_relaxed(from_page, from_addr), "Must be");
 655     assert(to_page->is_in(to_addr), "Must be");
 656 
 657 
 658     // Read the size from the to-object, since the from-object
 659     // could have been overwritten during in-place relocation.
 660     const size_t size = ZUtils::object_size(to_addr);
 661 
 662     // If a young generation collection started while the old generation
 663     // relocated  objects, the remember set bits were flipped from "current"
 664     // to "previous".
 665     //
 666     // We need to select the correct remembered sets bitmap to ensure that the
 667     // old remset bits are found.
 668     //
 669     // Note that if the young generation marking (remset scanning) finishes
 670     // before the old generation relocation has relocated this page, then the
 671     // young generation will visit this page's previous remembered set bits and
 672     // moved them over to the current bitmap.
 673     //
 674     // If the young generation runs multiple cycles while the old generation is
 675     // relocating, then the first cycle will have consumed the old remset,
 676     // bits and moved associated objects to a new old page. The old relocation
 677     // could find either of the two bitmaps. So, either it will find the original
 678     // remset bits for the page, or it will find an empty bitmap for the page. It
 679     // doesn't matter for correctness, because the young generation marking has
 680     // already taken care of the bits.

 765 
 766     if (!is_null(addr)) {
 767       // Object has already been relocated
 768       if (!add_remset_if_young(p, addr)) {
 769         // Not young - eagerly remap to skip adding a remset entry just to get deferred remapping
 770         ZBarrier::remap_young_relocated(p, ptr);
 771       }
 772       return;
 773     }
 774 
 775     // Object has not been relocated yet
 776     // Don't want to eagerly relocate objects, so just add a remset
 777     ZRelocate::add_remset(p);
 778     return;
 779   }
 780 
  // Young-to-old promotion: applies the filter-and-remap function to every
  // oop field of the promoted object at to_addr.
  void update_remset_promoted(zaddress to_addr) const {
    ZIterator::basic_oop_iterate(to_oop(to_addr), update_remset_promoted_filter_and_remap_per_field);
  }
 784 
 785   void update_remset_for_fields(zaddress from_addr, zaddress to_addr) const {
 786     if (_forwarding->to_age() != ZPageAge::old) {
 787       // No remembered set in young pages
 788       return;
 789     }
 790 
 791     // Need to deal with remset when moving objects to the old generation
 792     if (_forwarding->from_age() == ZPageAge::old) {
 793       update_remset_old_to_old(from_addr, to_addr);
 794       return;
 795     }
 796 
 797     // Normal promotion
 798     update_remset_promoted(to_addr);
 799   }
 800 
 801   void maybe_string_dedup(zaddress to_addr) {
 802     if (_forwarding->is_promotion()) {
 803       // Only deduplicate promoted objects, and let short-lived strings simply die instead.
 804       _string_dedup_context.request(to_oop(to_addr));
 805     }
 806   }
 807 
 808   bool try_relocate_object(zaddress from_addr) {
 809     const zaddress to_addr = try_relocate_object_inner(from_addr);

 810 
 811     if (is_null(to_addr)) {
 812       return false;
 813     }
 814 
 815     update_remset_for_fields(from_addr, to_addr);
 816 
 817     maybe_string_dedup(to_addr);
 818 
 819     return true;
 820   }
 821 
 822   void start_in_place_relocation_prepare_remset(ZPage* from_page) {
 823     if (_forwarding->from_age() != ZPageAge::old) {
 824       // Only old pages have use remset bits
 825       return;
 826     }
 827 
 828     if (ZGeneration::old()->active_remset_is_current()) {
 829       // We want to iterate over and clear the remset bits of the from-space page,
 830       // and insert current bits in the to-space page. However, with in-place
 831       // relocation, the from-space and to-space pages are the same. Clearing
 832       // is destructive, and is difficult to perform before or during the iteration.
 833       // However, clearing of the current bits has to be done before exposing the
 834       // to-space objects in the forwarding table.
 835       //

// Sets up relocation for the given generation. The relocation queue starts
// out inactive and is activated later in ZRelocate::start().
ZRelocate::ZRelocate(ZGeneration* generation)
  : _generation(generation),
    _queue() {}
 310 
 311 ZWorkers* ZRelocate::workers() const {
 312   return _generation->workers();
 313 }
 314 
 315 void ZRelocate::start() {
 316   _queue.activate(workers()->active_workers());
 317 }
 318 
// Records the field at p in the young generation's remembered set.
void ZRelocate::add_remset(volatile zpointer* p) {
  ZGeneration::young()->remember(p);
}
 322 
// Relocates the live object at from_addr to a newly allocated copy, or
// returns the winning copy's address if another thread relocated it first.
// Returns zaddress::null if the relocation allocation failed.
static zaddress relocate_object_inner(ZForwarding* forwarding, zaddress from_addr, ZForwardingCursor* cursor) {
  assert(ZHeap::heap()->is_object_live(from_addr), "Should be live");

  // Allocate object
  const size_t old_size = ZUtils::object_size(from_addr);
  // copy_size() may differ from the object's current size - presumably to
  // reserve room for data appended to the copy (e.g. an identity hash);
  // confirm against ZUtils::copy_size.
  const size_t size = ZUtils::copy_size(from_addr, old_size);

  // Pick the allocator matching the age the object is relocated to
  ZAllocatorForRelocation* allocator = ZAllocator::relocation(forwarding->to_age());

  const zaddress to_addr = allocator->alloc_object(size);

  if (is_null(to_addr)) {
    // Allocation failed
    return zaddress::null;
  }
  assert(to_addr != from_addr, "addresses must be different");

  // Copy object - only old_size bytes; any extra reserved space is
  // filled in by initialize_hash_if_necessary below
  ZUtils::object_copy_disjoint(from_addr, to_addr, old_size);
  ZUtils::initialize_hash_if_necessary(to_addr, from_addr);

  // Insert forwarding. Insertion can race with other threads relocating
  // the same object; the forwarding table arbitrates the winner.
  const zaddress to_addr_final = forwarding->insert(from_addr, to_addr, cursor);

  if (to_addr_final != to_addr) {
    // Already relocated (another thread won the race), try undo allocation
    allocator->undo_alloc_object(to_addr, size);
  }

  return to_addr_final;
}
 354 
 355 zaddress ZRelocate::relocate_object(ZForwarding* forwarding, zaddress_unsafe from_addr) {
 356   ZForwardingCursor cursor;
 357 
 358   // Lookup forwarding
 359   zaddress to_addr = forwarding->find(from_addr, &cursor);
 360   if (!is_null(to_addr)) {
 361     // Already relocated
 362     return to_addr;

 576     return _target[static_cast<uint>(age) - 1];
 577   }
 578 
  // Installs page as the current relocation target page for the given age.
  // The slot index is age - 1; presumably the youngest age has no slot in
  // _target - confirm against the declaration of _target.
  void set_target(ZPageAge age, ZPage* page) {
    _target[static_cast<uint>(age) - 1] = page;
  }
 582 
 583   size_t object_alignment() const {
 584     return (size_t)1 << _forwarding->object_alignment_shift();
 585   }
 586 
 587   void increase_other_forwarded(size_t unaligned_object_size) {
 588     const size_t aligned_size = align_up(unaligned_object_size, object_alignment());
 589     if (_forwarding->is_promotion()) {
 590       _other_promoted += aligned_size;
 591     } else {
 592       _other_compacted += aligned_size;
 593     }
 594   }
 595 
  // Attempts to relocate the object at from_addr (of size old_size) into
  // the current target page. Returns the to-address (which may be another
  // thread's winning copy), or zaddress::null if allocation failed or the
  // size assumption below did not hold.
  zaddress try_relocate_object_inner(zaddress from_addr, size_t old_size) {
    ZForwardingCursor cursor;

    ZPage* const to_page = target(_forwarding->to_age());
    zoffset_end from_offset = to_zoffset_end(ZAddress::offset(from_addr));
    zoffset_end top = to_page != nullptr ? to_page->top() : to_zoffset_end(0);
    const size_t new_size = ZUtils::copy_size(from_addr, old_size);
    // If the target page's top is exactly at the from-object, the copy will
    // land on top of the original, so the old size is kept; otherwise use
    // the (possibly larger) copy size.
    const size_t size = top == from_offset ? old_size : new_size;

    // Lookup forwarding
    {
      const zaddress to_addr = _forwarding->find(from_addr, &cursor);
      if (!is_null(to_addr)) {
        // Already relocated
        increase_other_forwarded(size);
        return to_addr;
      }
    }

    // Allocate object
    const zaddress allocated_addr = _allocator->alloc_object(to_page, size);
    if (is_null(allocated_addr)) {
      // Allocation failed
      return zaddress::null;
    }
    // The size chosen above assumed whether or not the allocation would
    // land exactly on the from-object. If that assumption turned out wrong
    // (presumably because top moved concurrently), undo and report failure.
    if (old_size != new_size && ((top == from_offset) != (allocated_addr == from_addr))) {
      _allocator->undo_alloc_object(to_page, allocated_addr, size);
      return zaddress::null;
    }

    // Copy object. Use conjoint copying if we are relocating
    // in-place and the new object overlaps with the old object.
    if (_forwarding->in_place_relocation() && allocated_addr + old_size > from_addr) {
      ZUtils::object_copy_conjoint(from_addr, allocated_addr, old_size);
    } else {
      ZUtils::object_copy_disjoint(from_addr, allocated_addr, old_size);
    }
    if (from_addr != allocated_addr) {
      // Copy moved to a new address - initialize the hash in the copy if
      // necessary (skipped for exact in-place copies)
      ZUtils::initialize_hash_if_necessary(allocated_addr, from_addr);
    }

    // Insert forwarding. Another thread may have won the race to relocate
    // this object, in which case insert returns the winner's address.
    const zaddress to_addr = _forwarding->insert(from_addr, allocated_addr, &cursor);
    if (to_addr != allocated_addr) {
      // Already relocated, undo allocation
      // NOTE(review): the undo is passed to_addr (the winning copy's
      // address) rather than allocated_addr (the address allocated here) -
      // confirm against undo_alloc_object's contract.
      _allocator->undo_alloc_object(to_page, to_addr, size);
      increase_other_forwarded(size);
    }

    return to_addr;
  }
 646 
 647   void update_remset_old_to_old(zaddress from_addr, zaddress to_addr, size_t size) const {
 648     // Old-to-old relocation - move existing remset bits
 649 
 650     // If this is called for an in-place relocated page, then this code has the
 651     // responsibility to clear the old remset bits. Extra care is needed because:
 652     //
 653     // 1) The to-object copy can overlap with the from-object copy
 654     // 2) Remset bits of old objects need to be cleared
 655     //
 656     // A watermark is used to keep track of how far the old remset bits have been removed.
 657 
 658     const bool in_place = _forwarding->in_place_relocation();
 659     ZPage* const from_page = _forwarding->page();
 660     const uintptr_t from_local_offset = from_page->local_offset(from_addr);
 661 
 662     // Note: even with in-place relocation, the to_page could be another page
 663     ZPage* const to_page = ZHeap::heap()->page(to_addr);
 664 
 665     // Uses _relaxed version to handle that in-place relocation resets _top
 666     assert(ZHeap::heap()->is_in_page_relaxed(from_page, from_addr), "Must be");
 667     assert(to_page->is_in(to_addr), "Must be");
 668 
 669     assert(size <= ZUtils::object_size(to_addr), "old size must be <= new size");
 670     assert(size > 0, "size must be set");


 671 
 672     // If a young generation collection started while the old generation
 673     // relocated  objects, the remember set bits were flipped from "current"
 674     // to "previous".
 675     //
 676     // We need to select the correct remembered sets bitmap to ensure that the
 677     // old remset bits are found.
 678     //
 679     // Note that if the young generation marking (remset scanning) finishes
 680     // before the old generation relocation has relocated this page, then the
 681     // young generation will visit this page's previous remembered set bits and
 682     // moved them over to the current bitmap.
 683     //
 684     // If the young generation runs multiple cycles while the old generation is
 685     // relocating, then the first cycle will have consumed the old remset,
 686     // bits and moved associated objects to a new old page. The old relocation
 687     // could find either of the two bitmaps. So, either it will find the original
 688     // remset bits for the page, or it will find an empty bitmap for the page. It
 689     // doesn't matter for correctness, because the young generation marking has
 690     // already taken care of the bits.

 775 
 776     if (!is_null(addr)) {
 777       // Object has already been relocated
 778       if (!add_remset_if_young(p, addr)) {
 779         // Not young - eagerly remap to skip adding a remset entry just to get deferred remapping
 780         ZBarrier::remap_young_relocated(p, ptr);
 781       }
 782       return;
 783     }
 784 
 785     // Object has not been relocated yet
 786     // Don't want to eagerly relocate objects, so just add a remset
 787     ZRelocate::add_remset(p);
 788     return;
 789   }
 790 
  // Young-to-old promotion: applies the filter-and-remap function to every
  // oop field of the promoted object at to_addr.
  void update_remset_promoted(zaddress to_addr) const {
    ZIterator::basic_oop_iterate(to_oop(to_addr), update_remset_promoted_filter_and_remap_per_field);
  }
 794 
 795   void update_remset_for_fields(zaddress from_addr, zaddress to_addr, size_t size) const {
 796     if (_forwarding->to_age() != ZPageAge::old) {
 797       // No remembered set in young pages
 798       return;
 799     }
 800 
 801     // Need to deal with remset when moving objects to the old generation
 802     if (_forwarding->from_age() == ZPageAge::old) {
 803       update_remset_old_to_old(from_addr, to_addr, size);
 804       return;
 805     }
 806 
 807     // Normal promotion
 808     update_remset_promoted(to_addr);
 809   }
 810 
 811   void maybe_string_dedup(zaddress to_addr) {
 812     if (_forwarding->is_promotion()) {
 813       // Only deduplicate promoted objects, and let short-lived strings simply die instead.
 814       _string_dedup_context.request(to_oop(to_addr));
 815     }
 816   }
 817 
 818   bool try_relocate_object(zaddress from_addr) {
 819     size_t size = ZUtils::object_size(from_addr);
 820     const zaddress to_addr = try_relocate_object_inner(from_addr, size);
 821 
 822     if (is_null(to_addr)) {
 823       return false;
 824     }
 825 
 826     update_remset_for_fields(from_addr, to_addr, size);
 827 
 828     maybe_string_dedup(to_addr);
 829 
 830     return true;
 831   }
 832 
 833   void start_in_place_relocation_prepare_remset(ZPage* from_page) {
 834     if (_forwarding->from_age() != ZPageAge::old) {
 835       // Only old pages have use remset bits
 836       return;
 837     }
 838 
 839     if (ZGeneration::old()->active_remset_is_current()) {
 840       // We want to iterate over and clear the remset bits of the from-space page,
 841       // and insert current bits in the to-space page. However, with in-place
 842       // relocation, the from-space and to-space pages are the same. Clearing
 843       // is destructive, and is difficult to perform before or during the iteration.
 844       // However, clearing of the current bits has to be done before exposing the
 845       // to-space objects in the forwarding table.
 846       //
< prev index next >