1 /*
   2  * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "gc/shared/gc_globals.hpp"
  26 #include "gc/shared/suspendibleThreadSet.hpp"
  27 #include "gc/z/zAbort.inline.hpp"
  28 #include "gc/z/zAddress.inline.hpp"
  29 #include "gc/z/zAllocator.inline.hpp"
  30 #include "gc/z/zBarrier.inline.hpp"
  31 #include "gc/z/zCollectedHeap.hpp"
  32 #include "gc/z/zForwarding.inline.hpp"
  33 #include "gc/z/zGeneration.inline.hpp"
  34 #include "gc/z/zHeap.inline.hpp"
  35 #include "gc/z/zIndexDistributor.inline.hpp"
  36 #include "gc/z/zIterator.inline.hpp"
  37 #include "gc/z/zPage.inline.hpp"
  38 #include "gc/z/zPageAge.hpp"
  39 #include "gc/z/zRelocate.hpp"
  40 #include "gc/z/zRelocationSet.inline.hpp"
  41 #include "gc/z/zRootsIterator.hpp"
  42 #include "gc/z/zStackWatermark.hpp"
  43 #include "gc/z/zStat.hpp"
  44 #include "gc/z/zTask.hpp"
  45 #include "gc/z/zUncoloredRoot.inline.hpp"
  46 #include "gc/z/zVerify.hpp"
  47 #include "gc/z/zWorkers.hpp"
  48 #include "prims/jvmtiTagMap.hpp"
  49 #include "runtime/atomic.hpp"
  50 #include "utilities/debug.hpp"
  51 
  52 static const ZStatCriticalPhase ZCriticalPhaseRelocationStall("Relocation Stall");
  53 static const ZStatSubPhase ZSubPhaseConcurrentRelocateRememberedSetFlipPromotedYoung("Concurrent Relocate Remset FP", ZGenerationId::young);
  54 
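     // The relocate queue is used by threads that fail to relocate an object
     // themselves: they add the ZForwarding to the queue and wait for a GC
     // worker to complete the page (see add_and_wait below). The
     // _needs_attention counter is bumped once when the queue becomes non-empty
     // and once when synchronization is requested, so workers can poll a single
     // atomic counter on the fast path (see synchronize_poll).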
  55 ZRelocateQueue::ZRelocateQueue()
  56   : _lock(),
  57     _queue(),
  58     _nworkers(0),
  59     _nsynchronized(0),
  60     _synchronize(false),
  61     _is_active(false),
  62     _needs_attention(0) {}
  63 
  64 bool ZRelocateQueue::needs_attention() const {
  65   return Atomic::load(&_needs_attention) != 0;
  66 }
  67 
  68 void ZRelocateQueue::inc_needs_attention() {
  69   const int needs_attention = Atomic::add(&_needs_attention, 1);
  70   assert(needs_attention == 1 || needs_attention == 2, "Invalid state");
  71 }
  72 
  73 void ZRelocateQueue::dec_needs_attention() {
  74   const int needs_attention = Atomic::sub(&_needs_attention, 1);
  75   assert(needs_attention == 0 || needs_attention == 1, "Invalid state");
  76 }
  77 
  78 void ZRelocateQueue::activate(uint nworkers) {
  79   _is_active = true;
  80   join(nworkers);
  81 }
  82 
  83 void ZRelocateQueue::deactivate() {
  84   Atomic::store(&_is_active, false);
  85   clear();
  86 }
  87 
  88 bool ZRelocateQueue::is_active() const {
  89   return Atomic::load(&_is_active);
  90 }
  91 
  92 void ZRelocateQueue::join(uint nworkers) {
  93   assert(nworkers != 0, "Must request at least one worker");
  94   assert(_nworkers == 0, "Invalid state");
  95   assert(_nsynchronized == 0, "Invalid state");
  96 
  97   log_debug(gc, reloc)("Joining workers: %u", nworkers);
  98 
  99   _nworkers = nworkers;
 100 }
 101 
 102 void ZRelocateQueue::resize_workers(uint nworkers) {
 103   assert(nworkers != 0, "Must request at least one worker");
 104   assert(_nworkers == 0, "Invalid state");
 105   assert(_nsynchronized == 0, "Invalid state");
 106 
 107   log_debug(gc, reloc)("Resize workers: %u", nworkers);
 108 
 109   ZLocker<ZConditionLock> locker(&_lock);
 110   _nworkers = nworkers;
 111 }
 112 
 113 void ZRelocateQueue::leave() {
 114   ZLocker<ZConditionLock> locker(&_lock);
 115   _nworkers--;
 116 
 117   assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);
 118 
 119   log_debug(gc, reloc)("Leaving workers: left: %u _synchronize: %d _nsynchronized: %u", _nworkers, _synchronize, _nsynchronized);
 120 
 121   // Prune done forwardings
 122   const bool forwardings_done = prune();
 123 
 124   // Check if all workers synchronized
 125   const bool last_synchronized = _synchronize && _nworkers == _nsynchronized;
 126 
 127   if (forwardings_done || last_synchronized) {
 128     _lock.notify_all();
 129   }
 130 }
 131 
 132 void ZRelocateQueue::add_and_wait(ZForwarding* forwarding) {
 133   ZStatTimer timer(ZCriticalPhaseRelocationStall);
 134   ZLocker<ZConditionLock> locker(&_lock);
 135 
 136   if (forwarding->is_done()) {
 137     return;
 138   }
 139 
 140   _queue.append(forwarding);
 141   if (_queue.length() == 1) {
 142     // Queue became non-empty
 143     inc_needs_attention();
 144     _lock.notify_all();
 145   }
 146 
 147   while (!forwarding->is_done()) {
 148     _lock.wait();
 149   }
 150 }
 151 
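     // Removes forwardings that have been completed from the queue. Returns
     // true if at least one forwarding was removed, so that waiters can be
     // notified. Callers are expected to hold the lock.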
 152 bool ZRelocateQueue::prune() {
 153   if (_queue.is_empty()) {
 154     return false;
 155   }
 156 
 157   bool done = false;
 158 
 159   for (int i = 0; i < _queue.length();) {
 160     const ZForwarding* const forwarding = _queue.at(i);
 161     if (forwarding->is_done()) {
 162       done = true;
 163 
 164       _queue.delete_at(i);
 165     } else {
 166       i++;
 167     }
 168   }
 169 
 170   if (_queue.is_empty()) {
 171     dec_needs_attention();
 172   }
 173 
 174   return done;
 175 }
 176 
 177 ZForwarding* ZRelocateQueue::prune_and_claim() {
 178   if (prune()) {
 179     _lock.notify_all();
 180   }
 181 
 182   for (int i = 0; i < _queue.length(); i++) {
 183     ZForwarding* const forwarding = _queue.at(i);
 184     if (forwarding->claim()) {
 185       return forwarding;
 186     }
 187   }
 188 
 189   return nullptr;
 190 }
 191 
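     // Scoped helper that marks the current worker as synchronized for the
     // duration of the wait loop in synchronize_poll().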
 192 class ZRelocateQueueSynchronizeThread {
 193 private:
 194   ZRelocateQueue* const _queue;
 195 
 196 public:
 197   ZRelocateQueueSynchronizeThread(ZRelocateQueue* queue)
 198     : _queue(queue) {
 199     _queue->synchronize_thread();
 200   }
 201 
 202   ~ZRelocateQueueSynchronizeThread() {
 203     _queue->desynchronize_thread();
 204   }
 205 };
 206 
 207 void ZRelocateQueue::synchronize_thread() {
 208   _nsynchronized++;
 209 
 210   log_debug(gc, reloc)("Synchronize worker _nsynchronized %u", _nsynchronized);
 211 
 212   assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);
 213   if (_nsynchronized == _nworkers) {
 214     // All workers synchronized
 215     _lock.notify_all();
 216   }
 217 }
 218 
 219 void ZRelocateQueue::desynchronize_thread() {
 220   _nsynchronized--;
 221 
 222   log_debug(gc, reloc)("Desynchronize worker _nsynchronized %u", _nsynchronized);
 223 
 224   assert(_nsynchronized < _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);
 225 }
 226 
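     // Polled by workers between pages. Returns a queued forwarding to claim
     // and relocate, waits while a synchronization request is pending, and
     // returns nullptr when nothing needs attention.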
 227 ZForwarding* ZRelocateQueue::synchronize_poll() {
 228   // Fast path avoids locking
 229   if (!needs_attention()) {
 230     return nullptr;
 231   }
 232 
 233   // Slow path to get the next forwarding and/or synchronize
 234   ZLocker<ZConditionLock> locker(&_lock);
 235 
 236   {
 237     ZForwarding* const forwarding = prune_and_claim();
 238     if (forwarding != nullptr) {
 239       // Don't become synchronized while there are elements in the queue
 240       return forwarding;
 241     }
 242   }
 243 
 244   if (!_synchronize) {
 245     return nullptr;
 246   }
 247 
 248   ZRelocateQueueSynchronizeThread rqst(this);
 249 
 250   do {
 251     _lock.wait();
 252 
 253     ZForwarding* const forwarding = prune_and_claim();
 254     if (forwarding != nullptr) {
 255       return forwarding;
 256     }
 257   } while (_synchronize);
 258 
 259   return nullptr;
 260 }
 261 
 262 void ZRelocateQueue::clear() {
 263   assert(_nworkers == 0, "Invalid state");
 264 
 265   if (_queue.is_empty()) {
 266     return;
 267   }
 268 
 269   ZArrayIterator<ZForwarding*> iter(&_queue);
 270   for (ZForwarding* forwarding; iter.next(&forwarding);) {
 271     assert(forwarding->is_done(), "All should be done");
 272   }
 273 
 274   assert(false, "Clear was not empty");
 275 
 276   _queue.clear();
 277   dec_needs_attention();
 278 }
 279 
 280 void ZRelocateQueue::synchronize() {
 281   ZLocker<ZConditionLock> locker(&_lock);
 282   _synchronize = true;
 283 
 284   inc_needs_attention();
 285 
 286   log_debug(gc, reloc)("Synchronize all workers 1 _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);
 287 
 288   while (_nworkers != _nsynchronized) {
 289     _lock.wait();
 290     log_debug(gc, reloc)("Synchronize all workers 2 _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);
 291   }
 292 }
 293 
 294 void ZRelocateQueue::desynchronize() {
 295   ZLocker<ZConditionLock> locker(&_lock);
 296   _synchronize = false;
 297 
 298   log_debug(gc, reloc)("Desynchronize all workers _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);
 299 
 300   assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);
 301 
 302   dec_needs_attention();
 303 
 304   _lock.notify_all();
 305 }
 306 
 307 ZRelocate::ZRelocate(ZGeneration* generation)
 308   : _generation(generation),
 309     _queue() {}
 310 
 311 ZWorkers* ZRelocate::workers() const {
 312   return _generation->workers();
 313 }
 314 
 315 void ZRelocate::start() {
 316   _queue.activate(workers()->active_workers());
 317 }
 318 
 319 void ZRelocate::add_remset(volatile zpointer* p) {
 320   ZGeneration::young()->remember(p);
 321 }
 322 
 323 static zaddress relocate_object_inner(ZForwarding* forwarding, zaddress from_addr, ZForwardingCursor* cursor) {
 324   assert(ZHeap::heap()->is_object_live(from_addr), "Should be live");
 325 
 326   // Allocate object
 327   const size_t old_size = ZUtils::object_size(from_addr);
 328   const size_t size = ZUtils::copy_size(from_addr, old_size);
 329 
 330   ZAllocatorForRelocation* allocator = ZAllocator::relocation(forwarding->to_age());
 331 
 332   const zaddress to_addr = allocator->alloc_object(size);
 333 
 334   if (is_null(to_addr)) {
 335     // Allocation failed
 336     return zaddress::null;
 337   }
 338   assert(to_addr != from_addr, "addresses must be different");
 339 
 340   // Copy object
 341   ZUtils::object_copy_disjoint(from_addr, to_addr, old_size);
 342   ZUtils::initialize_hash_if_necessary(to_addr, from_addr);
 343 
 344   // Insert forwarding
 345   const zaddress to_addr_final = forwarding->insert(from_addr, to_addr, cursor);
 346 
 347   if (to_addr_final != to_addr) {
 348     // Already relocated, try undo allocation
 349     allocator->undo_alloc_object(to_addr, size);
 350   }
 351 
 352   return to_addr_final;
 353 }
 354 
 355 zaddress ZRelocate::relocate_object(ZForwarding* forwarding, zaddress_unsafe from_addr) {
 356   ZForwardingCursor cursor;
 357 
 358   // Lookup forwarding
 359   zaddress to_addr = forwarding->find(from_addr, &cursor);
 360   if (!is_null(to_addr)) {
 361     // Already relocated
 362     return to_addr;
 363   }
 364 
 365   // Relocate object
 366   if (forwarding->retain_page(&_queue)) {
 367     assert(_generation->is_phase_relocate(), "Must be");
 368     to_addr = relocate_object_inner(forwarding, safe(from_addr), &cursor);
 369     forwarding->release_page();
 370 
 371     if (!is_null(to_addr)) {
 372       // Success
 373       return to_addr;
 374     }
 375 
 376     // Failed to relocate object. Signal and wait for a worker thread to
 377     // complete relocation of this page, and then forward the object.
 378     _queue.add_and_wait(forwarding);
 379   }
 380 
 381   // Forward object
 382   return forward_object(forwarding, from_addr);
 383 }
 384 
 385 zaddress ZRelocate::forward_object(ZForwarding* forwarding, zaddress_unsafe from_addr) {
 386   const zaddress to_addr = forwarding->find(from_addr);
 387   assert(!is_null(to_addr), "Should be forwarded: " PTR_FORMAT, untype(from_addr));
 388   return to_addr;
 389 }
 390 
 391 static ZPage* alloc_page(ZAllocatorForRelocation* allocator, ZPageType type, size_t size) {
 392   if (ZStressRelocateInPlace) {
 393     // Simulate failure to allocate a new page. This will
 394     // cause the page being relocated to be relocated in-place.
 395     return nullptr;
 396   }
 397 
 398   ZAllocationFlags flags;
 399   flags.set_non_blocking();
 400   flags.set_gc_relocation();
 401 
 402   return allocator->alloc_page_for_relocation(type, size, flags);
 403 }
 404 
 405 static void retire_target_page(ZGeneration* generation, ZPage* page) {
 406   if (generation->is_young() && page->is_old()) {
 407     generation->increase_promoted(page->used());
 408   } else {
 409     generation->increase_compacted(page->used());
 410   }
 411 
 412   // Free target page if it is empty. We can end up with an empty target
 413   // page if we allocated a new target page, and then lost the race to
 414   // relocate the remaining objects, leaving the target page empty when
 415   // relocation completed.
 416   if (page->used() == 0) {
 417     ZHeap::heap()->free_page(page, true /* allow_defragment */);
 418   }
 419 }
 420 
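     // Allocator for relocating objects from small pages. Each worker owns its
     // target pages, so object allocation within a target page does not need
     // to be atomic.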
 421 class ZRelocateSmallAllocator {
 422 private:
 423   ZGeneration* const _generation;
 424   volatile size_t    _in_place_count;
 425 
 426 public:
 427   ZRelocateSmallAllocator(ZGeneration* generation)
 428     : _generation(generation),
 429       _in_place_count(0) {}
 430 
 431   ZPage* alloc_and_retire_target_page(ZForwarding* forwarding, ZPage* target) {
 432     ZAllocatorForRelocation* const allocator = ZAllocator::relocation(forwarding->to_age());
 433     ZPage* const page = alloc_page(allocator, forwarding->type(), forwarding->size());
 434     if (page == nullptr) {
 435       Atomic::inc(&_in_place_count);
 436     }
 437 
 438     if (target != nullptr) {
 439       // Retire the old target page
 440       retire_target_page(_generation, target);
 441     }
 442 
 443     return page;
 444   }
 445 
 446   void share_target_page(ZPage* page) {
 447     // Does nothing
 448   }
 449 
 450   void free_target_page(ZPage* page) {
 451     if (page != nullptr) {
 452       retire_target_page(_generation, page);
 453     }
 454   }
 455 
 456   zaddress alloc_object(ZPage* page, size_t size) const {
 457     return (page != nullptr) ? page->alloc_object(size) : zaddress::null;
 458   }
 459 
 460   void undo_alloc_object(ZPage* page, zaddress addr, size_t size) const {
 461     page->undo_alloc_object(addr, size);
 462   }
 463 
 464   size_t in_place_count() const {
 465     return _in_place_count;
 466   }
 467 };
 468 
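     // Allocator for relocating objects from medium pages. Target pages are
     // shared between workers: object allocation is atomic, and installing a
     // new shared target page is coordinated under a lock. While a worker is
     // relocating a medium page in-place, other workers wait before a new
     // shared target page is installed.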
 469 class ZRelocateMediumAllocator {
 470 private:
 471   ZGeneration* const _generation;
 472   ZConditionLock     _lock;
 473   ZPage*             _shared[ZAllocator::_relocation_allocators];
 474   bool               _in_place;
 475   volatile size_t    _in_place_count;
 476 
 477 public:
 478   ZRelocateMediumAllocator(ZGeneration* generation)
 479     : _generation(generation),
 480       _lock(),
 481       _shared(),
 482       _in_place(false),
 483       _in_place_count(0) {}
 484 
 485   ~ZRelocateMediumAllocator() {
 486     for (uint i = 0; i < ZAllocator::_relocation_allocators; ++i) {
 487       if (_shared[i] != nullptr) {
 488         retire_target_page(_generation, _shared[i]);
 489       }
 490     }
 491   }
 492 
 493   ZPage* shared(ZPageAge age) {
 494     return _shared[static_cast<uint>(age) - 1];
 495   }
 496 
 497   void set_shared(ZPageAge age, ZPage* page) {
 498     _shared[static_cast<uint>(age) - 1] = page;
 499   }
 500 
 501   ZPage* alloc_and_retire_target_page(ZForwarding* forwarding, ZPage* target) {
 502     ZLocker<ZConditionLock> locker(&_lock);
 503 
 504     // Wait for any ongoing in-place relocation to complete
 505     while (_in_place) {
 506       _lock.wait();
 507     }
 508 
 509     // Allocate a new page only if the shared page is the same as the
 510     // current target page. The shared page will be different from the
 511     // current target page if another thread shared a page, or allocated
 512     // a new page.
 513     const ZPageAge to_age = forwarding->to_age();
 514     if (shared(to_age) == target) {
 515       ZAllocatorForRelocation* const allocator = ZAllocator::relocation(forwarding->to_age());
 516       ZPage* const to_page = alloc_page(allocator, forwarding->type(), forwarding->size());
 517       set_shared(to_age, to_page);
 518       if (to_page == nullptr) {
 519         Atomic::inc(&_in_place_count);
 520         _in_place = true;
 521       }
 522 
 523       // This thread is responsible for retiring the shared target page
 524       if (target != nullptr) {
 525         retire_target_page(_generation, target);
 526       }
 527     }
 528 
 529     return shared(to_age);
 530   }
 531 
 532   void share_target_page(ZPage* page) {
 533     const ZPageAge age = page->age();
 534 
 535     ZLocker<ZConditionLock> locker(&_lock);
 536     assert(_in_place, "Invalid state");
 537     assert(shared(age) == nullptr, "Invalid state");
 538     assert(page != nullptr, "Invalid page");
 539 
 540     set_shared(age, page);
 541     _in_place = false;
 542 
 543     _lock.notify_all();
 544   }
 545 
 546   void free_target_page(ZPage* page) {
 547     // Does nothing
 548   }
 549 
 550   zaddress alloc_object(ZPage* page, size_t size) const {
 551     return (page != nullptr) ? page->alloc_object_atomic(size) : zaddress::null;
 552   }
 553 
 554   void undo_alloc_object(ZPage* page, zaddress addr, size_t size) const {
 555     page->undo_alloc_object_atomic(addr, size);
 556   }
 557 
 558   size_t in_place_count() const {
 559     return _in_place_count;
 560   }
 561 };
 562 
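     // Per-worker state for relocating the objects of one page at a time. Keeps
     // one target page per destination age, and accumulates promoted/compacted
     // byte counts for objects that were relocated by other threads.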
 563 template <typename Allocator>
 564 class ZRelocateWork : public StackObj {
 565 private:
 566   Allocator* const   _allocator;
 567   ZForwarding*       _forwarding;
 568   ZPage*             _target[ZAllocator::_relocation_allocators];
 569   ZGeneration* const _generation;
 570   size_t             _other_promoted;
 571   size_t             _other_compacted;
 572 
 573   ZPage* target(ZPageAge age) {
 574     return _target[static_cast<uint>(age) - 1];
 575   }
 576 
 577   void set_target(ZPageAge age, ZPage* page) {
 578     _target[static_cast<uint>(age) - 1] = page;
 579   }
 580 
 581   size_t object_alignment() const {
 582     return (size_t)1 << _forwarding->object_alignment_shift();
 583   }
 584 
 585   void increase_other_forwarded(size_t unaligned_object_size) {
 586     const size_t aligned_size = align_up(unaligned_object_size, object_alignment());
 587     if (_forwarding->is_promotion()) {
 588       _other_promoted += aligned_size;
 589     } else {
 590       _other_compacted += aligned_size;
 591     }
 592   }
 593 
 594   zaddress try_relocate_object_inner(zaddress from_addr, size_t old_size) {
 595     ZForwardingCursor cursor;
 596     ZPage* const to_page = target(_forwarding->to_age());
 597     zoffset_end from_offset = to_zoffset_end(ZAddress::offset(from_addr));
 598     zoffset_end top = to_page != nullptr ? to_page->top() : to_zoffset_end(0);
 599     const size_t new_size = ZUtils::copy_size(from_addr, old_size);
 600     const size_t size = top == from_offset ? old_size : new_size;
 601 
 602     // Lookup forwarding
 603     {
 604       const zaddress to_addr = _forwarding->find(from_addr, &cursor);
 605       if (!is_null(to_addr)) {
 606         // Already relocated
 607         increase_other_forwarded(size);
 608         return to_addr;
 609       }
 610     }
 611 
 612     // Allocate object
 613     const zaddress allocated_addr = _allocator->alloc_object(to_page, size);
 614     if (is_null(allocated_addr)) {
 615       // Allocation failed
 616       return zaddress::null;
 617     }
 618     if (old_size != new_size && ((top == from_offset) != (allocated_addr == from_addr))) {
 619       _allocator->undo_alloc_object(to_page, allocated_addr, size);
 620       return zaddress::null;
 621     }
 622 
 623     // Copy object. Use conjoint copying if we are relocating
 624     // in-place and the new object overlaps with the old object.
 625     if (_forwarding->in_place_relocation() && allocated_addr + old_size > from_addr) {
 626       ZUtils::object_copy_conjoint(from_addr, allocated_addr, old_size);
 627     } else {
 628       ZUtils::object_copy_disjoint(from_addr, allocated_addr, old_size);
 629     }
 630     if (from_addr != allocated_addr) {
 631       ZUtils::initialize_hash_if_necessary(allocated_addr, from_addr);
 632     }
 633 
 634     // Insert forwarding
 635     const zaddress to_addr = _forwarding->insert(from_addr, allocated_addr, &cursor);
 636     if (to_addr != allocated_addr) {
 637       // Already relocated, undo allocation
 638       _allocator->undo_alloc_object(to_page, to_addr, size);
 639       increase_other_forwarded(size);
 640     }
 641 
 642     return to_addr;
 643   }
 644 
 645   void update_remset_old_to_old(zaddress from_addr, zaddress to_addr, size_t size) const {
 646     // Old-to-old relocation - move existing remset bits
 647 
 648     // If this is called for an in-place relocated page, then this code has the
 649     // responsibility to clear the old remset bits. Extra care is needed because:
 650     //
 651     // 1) The to-object copy can overlap with the from-object copy
 652     // 2) Remset bits of old objects need to be cleared
 653     //
 654     // A watermark is used to keep track of how far the old remset bits have been removed.
 655 
 656     const bool in_place = _forwarding->in_place_relocation();
 657     ZPage* const from_page = _forwarding->page();
 658     const uintptr_t from_local_offset = from_page->local_offset(from_addr);
 659 
 660     // Note: even with in-place relocation, the to_page could be another page
 661     ZPage* const to_page = ZHeap::heap()->page(to_addr);
 662 
 663     // Use the _relaxed version to handle the fact that in-place relocation resets _top
 664     assert(ZHeap::heap()->is_in_page_relaxed(from_page, from_addr), "Must be");
 665     assert(to_page->is_in(to_addr), "Must be");
 666 
 667     assert(size <= ZUtils::object_size(to_addr), "old size must be <= new size");
 668     assert(size > 0, "size must be set");
 669 
 670     // If a young generation collection started while the old generation
 671     // relocated objects, the remembered set bits were flipped from "current"
 672     // to "previous".
 673     //
 674     // We need to select the correct remembered set bitmap to ensure that the
 675     // old remset bits are found.
 676     //
 677     // Note that if the young generation marking (remset scanning) finishes
 678     // before the old generation relocation has relocated this page, then the
 679     // young generation will have visited this page's previous remembered set
 680     // bits and moved them over to the current bitmap.
 681     //
 682     // If the young generation runs multiple cycles while the old generation is
 683     // relocating, then the first cycle will have consumed the old remset bits
 684     // and moved the associated objects to a new old page. The old generation
 685     // relocation could then find the bits in either of the two bitmaps: either
 686     // it will find the original remset bits for the page, or it will find an
 687     // empty bitmap for the page. It doesn't matter for correctness, because the
 688     // young generation marking has already taken care of the bits.
 689 
 690     const bool active_remset_is_current = ZGeneration::old()->active_remset_is_current();
 691 
 692     // When in-place relocation is done and the old remset bits are located in
 693     // the bitmap that is going to be used for the new remset bits, then we
 694     // need to clear the old bits before the new bits are inserted.
 695     const bool iterate_current_remset = active_remset_is_current && !in_place;
 696 
 697     BitMap::Iterator iter = iterate_current_remset
 698         ? from_page->remset_iterator_limited_current(from_local_offset, size)
 699         : from_page->remset_iterator_limited_previous(from_local_offset, size);
 700 
 701     for (BitMap::idx_t field_bit : iter) {
 702       const uintptr_t field_local_offset = ZRememberedSet::to_offset(field_bit);
 703 
 704       // Add remset entry in the to-page
 705       const uintptr_t offset = field_local_offset - from_local_offset;
 706       const zaddress to_field = to_addr + offset;
 707       log_trace(gc, reloc)("Remember: from: " PTR_FORMAT " to: " PTR_FORMAT " current: %d marking: %d page: " PTR_FORMAT " remset: " PTR_FORMAT,
 708           untype(from_page->start() + field_local_offset), untype(to_field), active_remset_is_current, ZGeneration::young()->is_phase_mark(), p2i(to_page), p2i(to_page->remset_current()));
 709 
 710       volatile zpointer* const p = (volatile zpointer*)to_field;
 711 
 712       if (ZGeneration::young()->is_phase_mark()) {
 713         // Young generation remembered set scanning needs to know about this
 714         // field. It will take responsibility for adding a new remembered set entry if needed.
 715         _forwarding->relocated_remembered_fields_register(p);
 716       } else {
 717         to_page->remember(p);
 718         if (in_place) {
 719           assert(to_page->is_remembered(p), "p: " PTR_FORMAT, p2i(p));
 720         }
 721       }
 722     }
 723   }
 724 
 725   static bool add_remset_if_young(volatile zpointer* p, zaddress addr) {
 726     if (ZHeap::heap()->is_young(addr)) {
 727       ZRelocate::add_remset(p);
 728       return true;
 729     }
 730 
 731     return false;
 732   }
 733 
 734   static void update_remset_promoted_filter_and_remap_per_field(volatile zpointer* p) {
 735     const zpointer ptr = Atomic::load(p);
 736 
 737     assert(ZPointer::is_old_load_good(ptr), "Should be at least old load good: " PTR_FORMAT, untype(ptr));
 738 
 739     if (ZPointer::is_store_good(ptr)) {
 740       // Already has a remset entry
 741       return;
 742     }
 743 
 744     if (ZPointer::is_load_good(ptr)) {
 745       if (!is_null_any(ptr)) {
 746         const zaddress addr = ZPointer::uncolor(ptr);
 747         add_remset_if_young(p, addr);
 748       }
 749       // No need to remap since it is already load good
 750       return;
 751     }
 752 
 753     if (is_null_any(ptr)) {
 754       // Eagerly remap to skip adding a remset entry just to get deferred remapping
 755       ZBarrier::remap_young_relocated(p, ptr);
 756       return;
 757     }
 758 
 759     const zaddress_unsafe addr_unsafe = ZPointer::uncolor_unsafe(ptr);
 760     ZForwarding* const forwarding = ZGeneration::young()->forwarding(addr_unsafe);
 761 
 762     if (forwarding == nullptr) {
 763       // Object isn't being relocated
 764       const zaddress addr = safe(addr_unsafe);
 765       if (!add_remset_if_young(p, addr)) {
 766         // Not young - eagerly remap to skip adding a remset entry just to get deferred remapping
 767         ZBarrier::remap_young_relocated(p, ptr);
 768       }
 769       return;
 770     }
 771 
 772     const zaddress addr = forwarding->find(addr_unsafe);
 773 
 774     if (!is_null(addr)) {
 775       // Object has already been relocated
 776       if (!add_remset_if_young(p, addr)) {
 777         // Not young - eagerly remap to skip adding a remset entry just to get deferred remapping
 778         ZBarrier::remap_young_relocated(p, ptr);
 779       }
 780       return;
 781     }
 782 
 783     // Object has not been relocated yet
 784     // Don't want to eagerly relocate objects, so just add a remset entry
 785     ZRelocate::add_remset(p);
 786     return;
 787   }
 788 
 789   void update_remset_promoted(zaddress to_addr) const {
 790     ZIterator::basic_oop_iterate(to_oop(to_addr), update_remset_promoted_filter_and_remap_per_field);
 791   }
 792 
 793   void update_remset_for_fields(zaddress from_addr, zaddress to_addr, size_t size) const {
 794     if (_forwarding->to_age() != ZPageAge::old) {
 795       // No remembered set in young pages
 796       return;
 797     }
 798 
 799     // Need to deal with remset when moving objects to the old generation
 800     if (_forwarding->from_age() == ZPageAge::old) {
 801       update_remset_old_to_old(from_addr, to_addr, size);
 802       return;
 803     }
 804 
 805     // Normal promotion
 806     update_remset_promoted(to_addr);
 807   }
 808 
 809   bool try_relocate_object(zaddress from_addr) {
 810     size_t size = ZUtils::object_size(from_addr);
 811     const zaddress to_addr = try_relocate_object_inner(from_addr, size);
 812 
 813     if (is_null(to_addr)) {
 814       return false;
 815     }
 816 
 817     update_remset_for_fields(from_addr, to_addr, size);
 818 
 819     return true;
 820   }
 821 
 822   void start_in_place_relocation_prepare_remset(ZPage* from_page) {
 823     if (_forwarding->from_age() != ZPageAge::old) {
 824       // Only old pages use remset bits
 825       return;
 826     }
 827 
 828     if (ZGeneration::old()->active_remset_is_current()) {
 829       // We want to iterate over and clear the remset bits of the from-space page,
 830       // and insert current bits in the to-space page. However, with in-place
 831       // relocation, the from-space and to-space pages are the same. Clearing
 832       // is destructive, and is difficult to perform before or during the iteration.
 833       // However, clearing of the current bits has to be done before exposing the
 834       // to-space objects in the forwarding table.
 835       //
 836       // To solve this tricky dependency problem, we start by stashing away the
 837       // current bits in the previous bits, and clearing the current bits
 838       // (implemented by swapping the bits). This way, the current bits are
 839       // cleared before copying the objects (like a normal to-space page),
 840       // and the previous bits are representing a copy of the current bits
 841       // of the from-space page, and are used for iteration.
 842       from_page->swap_remset_bitmaps();
 843     }
 844   }
 845 
 846   ZPage* start_in_place_relocation(zoffset relocated_watermark) {
 847     _forwarding->in_place_relocation_claim_page();
 848     _forwarding->in_place_relocation_start(relocated_watermark);
 849 
 850     ZPage* const from_page = _forwarding->page();
 851 
 852     const ZPageAge to_age = _forwarding->to_age();
 853     const bool promotion = _forwarding->is_promotion();
 854 
 855     // Promotions happen through a new cloned page
 856     ZPage* const to_page = promotion ? from_page->clone_limited() : from_page;
 857 
 858     // Reset page for in-place relocation
 859     to_page->reset(to_age);
 860     to_page->reset_top_for_allocation();
 861     if (promotion) {
 862       to_page->remset_alloc();
 863     }
 864 
 865     // Verify that the inactive remset is clear when resetting the page for
 866     // in-place relocation.
 867     if (from_page->age() == ZPageAge::old) {
 868       if (ZGeneration::old()->active_remset_is_current()) {
 869         to_page->verify_remset_cleared_previous();
 870       } else {
 871         to_page->verify_remset_cleared_current();
 872       }
 873     }
 874 
 875     // Clear remset bits for all objects that were relocated
 876     // before this page became an in-place relocated page.
 877     start_in_place_relocation_prepare_remset(from_page);
 878 
 879     if (promotion) {
 880       // Register the promotion
 881       ZGeneration::young()->in_place_relocate_promote(from_page, to_page);
 882       ZGeneration::young()->register_in_place_relocate_promoted(from_page);
 883     }
 884 
 885     return to_page;
 886   }
 887 
 888   void relocate_object(oop obj) {
 889     const zaddress addr = to_zaddress(obj);
 890     assert(ZHeap::heap()->is_object_live(addr), "Should be live");
 891 
 892     while (!try_relocate_object(addr)) {
 893       // Allocate a new target page, or if that fails, use the page being
 894       // relocated as the new target, which will cause it to be relocated
 895       // in-place.
 896       const ZPageAge to_age = _forwarding->to_age();
 897       ZPage* to_page = _allocator->alloc_and_retire_target_page(_forwarding, target(to_age));
 898       set_target(to_age, to_page);
 899       if (to_page != nullptr) {
 900         continue;
 901       }
 902 
 903       // Start in-place relocation to block other threads from accessing
 904       // the page, or its forwarding table, until it has been released
 905       // (relocation completed).
 906       to_page = start_in_place_relocation(ZAddress::offset(addr));
 907       set_target(to_age, to_page);
 908     }
 909   }
 910 
 911 public:
 912   ZRelocateWork(Allocator* allocator, ZGeneration* generation)
 913     : _allocator(allocator),
 914       _forwarding(nullptr),
 915       _target(),
 916       _generation(generation),
 917       _other_promoted(0),
 918       _other_compacted(0) {}
 919 
 920   ~ZRelocateWork() {
 921     for (uint i = 0; i < ZAllocator::_relocation_allocators; ++i) {
 922       _allocator->free_target_page(_target[i]);
 923     }
 924     // Report statistics on behalf of non-worker threads
 925     _generation->increase_promoted(_other_promoted);
 926     _generation->increase_compacted(_other_compacted);
 927   }
 928 
 929   bool active_remset_is_current() const {
 930     // Normal old-to-old relocation can treat the from-page remset as a
 931     // read-only copy, and then copy over the appropriate remset bits to the
 932     // cleared to-page's 'current' remset bitmap.
 933     //
 934     // In-place relocation is more complicated. Since the same page is both
 935     // a from-page and a to-page, we need to remove the old remset bits, and
 936     // add remset bits that correspond to the new locations of the relocated
 937     // objects.
 938     //
 939     // Depending on how long ago the page was allocated (in terms of the
 940     // number of young GCs and the current young GC's phase), the active
 941     // remembered set will be in either the 'current' or the 'previous' bitmap.
 942     //
 943     // If the active bits are in the 'previous' bitmap, we know that the
 944     // 'current' bitmap was cleared at some earlier point in time, and we can
 945     // simply set new bits in the 'current' bitmap. Later, when relocation has
 946     // read all the old remset bits, we can simply clear the 'previous' remset
 947     // bitmap.
 948     //
 949     // If, on the other hand, the active bits are in the 'current' bitmap, then
 950     // that bitmap is used both to read the old remset bits, and as the
 951     // destination for the remset bits that we copy when an object is copied
 952     // to its new location within the page. We need to *carefully* remove all
 953     // old remset bits, without clearing out the newly set bits.
 954     return ZGeneration::old()->active_remset_is_current();
 955   }
 956 
 957   void clear_remset_before_in_place_reuse(ZPage* page) {
 958     if (_forwarding->from_age() != ZPageAge::old) {
 959       // No remset bits
 960       return;
 961     }
 962 
 963     // Clear 'previous' remset bits. For in-place relocated pages, the previous
 964     // remset bits are always used, even when active_remset_is_current().
 965     page->clear_remset_previous();
 966   }
 967 
 968   void finish_in_place_relocation() {
 969     // We are done with the from_space copy of the page
 970     _forwarding->in_place_relocation_finish();
 971   }
 972 
 973   void do_forwarding(ZForwarding* forwarding) {
 974     _forwarding = forwarding;
 975 
 976     _forwarding->page()->log_msg(" (relocate page)");
 977 
 978     ZVerify::before_relocation(_forwarding);
 979 
 980     // Relocate objects
 981     _forwarding->object_iterate([&](oop obj) { relocate_object(obj); });
 982 
 983     ZVerify::after_relocation(_forwarding);
 984 
 985     // Verify
 986     if (ZVerifyForwarding) {
 987       _forwarding->verify();
 988     }
 989 
 990     _generation->increase_freed(_forwarding->page()->size());
 991 
 992     // Deal with in-place relocation
 993     const bool in_place = _forwarding->in_place_relocation();
 994     if (in_place) {
 995       finish_in_place_relocation();
 996     }
 997 
 998     // Old from-space pages need to deal with remset bits
 999     if (_forwarding->from_age() == ZPageAge::old) {
1000       _forwarding->relocated_remembered_fields_after_relocate();
1001     }
1002 
1003     // Release relocated page
1004     _forwarding->release_page();
1005 
1006     if (in_place) {
1007       // Wait for all other threads to call release_page
1008       ZPage* const page = _forwarding->detach_page();
1009 
1010       // Ensure that previous remset bits are cleared
1011       clear_remset_before_in_place_reuse(page);
1012 
1013       page->log_msg(" (relocate page done in-place)");
1014 
1015       // Different pages when promoting
1016       ZPage* const target_page = target(_forwarding->to_age());
1017       _allocator->share_target_page(target_page);
1018 
1019     } else {
1020       // Wait for all other threads to call release_page
1021       ZPage* const page = _forwarding->detach_page();
1022 
1023       page->log_msg(" (relocate page done normal)");
1024 
1025       // Free page
1026       ZHeap::heap()->free_page(page, true /* allow_defragment */);
1027     }
1028   }
1029 };
1030 
1031 class ZRelocateStoreBufferInstallBasePointersThreadClosure : public ThreadClosure {
1032 public:
1033   virtual void do_thread(Thread* thread) {
1034     JavaThread* const jt = JavaThread::cast(thread);
1035     ZStoreBarrierBuffer* buffer = ZThreadLocalData::store_barrier_buffer(jt);
1036     buffer->install_base_pointers();
1037   }
1038 };
1039 
1040 // Installs the object base pointers (object starts) for the fields written
1041 // in the store buffer. The code that searches for the object start uses the
1042 // liveness information stored in the pages. That information is lost when the
1043 // pages have been relocated and then destroyed.
1044 class ZRelocateStoreBufferInstallBasePointersTask : public ZTask {
1045 private:
1046   ZJavaThreadsIterator _threads_iter;
1047 
1048 public:
1049   ZRelocateStoreBufferInstallBasePointersTask(ZGeneration* generation)
1050     : ZTask("ZRelocateStoreBufferInstallBasePointersTask"),
1051       _threads_iter(generation->id_optional()) {}
1052 
1053   virtual void work() {
1054     ZRelocateStoreBufferInstallBasePointersThreadClosure fix_store_buffer_cl;
1055     _threads_iter.apply(&fix_store_buffer_cl);
1056   }
1057 };
1058 
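     // The main relocation task. Each worker claims forwardings from the
     // relocation set and relocates the corresponding pages, while also
     // servicing the relocate queue so that stalled threads can make progress.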
1059 class ZRelocateTask : public ZRestartableTask {
1060 private:
1061   ZRelocationSetParallelIterator _iter;
1062   ZGeneration* const             _generation;
1063   ZRelocateQueue* const          _queue;
1064   ZRelocateSmallAllocator        _small_allocator;
1065   ZRelocateMediumAllocator       _medium_allocator;
1066 
1067 public:
1068   ZRelocateTask(ZRelocationSet* relocation_set, ZRelocateQueue* queue)
1069     : ZRestartableTask("ZRelocateTask"),
1070       _iter(relocation_set),
1071       _generation(relocation_set->generation()),
1072       _queue(queue),
1073       _small_allocator(_generation),
1074       _medium_allocator(_generation) {}
1075 
1076   ~ZRelocateTask() {
1077     _generation->stat_relocation()->at_relocate_end(_small_allocator.in_place_count(), _medium_allocator.in_place_count());
1078 
1079     // Signal that we're not using the queue anymore. Used mostly for asserts.
1080     _queue->deactivate();
1081   }
1082 
1083   virtual void work() {
1084     ZRelocateWork<ZRelocateSmallAllocator> small(&_small_allocator, _generation);
1085     ZRelocateWork<ZRelocateMediumAllocator> medium(&_medium_allocator, _generation);
1086 
1087     const auto do_forwarding = [&](ZForwarding* forwarding) {
1088       ZPage* const page = forwarding->page();
1089       if (page->is_small()) {
1090         small.do_forwarding(forwarding);
1091       } else {
1092         medium.do_forwarding(forwarding);
1093       }
1094 
1095       // Absolute last thing done while relocating a page.
1096       //
1097       // We don't use the SuspendibleThreadSet when relocating pages.
1098       // Instead the ZRelocateQueue is used as a pseudo STS joiner/leaver.
1099       //
1100       // After the mark_done call, a safepoint could be completed and a
1101       // new GC phase could be entered.
1102       forwarding->mark_done();
1103     };
1104 
1105     const auto claim_and_do_forwarding = [&](ZForwarding* forwarding) {
1106       if (forwarding->claim()) {
1107         do_forwarding(forwarding);
1108       }
1109     };
1110 
1111     const auto do_forwarding_one_from_iter = [&]() {
1112       ZForwarding* forwarding;
1113 
1114       if (_iter.next(&forwarding)) {
1115         claim_and_do_forwarding(forwarding);
1116         return true;
1117       }
1118 
1119       return false;
1120     };
1121 
1122     for (;;) {
1123       // As long as there are requests in the relocate queue, there are threads
1124       // waiting in a VM state that does not allow them to be blocked. The
1125       // worker thread needs to finish relocating these pages, allowing the
1126       // other threads to continue and proceed to a blocking state. After that,
1127       // the worker threads are allowed to safepoint synchronize.
1128       for (ZForwarding* forwarding; (forwarding = _queue->synchronize_poll()) != nullptr;) {
1129         do_forwarding(forwarding);
1130       }
1131 
1132       if (!do_forwarding_one_from_iter()) {
1133         // No more work
1134         break;
1135       }
1136 
1137       if (_generation->should_worker_resize()) {
1138         break;
1139       }
1140     }
1141 
1142     _queue->leave();
1143   }
1144 
1145   virtual void resize_workers(uint nworkers) {
1146     _queue->resize_workers(nworkers);
1147   }
1148 };
1149 
1150 static void remap_and_maybe_add_remset(volatile zpointer* p) {
1151   const zpointer ptr = Atomic::load(p);
1152 
1153   if (ZPointer::is_store_good(ptr)) {
1154     // Already has a remset entry
1155     return;
1156   }
1157 
1158   // Remset entries are used for two reasons:
1159   // 1) Young generation marking of old-to-young pointer roots
1160   // 2) Deferred remapping of stale old-to-young pointers
1161   //
1162   // This load barrier will up-front perform the remapping of (2),
1163   // and the code below only has to make sure we register up-to-date
1164   // old-to-young pointers for (1).
1165   const zaddress addr = ZBarrier::load_barrier_on_oop_field_preloaded(p, ptr);
1166 
1167   if (is_null(addr)) {
1168     // No need for remset entries for null pointers
1169     return;
1170   }
1171 
1172   if (ZHeap::heap()->is_old(addr)) {
1173     // No need for remset entries for pointers to old gen
1174     return;
1175   }
1176 
1177   ZRelocate::add_remset(p);
1178 }
1179 
1180 class ZRelocateAddRemsetForFlipPromoted : public ZRestartableTask {
1181 private:
1182   ZStatTimerYoung                _timer;
1183   ZArrayParallelIterator<ZPage*> _iter;
1184 
1185 public:
1186   ZRelocateAddRemsetForFlipPromoted(ZArray<ZPage*>* pages)
1187     : ZRestartableTask("ZRelocateAddRemsetForFlipPromoted"),
1188       _timer(ZSubPhaseConcurrentRelocateRememberedSetFlipPromotedYoung),
1189       _iter(pages) {}
1190 
1191   virtual void work() {
1192     SuspendibleThreadSetJoiner sts_joiner;
1193 
1194     for (ZPage* page; _iter.next(&page);) {
1195       page->object_iterate([&](oop obj) {
1196         ZIterator::basic_oop_iterate_safe(obj, remap_and_maybe_add_remset);
1197       });
1198 
1199       SuspendibleThreadSet::yield();
1200       if (ZGeneration::young()->should_worker_resize()) {
1201         return;
1202       }
1203     }
1204   }
1205 };
1206 
1207 void ZRelocate::relocate(ZRelocationSet* relocation_set) {
1208   {
1209     // Install the store buffer's base pointers before the
1210     // relocate task destroys the liveness information in
1211     // the relocated pages.
1212     ZRelocateStoreBufferInstallBasePointersTask buffer_task(_generation);
1213     workers()->run(&buffer_task);
1214   }
1215 
1216   {
1217     ZRelocateTask relocate_task(relocation_set, &_queue);
1218     workers()->run(&relocate_task);
1219   }
1220 
1221   if (relocation_set->generation()->is_young()) {
1222     ZRelocateAddRemsetForFlipPromoted task(relocation_set->flip_promoted_pages());
1223     workers()->run(&task);
1224   }
1225 }
1226 
1227 ZPageAge ZRelocate::compute_to_age(ZPageAge from_age) {
1228   if (from_age == ZPageAge::old) {
1229     return ZPageAge::old;
1230   }
1231 
1232   const uint age = static_cast<uint>(from_age);
1233   if (age >= ZGeneration::young()->tenuring_threshold()) {
1234     return ZPageAge::old;
1235   }
1236 
1237   return static_cast<ZPageAge>(age + 1);
1238 }
1239 
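     // Ages the given young pages without copying them ("flip aging"). Pages
     // whose objects have reached the tenuring threshold are flip promoted to
     // the old generation in place.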
1240 class ZFlipAgePagesTask : public ZTask {
1241 private:
1242   ZArrayParallelIterator<ZPage*> _iter;
1243 
1244 public:
1245   ZFlipAgePagesTask(const ZArray<ZPage*>* pages)
1246     : ZTask("ZPromotePagesTask"),
1247       _iter(pages) {}
1248 
1249   virtual void work() {
1250     SuspendibleThreadSetJoiner sts_joiner;
1251     ZArray<ZPage*> promoted_pages;
1252 
1253     for (ZPage* prev_page; _iter.next(&prev_page);) {
1254       const ZPageAge from_age = prev_page->age();
1255       const ZPageAge to_age = ZRelocate::compute_to_age(from_age);
1256       assert(from_age != ZPageAge::old, "invalid age for a young collection");
1257 
1258       // Figure out if this is a proper promotion
1259       const bool promotion = to_age == ZPageAge::old;
1260 
1261       if (promotion) {
1262         // Before promoting an object (and before relocate start), we must ensure that all
1263         // contained zpointers are store good. The marking code ensures that for non-null
1264         // pointers, but null pointers are ignored. This code ensures that even null pointers
1265         // are made store good for the promoted objects.
1266         prev_page->object_iterate([&](oop obj) {
1267           ZIterator::basic_oop_iterate_safe(obj, ZBarrier::promote_barrier_on_young_oop_field);
1268         });
1269       }
1270 
1271       // Logging
1272       prev_page->log_msg(promotion ? " (flip promoted)" : " (flip survived)");
1273 
1274       // Setup to-space page
1275       ZPage* const new_page = promotion ? prev_page->clone_limited() : prev_page;
1276 
1277       // Reset page for flip aging
1278       new_page->reset(to_age);
1279       new_page->reset_livemap();
1280       if (promotion) {
1281         new_page->remset_alloc();
1282       }
1283 
1284       if (promotion) {
1285         ZGeneration::young()->flip_promote(prev_page, new_page);
1286         // Defer promoted page registration to reduce the number of times the lock is taken
1287         promoted_pages.push(prev_page);
1288       }
1289 
1290       SuspendibleThreadSet::yield();
1291     }
1292 
1293     ZGeneration::young()->register_flip_promoted(promoted_pages);
1294   }
1295 };
1296 
1297 void ZRelocate::flip_age_pages(const ZArray<ZPage*>* pages) {
1298   ZFlipAgePagesTask flip_age_task(pages);
1299   workers()->run(&flip_age_task);
1300 }
1301 
1302 void ZRelocate::synchronize() {
1303   _queue.synchronize();
1304 }
1305 
1306 void ZRelocate::desynchronize() {
1307   _queue.desynchronize();
1308 }
1309 
1310 ZRelocateQueue* ZRelocate::queue() {
1311   return &_queue;
1312 }
1313 
1314 bool ZRelocate::is_queue_active() const {
1315   return _queue.is_active();
1316 }