/*
 * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "gc/shared/gc_globals.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zAbort.inline.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zAllocator.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zForwarding.inline.hpp"
#include "gc/z/zGeneration.inline.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zIndexDistributor.inline.hpp"
#include "gc/z/zIterator.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageAge.hpp"
#include "gc/z/zRelocate.hpp"
#include "gc/z/zRelocationSet.inline.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStackWatermark.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zUncoloredRoot.inline.hpp"
#include "gc/z/zVerify.hpp"
#include "gc/z/zWorkers.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/atomic.hpp"
#include "utilities/debug.hpp"

static const ZStatCriticalPhase ZCriticalPhaseRelocationStall("Relocation Stall");
static const ZStatSubPhase ZSubPhaseConcurrentRelocateRememberedSetFlipPromotedYoung("Concurrent Relocate Remset FP", ZGenerationId::young);

ZRelocateQueue::ZRelocateQueue()
  : _lock(),
    _queue(),
    _nworkers(0),
    _nsynchronized(0),
    _synchronize(false),
    _is_active(false),
    _needs_attention(0) {}

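// _needs_attention is a small counter (0, 1 or 2) rather than a flag: it is
// raised once when the queue becomes non-empty and once when synchronization
// is requested, so workers can poll a single word on the fast path before
// taking the lock in synchronize_poll().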
bool ZRelocateQueue::needs_attention() const {
  return Atomic::load(&_needs_attention) != 0;
}

void ZRelocateQueue::inc_needs_attention() {
  const int needs_attention = Atomic::add(&_needs_attention, 1);
  assert(needs_attention == 1 || needs_attention == 2, "Invalid state");
}

void ZRelocateQueue::dec_needs_attention() {
  const int needs_attention = Atomic::sub(&_needs_attention, 1);
  assert(needs_attention == 0 || needs_attention == 1, "Invalid state");
}

void ZRelocateQueue::activate(uint nworkers) {
  _is_active = true;
  join(nworkers);
}

void ZRelocateQueue::deactivate() {
  Atomic::store(&_is_active, false);
  clear();
}

bool ZRelocateQueue::is_active() const {
  return Atomic::load(&_is_active);
}

void ZRelocateQueue::join(uint nworkers) {
  assert(nworkers != 0, "Must request at least one worker");
  assert(_nworkers == 0, "Invalid state");
  assert(_nsynchronized == 0, "Invalid state");

  log_debug(gc, reloc)("Joining workers: %u", nworkers);

  _nworkers = nworkers;
}

void ZRelocateQueue::resize_workers(uint nworkers) {
  assert(nworkers != 0, "Must request at least one worker");
  assert(_nworkers == 0, "Invalid state");
  assert(_nsynchronized == 0, "Invalid state");

  log_debug(gc, reloc)("Resize workers: %u", nworkers);

  ZLocker<ZConditionLock> locker(&_lock);
  _nworkers = nworkers;
}

void ZRelocateQueue::leave() {
  ZLocker<ZConditionLock> locker(&_lock);
  _nworkers--;

  assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);

  log_debug(gc, reloc)("Leaving workers: left: %u _synchronize: %d _nsynchronized: %u", _nworkers, _synchronize, _nsynchronized);

  // Prune done forwardings
  const bool forwardings_done = prune();

  // Check if all workers synchronized
  const bool last_synchronized = _synchronize && _nworkers == _nsynchronized;

  if (forwardings_done || last_synchronized) {
    _lock.notify_all();
  }
}

void ZRelocateQueue::add_and_wait(ZForwarding* forwarding) {
  ZStatTimer timer(ZCriticalPhaseRelocationStall);
  ZLocker<ZConditionLock> locker(&_lock);

  if (forwarding->is_done()) {
    return;
  }

  _queue.append(forwarding);
  if (_queue.length() == 1) {
    // Queue became non-empty
    inc_needs_attention();
    _lock.notify_all();
  }

  while (!forwarding->is_done()) {
    _lock.wait();
  }
}

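// Removes forwardings that have already been completed from the queue and
// lowers the needs-attention count if the queue becomes empty. Returns true
// if at least one forwarding was removed.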
bool ZRelocateQueue::prune() {
  if (_queue.is_empty()) {
    return false;
  }

  bool done = false;

  for (int i = 0; i < _queue.length();) {
    const ZForwarding* const forwarding = _queue.at(i);
    if (forwarding->is_done()) {
      done = true;

      _queue.delete_at(i);
    } else {
      i++;
    }
  }

  if (_queue.is_empty()) {
    dec_needs_attention();
  }

  return done;
}

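// Prunes completed forwardings and then claims the first unclaimed forwarding
// in the queue. Returns nullptr if there is nothing left to claim.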
ZForwarding* ZRelocateQueue::prune_and_claim() {
  if (prune()) {
    _lock.notify_all();
  }

  for (int i = 0; i < _queue.length(); i++) {
    ZForwarding* const forwarding = _queue.at(i);
    if (forwarding->claim()) {
      return forwarding;
    }
  }

  return nullptr;
}

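// Scoped helper that counts the current worker as synchronized while it waits
// in synchronize_poll(), and removes it from the synchronized count again when
// the scope is left.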
class ZRelocateQueueSynchronizeThread {
private:
  ZRelocateQueue* const _queue;

public:
  ZRelocateQueueSynchronizeThread(ZRelocateQueue* queue)
    : _queue(queue) {
    _queue->synchronize_thread();
  }

  ~ZRelocateQueueSynchronizeThread() {
    _queue->desynchronize_thread();
  }
};

void ZRelocateQueue::synchronize_thread() {
  _nsynchronized++;

  log_debug(gc, reloc)("Synchronize worker _nsynchronized %u", _nsynchronized);

  assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);
  if (_nsynchronized == _nworkers) {
    // All workers synchronized
    _lock.notify_all();
  }
}

void ZRelocateQueue::desynchronize_thread() {
  _nsynchronized--;

  log_debug(gc, reloc)("Desynchronize worker _nsynchronized %u", _nsynchronized);

  assert(_nsynchronized < _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);
}

ZForwarding* ZRelocateQueue::synchronize_poll() {
  // Fast path avoids locking
  if (!needs_attention()) {
    return nullptr;
  }

  // Slow path to get the next forwarding and/or synchronize
  ZLocker<ZConditionLock> locker(&_lock);

  {
    ZForwarding* const forwarding = prune_and_claim();
    if (forwarding != nullptr) {
      // Don't become synchronized while there are elements in the queue
      return forwarding;
    }
  }

  if (!_synchronize) {
    return nullptr;
  }

  ZRelocateQueueSynchronizeThread rqst(this);

  do {
    _lock.wait();

    ZForwarding* const forwarding = prune_and_claim();
    if (forwarding != nullptr) {
      return forwarding;
    }
  } while (_synchronize);

  return nullptr;
}

void ZRelocateQueue::clear() {
  assert(_nworkers == 0, "Invalid state");

  if (_queue.is_empty()) {
    return;
  }

  ZArrayIterator<ZForwarding*> iter(&_queue);
  for (ZForwarding* forwarding; iter.next(&forwarding);) {
    assert(forwarding->is_done(), "All should be done");
  }

  assert(false, "Clear was not empty");

  _queue.clear();
  dec_needs_attention();
}

void ZRelocateQueue::synchronize() {
  ZLocker<ZConditionLock> locker(&_lock);
  _synchronize = true;

  inc_needs_attention();

  log_debug(gc, reloc)("Synchronize all workers 1 _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);

  while (_nworkers != _nsynchronized) {
    _lock.wait();
    log_debug(gc, reloc)("Synchronize all workers 2 _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);
  }
}

void ZRelocateQueue::desynchronize() {
  ZLocker<ZConditionLock> locker(&_lock);
  _synchronize = false;

  log_debug(gc, reloc)("Desynchronize all workers _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);

  assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);

  dec_needs_attention();

  _lock.notify_all();
}

ZRelocate::ZRelocate(ZGeneration* generation)
  : _generation(generation),
    _queue() {}

ZWorkers* ZRelocate::workers() const {
  return _generation->workers();
}

void ZRelocate::start() {
  _queue.activate(workers()->active_workers());
}

void ZRelocate::add_remset(volatile zpointer* p) {
  ZGeneration::young()->remember(p);
}

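// Copies a single object to a newly allocated location and installs the
// forwarding entry, returning the final to-address. Returns null if the
// allocation failed, in which case the caller stalls on the relocate queue
// and waits for a worker thread to finish relocating the page.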
static zaddress relocate_object_inner(ZForwarding* forwarding, zaddress from_addr, ZForwardingCursor* cursor) {
  assert(ZHeap::heap()->is_object_live(from_addr), "Should be live");

  // Allocate object
  const size_t old_size = ZUtils::object_size(from_addr);
  const size_t size = ZUtils::copy_size(from_addr, old_size);

  ZAllocatorForRelocation* allocator = ZAllocator::relocation(forwarding->to_age());

  const zaddress to_addr = allocator->alloc_object(size);

  if (is_null(to_addr)) {
    // Allocation failed
    return zaddress::null;
  }
  assert(to_addr != from_addr, "addresses must be different");

  // Copy object
  ZUtils::object_copy_disjoint(from_addr, to_addr, old_size);
  ZUtils::initialize_hash_if_necessary(to_addr, from_addr);

  // Insert forwarding
  const zaddress to_addr_final = forwarding->insert(from_addr, to_addr, cursor);

  if (to_addr_final != to_addr) {
    // Already relocated, try undo allocation
    allocator->undo_alloc_object(to_addr, size);
  }

  return to_addr_final;
}

zaddress ZRelocate::relocate_object(ZForwarding* forwarding, zaddress_unsafe from_addr) {
  ZForwardingCursor cursor;

  // Lookup forwarding
  zaddress to_addr = forwarding->find(from_addr, &cursor);
  if (!is_null(to_addr)) {
    // Already relocated
    return to_addr;
  }

  // Relocate object
  if (forwarding->retain_page(&_queue)) {
    assert(_generation->is_phase_relocate(), "Must be");
    to_addr = relocate_object_inner(forwarding, safe(from_addr), &cursor);
    forwarding->release_page();

    if (!is_null(to_addr)) {
      // Success
      return to_addr;
    }

    // Failed to relocate object. Signal and wait for a worker thread to
    // complete relocation of this page, and then forward the object.
    _queue.add_and_wait(forwarding);
  }

  // Forward object
  return forward_object(forwarding, from_addr);
}

zaddress ZRelocate::forward_object(ZForwarding* forwarding, zaddress_unsafe from_addr) {
  const zaddress to_addr = forwarding->find(from_addr);
  assert(!is_null(to_addr), "Should be forwarded: " PTR_FORMAT, untype(from_addr));
  return to_addr;
}

static ZPage* alloc_page(ZAllocatorForRelocation* allocator, ZPageType type, size_t size) {
  if (ZStressRelocateInPlace) {
    // Simulate failure to allocate a new page. This will
    // cause the page being relocated to be relocated in-place.
    return nullptr;
  }

  ZAllocationFlags flags;
  flags.set_non_blocking();
  flags.set_gc_relocation();

  return allocator->alloc_page_for_relocation(type, size, flags);
}

static void retire_target_page(ZGeneration* generation, ZPage* page) {
  if (generation->is_young() && page->is_old()) {
    generation->increase_promoted(page->used());
  } else {
    generation->increase_compacted(page->used());
  }

  // Free target page if it is empty. We can end up with an empty target
  // page if we allocated a new target page, and then lost the race to
  // relocate the remaining objects, leaving the target page empty when
  // relocation completed.
  if (page->used() == 0) {
    ZHeap::heap()->free_page(page, true /* allow_defragment */);
  }
}

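// Allocator used when relocating small pages. Each worker relocates to its
// own target page, so object allocation in the target page does not need to
// be atomic.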
class ZRelocateSmallAllocator {
private:
  ZGeneration* const _generation;
  volatile size_t    _in_place_count;

public:
  ZRelocateSmallAllocator(ZGeneration* generation)
    : _generation(generation),
      _in_place_count(0) {}

  ZPage* alloc_and_retire_target_page(ZForwarding* forwarding, ZPage* target) {
    ZAllocatorForRelocation* const allocator = ZAllocator::relocation(forwarding->to_age());
    ZPage* const page = alloc_page(allocator, forwarding->type(), forwarding->size());
    if (page == nullptr) {
      Atomic::inc(&_in_place_count);
    }

    if (target != nullptr) {
      // Retire the old target page
      retire_target_page(_generation, target);
    }

    return page;
  }

  void share_target_page(ZPage* page) {
    // Does nothing
  }

  void free_target_page(ZPage* page) {
    if (page != nullptr) {
      retire_target_page(_generation, page);
    }
  }

  zaddress alloc_object(ZPage* page, size_t size) const {
    return (page != nullptr) ? page->alloc_object(size) : zaddress::null;
  }

  void undo_alloc_object(ZPage* page, zaddress addr, size_t size) const {
    page->undo_alloc_object(addr, size);
  }

  size_t in_place_count() const {
    return _in_place_count;
  }
};

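// Allocator used when relocating medium pages. The target page is shared by
// all workers, so allocations in it are atomic and installation of a new
// shared page is serialized with a lock.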
class ZRelocateMediumAllocator {
private:
  ZGeneration* const _generation;
  ZConditionLock     _lock;
  ZPage*             _shared[ZAllocator::_relocation_allocators];
  bool               _in_place;
  volatile size_t    _in_place_count;

public:
  ZRelocateMediumAllocator(ZGeneration* generation)
    : _generation(generation),
      _lock(),
      _shared(),
      _in_place(false),
      _in_place_count(0) {}

  ~ZRelocateMediumAllocator() {
    for (uint i = 0; i < ZAllocator::_relocation_allocators; ++i) {
      if (_shared[i] != nullptr) {
        retire_target_page(_generation, _shared[i]);
      }
    }
  }

  ZPage* shared(ZPageAge age) {
    return _shared[static_cast<uint>(age) - 1];
  }

  void set_shared(ZPageAge age, ZPage* page) {
    _shared[static_cast<uint>(age) - 1] = page;
  }

  ZPage* alloc_and_retire_target_page(ZForwarding* forwarding, ZPage* target) {
    ZLocker<ZConditionLock> locker(&_lock);

    // Wait for any ongoing in-place relocation to complete
    while (_in_place) {
      _lock.wait();
    }

    // Allocate a new page only if the shared page is the same as the
    // current target page. The shared page will be different from the
    // current target page if another thread shared a page, or allocated
    // a new page.
    const ZPageAge to_age = forwarding->to_age();
    if (shared(to_age) == target) {
      ZAllocatorForRelocation* const allocator = ZAllocator::relocation(forwarding->to_age());
      ZPage* const to_page = alloc_page(allocator, forwarding->type(), forwarding->size());
      set_shared(to_age, to_page);
      if (to_page == nullptr) {
        Atomic::inc(&_in_place_count);
        _in_place = true;
      }

      // This thread is responsible for retiring the shared target page
      if (target != nullptr) {
        retire_target_page(_generation, target);
      }
    }

    return shared(to_age);
  }

  void share_target_page(ZPage* page) {
    const ZPageAge age = page->age();

    ZLocker<ZConditionLock> locker(&_lock);
    assert(_in_place, "Invalid state");
    assert(shared(age) == nullptr, "Invalid state");
    assert(page != nullptr, "Invalid page");

    set_shared(age, page);
    _in_place = false;

    _lock.notify_all();
  }

  void free_target_page(ZPage* page) {
    // Does nothing
  }

  zaddress alloc_object(ZPage* page, size_t size) const {
    return (page != nullptr) ? page->alloc_object_atomic(size) : zaddress::null;
  }

  void undo_alloc_object(ZPage* page, zaddress addr, size_t size) const {
    page->undo_alloc_object_atomic(addr, size);
  }

  size_t in_place_count() const {
    return _in_place_count;
  }
};

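// Per-worker state for relocating the objects of one page (forwarding) at a
// time. Keeps one target page per destination age and falls back to in-place
// relocation when a new target page cannot be allocated.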
template <typename Allocator>
class ZRelocateWork : public StackObj {
private:
  Allocator* const   _allocator;
  ZForwarding*       _forwarding;
  ZPage*             _target[ZAllocator::_relocation_allocators];
  ZGeneration* const _generation;
  size_t             _other_promoted;
  size_t             _other_compacted;

  ZPage* target(ZPageAge age) {
    return _target[static_cast<uint>(age) - 1];
  }

  void set_target(ZPageAge age, ZPage* page) {
    _target[static_cast<uint>(age) - 1] = page;
  }

  size_t object_alignment() const {
    return (size_t)1 << _forwarding->object_alignment_shift();
  }

  void increase_other_forwarded(size_t unaligned_object_size) {
    const size_t aligned_size = align_up(unaligned_object_size, object_alignment());
    if (_forwarding->is_promotion()) {
      _other_promoted += aligned_size;
    } else {
      _other_compacted += aligned_size;
    }
  }

  zaddress try_relocate_object_inner(zaddress from_addr, size_t old_size) {
    ZForwardingCursor cursor;
    ZPage* const to_page = target(_forwarding->to_age());
    zoffset_end from_offset = to_zoffset_end(ZAddress::offset(from_addr));
    zoffset_end top = to_page != nullptr ? to_page->top() : to_zoffset_end(0);
    const size_t new_size = ZUtils::copy_size(from_addr, old_size);
    const size_t size = top == from_offset ? old_size : new_size;

    // Lookup forwarding
    {
      const zaddress to_addr = _forwarding->find(from_addr, &cursor);
      if (!is_null(to_addr)) {
        // Already relocated
        increase_other_forwarded(size);
        return to_addr;
      }
    }

    // Allocate object
    const zaddress allocated_addr = _allocator->alloc_object(to_page, size);
    if (is_null(allocated_addr)) {
      // Allocation failed
      return zaddress::null;
    }
    if (old_size != new_size && ((top == from_offset) != (allocated_addr == from_addr))) {
      _allocator->undo_alloc_object(to_page, allocated_addr, size);
      return zaddress::null;
    }

    // Copy object. Use conjoint copying if we are relocating
    // in-place and the new object overlaps with the old object.
    if (_forwarding->in_place_relocation() && allocated_addr + old_size > from_addr) {
      ZUtils::object_copy_conjoint(from_addr, allocated_addr, old_size);
    } else {
      ZUtils::object_copy_disjoint(from_addr, allocated_addr, old_size);
    }
    if (from_addr != allocated_addr) {
      ZUtils::initialize_hash_if_necessary(allocated_addr, from_addr);
    }

    // Insert forwarding
    const zaddress to_addr = _forwarding->insert(from_addr, allocated_addr, &cursor);
    if (to_addr != allocated_addr) {
      // Already relocated, undo allocation
      _allocator->undo_alloc_object(to_page, to_addr, size);
      increase_other_forwarded(size);
    }

    return to_addr;
  }

  void update_remset_old_to_old(zaddress from_addr, zaddress to_addr, size_t size) const {
    // Old-to-old relocation - move existing remset bits

    // If this is called for an in-place relocated page, then this code has the
    // responsibility to clear the old remset bits. Extra care is needed because:
    //
    // 1) The to-object copy can overlap with the from-object copy
    // 2) Remset bits of old objects need to be cleared
    //
    // A watermark is used to keep track of how far the old remset bits have been removed.

    const bool in_place = _forwarding->in_place_relocation();
    ZPage* const from_page = _forwarding->page();
    const uintptr_t from_local_offset = from_page->local_offset(from_addr);

    // Note: even with in-place relocation, the to_page could be another page
    ZPage* const to_page = ZHeap::heap()->page(to_addr);

    // Uses _relaxed version to handle that in-place relocation resets _top
    assert(ZHeap::heap()->is_in_page_relaxed(from_page, from_addr), "Must be");
    assert(to_page->is_in(to_addr), "Must be");

    assert(size <= ZUtils::object_size(to_addr), "old size must be <= new size");
    assert(size > 0, "size must be set");

    // If a young generation collection started while the old generation was
    // relocating objects, the remembered set bits were flipped from "current"
    // to "previous".
    //
    // We need to select the correct remembered set bitmap to ensure that the
    // old remset bits are found.
    //
    // Note that if the young generation marking (remset scanning) finishes
    // before the old generation relocation has relocated this page, then the
    // young generation will visit this page's previous remembered set bits and
    // move them over to the current bitmap.
    //
    // If the young generation runs multiple cycles while the old generation is
    // relocating, then the first cycle will have consumed the old remset bits
    // and moved the associated objects to a new old page. The old relocation
    // could find either of the two bitmaps. So, either it will find the original
    // remset bits for the page, or it will find an empty bitmap for the page. It
    // doesn't matter for correctness, because the young generation marking has
    // already taken care of the bits.

    const bool active_remset_is_current = ZGeneration::old()->active_remset_is_current();

    // When in-place relocation is done and the old remset bits are located in
    // the bitmap that is going to be used for the new remset bits, then we
    // need to clear the old bits before the new bits are inserted.
    const bool iterate_current_remset = active_remset_is_current && !in_place;

    BitMap::Iterator iter = iterate_current_remset
        ? from_page->remset_iterator_limited_current(from_local_offset, size)
        : from_page->remset_iterator_limited_previous(from_local_offset, size);

    for (BitMap::idx_t field_bit : iter) {
      const uintptr_t field_local_offset = ZRememberedSet::to_offset(field_bit);

      // Add remset entry in the to-page
      const uintptr_t offset = field_local_offset - from_local_offset;
      const zaddress to_field = to_addr + offset;
      log_trace(gc, reloc)("Remember: from: " PTR_FORMAT " to: " PTR_FORMAT " current: %d marking: %d page: " PTR_FORMAT " remset: " PTR_FORMAT,
          untype(from_page->start() + field_local_offset), untype(to_field), active_remset_is_current, ZGeneration::young()->is_phase_mark(), p2i(to_page), p2i(to_page->remset_current()));

      volatile zpointer* const p = (volatile zpointer*)to_field;

      if (ZGeneration::young()->is_phase_mark()) {
        // Young generation remembered set scanning needs to know about this
        // field. It will take responsibility for adding a new remembered set entry if needed.
        _forwarding->relocated_remembered_fields_register(p);
      } else {
        to_page->remember(p);
        if (in_place) {
          assert(to_page->is_remembered(p), "p: " PTR_FORMAT, p2i(p));
        }
      }
    }
  }

  static bool add_remset_if_young(volatile zpointer* p, zaddress addr) {
    if (ZHeap::heap()->is_young(addr)) {
      ZRelocate::add_remset(p);
      return true;
    }

    return false;
  }

  static void update_remset_promoted_filter_and_remap_per_field(volatile zpointer* p) {
    const zpointer ptr = Atomic::load(p);

    assert(ZPointer::is_old_load_good(ptr), "Should be at least old load good: " PTR_FORMAT, untype(ptr));

    if (ZPointer::is_store_good(ptr)) {
      // Already has a remset entry
      return;
    }

    if (ZPointer::is_load_good(ptr)) {
      if (!is_null_any(ptr)) {
        const zaddress addr = ZPointer::uncolor(ptr);
        add_remset_if_young(p, addr);
      }
      // No need to remap, the pointer is already load good
      return;
    }

    if (is_null_any(ptr)) {
      // Eagerly remap to skip adding a remset entry just to get deferred remapping
      ZBarrier::remap_young_relocated(p, ptr);
      return;
    }

    const zaddress_unsafe addr_unsafe = ZPointer::uncolor_unsafe(ptr);
    ZForwarding* const forwarding = ZGeneration::young()->forwarding(addr_unsafe);

    if (forwarding == nullptr) {
      // Object isn't being relocated
      const zaddress addr = safe(addr_unsafe);
      if (!add_remset_if_young(p, addr)) {
        // Not young - eagerly remap to skip adding a remset entry just to get deferred remapping
        ZBarrier::remap_young_relocated(p, ptr);
      }
      return;
    }

    const zaddress addr = forwarding->find(addr_unsafe);

    if (!is_null(addr)) {
      // Object has already been relocated
      if (!add_remset_if_young(p, addr)) {
        // Not young - eagerly remap to skip adding a remset entry just to get deferred remapping
        ZBarrier::remap_young_relocated(p, ptr);
      }
      return;
    }

    // Object has not been relocated yet
    // Don't want to eagerly relocate objects, so just add a remset
    ZRelocate::add_remset(p);
    return;
  }

  void update_remset_promoted(zaddress to_addr) const {
    ZIterator::basic_oop_iterate(to_oop(to_addr), update_remset_promoted_filter_and_remap_per_field);
  }

  void update_remset_for_fields(zaddress from_addr, zaddress to_addr, size_t size) const {
    if (_forwarding->to_age() != ZPageAge::old) {
      // No remembered set in young pages
      return;
    }

    // Need to deal with remset when moving objects to the old generation
    if (_forwarding->from_age() == ZPageAge::old) {
      update_remset_old_to_old(from_addr, to_addr, size);
      return;
    }

    // Normal promotion
    update_remset_promoted(to_addr);
  }

  bool try_relocate_object(zaddress from_addr) {
    size_t size = ZUtils::object_size(from_addr);
    const zaddress to_addr = try_relocate_object_inner(from_addr, size);

    if (is_null(to_addr)) {
      return false;
    }

    update_remset_for_fields(from_addr, to_addr, size);

    return true;
  }

  void start_in_place_relocation_prepare_remset(ZPage* from_page) {
    if (_forwarding->from_age() != ZPageAge::old) {
      // Only old pages have remset bits
      return;
    }

    if (ZGeneration::old()->active_remset_is_current()) {
      // We want to iterate over and clear the remset bits of the from-space page,
      // and insert current bits in the to-space page. However, with in-place
      // relocation, the from-space and to-space pages are the same. Clearing
      // is destructive, and is difficult to perform before or during the iteration.
      // However, clearing of the current bits has to be done before exposing the
      // to-space objects in the forwarding table.
      //
      // To solve this tricky dependency problem, we start by stashing away the
      // current bits in the previous bits, and clearing the current bits
      // (implemented by swapping the bits). This way, the current bits are
      // cleared before copying the objects (like a normal to-space page),
      // and the previous bits are representing a copy of the current bits
      // of the from-space page, and are used for iteration.
      from_page->swap_remset_bitmaps();
    }
  }

  ZPage* start_in_place_relocation(zoffset relocated_watermark) {
    _forwarding->in_place_relocation_claim_page();
    _forwarding->in_place_relocation_start(relocated_watermark);

    ZPage* const from_page = _forwarding->page();

    const ZPageAge to_age = _forwarding->to_age();
    const bool promotion = _forwarding->is_promotion();

    // Promotions happen through a new cloned page
    ZPage* const to_page = promotion ? from_page->clone_limited() : from_page;

    // Reset page for in-place relocation
    to_page->reset(to_age);
    to_page->reset_top_for_allocation();
    if (promotion) {
      to_page->remset_alloc();
    }

    // Verify that the inactive remset is clear when resetting the page for
    // in-place relocation.
    if (from_page->age() == ZPageAge::old) {
      if (ZGeneration::old()->active_remset_is_current()) {
        to_page->verify_remset_cleared_previous();
      } else {
        to_page->verify_remset_cleared_current();
      }
    }

    // Clear remset bits for all objects that were relocated
    // before this page became an in-place relocated page.
    start_in_place_relocation_prepare_remset(from_page);

    if (promotion) {
      // Register the promotion
      ZGeneration::young()->in_place_relocate_promote(from_page, to_page);
      ZGeneration::young()->register_in_place_relocate_promoted(from_page);
    }

    return to_page;
  }

  void relocate_object(oop obj) {
    const zaddress addr = to_zaddress(obj);
    assert(ZHeap::heap()->is_object_live(addr), "Should be live");

    while (!try_relocate_object(addr)) {
      // Allocate a new target page, or if that fails, use the page being
      // relocated as the new target, which will cause it to be relocated
      // in-place.
      const ZPageAge to_age = _forwarding->to_age();
      ZPage* to_page = _allocator->alloc_and_retire_target_page(_forwarding, target(to_age));
      set_target(to_age, to_page);
      if (to_page != nullptr) {
        continue;
      }

      // Start in-place relocation to block other threads from accessing
      // the page, or its forwarding table, until it has been released
      // (relocation completed).
      to_page = start_in_place_relocation(ZAddress::offset(addr));
      set_target(to_age, to_page);
    }
  }

public:
  ZRelocateWork(Allocator* allocator, ZGeneration* generation)
    : _allocator(allocator),
      _forwarding(nullptr),
      _target(),
      _generation(generation),
      _other_promoted(0),
      _other_compacted(0) {}

  ~ZRelocateWork() {
    for (uint i = 0; i < ZAllocator::_relocation_allocators; ++i) {
      _allocator->free_target_page(_target[i]);
    }
    // Report statistics on behalf of non-worker threads
    _generation->increase_promoted(_other_promoted);
    _generation->increase_compacted(_other_compacted);
  }

  bool active_remset_is_current() const {
    // Normal old-to-old relocation can treat the from-page remset as a
    // read-only copy, and then copy over the appropriate remset bits to the
    // cleared to-page's 'current' remset bitmap.
    //
    // In-place relocation is more complicated. Since the same page is both
    // a from-page and a to-page, we need to remove the old remset bits and
    // add remset bits that correspond to the new locations of the relocated
    // objects.
    //
    // Depending on how long ago the page was allocated (in terms of the
    // number of young GCs and the current young GC's phase), the active
    // remembered set will be in either the 'current' or 'previous' bitmap.
    //
    // If the active bits are in the 'previous' bitmap, we know that the
    // 'current' bitmap was cleared at some earlier point in time, and we can
    // simply set new bits in the 'current' bitmap. Later, when relocation has
    // read all the old remset bits, we can just clear the 'previous' remset
    // bitmap.
    //
    // If, on the other hand, the active bits are in the 'current' bitmap, then
    // that bitmap is used both to read the old remset bits and as the
    // destination for the remset bits that we copy when an object is copied
    // to its new location within the page. We need to *carefully* remove all
    // old remset bits, without clearing out the newly set bits.
    return ZGeneration::old()->active_remset_is_current();
  }

  void clear_remset_before_in_place_reuse(ZPage* page) {
    if (_forwarding->from_age() != ZPageAge::old) {
      // No remset bits
      return;
    }

    // Clear 'previous' remset bits. For in-place relocated pages, the previous
    // remset bits are always used, even when active_remset_is_current().
    page->clear_remset_previous();
  }

  void finish_in_place_relocation() {
    // We are done with the from_space copy of the page
    _forwarding->in_place_relocation_finish();
  }

  void do_forwarding(ZForwarding* forwarding) {
    _forwarding = forwarding;

    _forwarding->page()->log_msg(" (relocate page)");

    ZVerify::before_relocation(_forwarding);

    // Relocate objects
    _forwarding->object_iterate([&](oop obj) { relocate_object(obj); });

    ZVerify::after_relocation(_forwarding);

    // Verify
    if (ZVerifyForwarding) {
      _forwarding->verify();
    }

    _generation->increase_freed(_forwarding->page()->size());

    // Deal with in-place relocation
    const bool in_place = _forwarding->in_place_relocation();
    if (in_place) {
      finish_in_place_relocation();
    }

    // Old from-space pages need to deal with remset bits
    if (_forwarding->from_age() == ZPageAge::old) {
      _forwarding->relocated_remembered_fields_after_relocate();
    }

    // Release relocated page
    _forwarding->release_page();

    if (in_place) {
      // Wait for all other threads to call release_page
      ZPage* const page = _forwarding->detach_page();

      // Ensure that previous remset bits are cleared
      clear_remset_before_in_place_reuse(page);

      page->log_msg(" (relocate page done in-place)");

      // Different pages when promoting
      ZPage* const target_page = target(_forwarding->to_age());
      _allocator->share_target_page(target_page);

    } else {
      // Wait for all other threads to call release_page
      ZPage* const page = _forwarding->detach_page();

      page->log_msg(" (relocate page done normal)");

      // Free page
      ZHeap::heap()->free_page(page, true /* allow_defragment */);
    }
  }
};

class ZRelocateStoreBufferInstallBasePointersThreadClosure : public ThreadClosure {
public:
  virtual void do_thread(Thread* thread) {
    JavaThread* const jt = JavaThread::cast(thread);
    ZStoreBarrierBuffer* buffer = ZThreadLocalData::store_barrier_buffer(jt);
    buffer->install_base_pointers();
  }
};

// Installs the object base pointers (object starts) for the fields written
// in the store buffer. The code that searches for the object start uses the
// liveness information stored in the pages. That information is lost when the
// pages have been relocated and then destroyed.
class ZRelocateStoreBufferInstallBasePointersTask : public ZTask {
private:
  ZJavaThreadsIterator _threads_iter;

public:
  ZRelocateStoreBufferInstallBasePointersTask(ZGeneration* generation)
    : ZTask("ZRelocateStoreBufferInstallBasePointersTask"),
      _threads_iter(generation->id_optional()) {}

  virtual void work() {
    ZRelocateStoreBufferInstallBasePointersThreadClosure fix_store_buffer_cl;
    _threads_iter.apply(&fix_store_buffer_cl);
  }
};

class ZRelocateTask : public ZRestartableTask {
private:
  ZRelocationSetParallelIterator _iter;
  ZGeneration* const             _generation;
  ZRelocateQueue* const          _queue;
  ZRelocateSmallAllocator        _small_allocator;
  ZRelocateMediumAllocator       _medium_allocator;

public:
  ZRelocateTask(ZRelocationSet* relocation_set, ZRelocateQueue* queue)
    : ZRestartableTask("ZRelocateTask"),
      _iter(relocation_set),
      _generation(relocation_set->generation()),
      _queue(queue),
      _small_allocator(_generation),
      _medium_allocator(_generation) {}

  ~ZRelocateTask() {
    _generation->stat_relocation()->at_relocate_end(_small_allocator.in_place_count(), _medium_allocator.in_place_count());

    // Signal that we're not using the queue anymore. Used mostly for asserts.
    _queue->deactivate();
  }

  virtual void work() {
    ZRelocateWork<ZRelocateSmallAllocator> small(&_small_allocator, _generation);
    ZRelocateWork<ZRelocateMediumAllocator> medium(&_medium_allocator, _generation);

    const auto do_forwarding = [&](ZForwarding* forwarding) {
      ZPage* const page = forwarding->page();
      if (page->is_small()) {
        small.do_forwarding(forwarding);
      } else {
        medium.do_forwarding(forwarding);
      }

      // Absolute last thing done while relocating a page.
      //
      // We don't use the SuspendibleThreadSet when relocating pages.
      // Instead the ZRelocateQueue is used as a pseudo STS joiner/leaver.
      //
      // After the mark_done call, a safepoint could complete and a
      // new GC phase could be entered.
      forwarding->mark_done();
    };

    const auto claim_and_do_forwarding = [&](ZForwarding* forwarding) {
      if (forwarding->claim()) {
        do_forwarding(forwarding);
      }
    };

    const auto do_forwarding_one_from_iter = [&]() {
      ZForwarding* forwarding;

      if (_iter.next(&forwarding)) {
        claim_and_do_forwarding(forwarding);
        return true;
      }

      return false;
    };

    for (;;) {
      // As long as there are requests in the relocate queue, there are threads
      // waiting in a VM state that does not allow them to be blocked. The
      // worker thread needs to finish relocating these pages, allowing the
      // other threads to continue and proceed to a blocking state. After that,
      // the worker threads are allowed to safepoint synchronize.
      for (ZForwarding* forwarding; (forwarding = _queue->synchronize_poll()) != nullptr;) {
        do_forwarding(forwarding);
      }

      if (!do_forwarding_one_from_iter()) {
        // No more work
        break;
      }

      if (_generation->should_worker_resize()) {
        break;
      }
    }

    _queue->leave();
  }

  virtual void resize_workers(uint nworkers) {
    _queue->resize_workers(nworkers);
  }
};

static void remap_and_maybe_add_remset(volatile zpointer* p) {
  const zpointer ptr = Atomic::load(p);

  if (ZPointer::is_store_good(ptr)) {
    // Already has a remset entry
    return;
  }

  // Remset entries are used for two reasons:
  // 1) Young marking old-to-young pointer roots
  // 2) Deferred remapping of stale old-to-young pointers
  //
  // This load barrier will up-front perform the remapping of (2),
  // and the code below only has to make sure we register up-to-date
  // old-to-young pointers for (1).
  const zaddress addr = ZBarrier::load_barrier_on_oop_field_preloaded(p, ptr);

  if (is_null(addr)) {
    // No need for remset entries for null pointers
    return;
  }

  if (ZHeap::heap()->is_old(addr)) {
    // No need for remset entries for pointers to old gen
    return;
  }

  ZRelocate::add_remset(p);
}

class ZRelocateAddRemsetForFlipPromoted : public ZRestartableTask {
private:
  ZStatTimerYoung                _timer;
  ZArrayParallelIterator<ZPage*> _iter;

public:
  ZRelocateAddRemsetForFlipPromoted(ZArray<ZPage*>* pages)
    : ZRestartableTask("ZRelocateAddRemsetForFlipPromoted"),
      _timer(ZSubPhaseConcurrentRelocateRememberedSetFlipPromotedYoung),
      _iter(pages) {}

  virtual void work() {
    SuspendibleThreadSetJoiner sts_joiner;

    for (ZPage* page; _iter.next(&page);) {
      page->object_iterate([&](oop obj) {
        ZIterator::basic_oop_iterate_safe(obj, remap_and_maybe_add_remset);
      });

      SuspendibleThreadSet::yield();
      if (ZGeneration::young()->should_worker_resize()) {
        return;
      }
    }
  }
};

void ZRelocate::relocate(ZRelocationSet* relocation_set) {
  {
    // Install the store buffer's base pointers before the
    // relocate task destroys the liveness information in
    // the relocated pages.
    ZRelocateStoreBufferInstallBasePointersTask buffer_task(_generation);
    workers()->run(&buffer_task);
  }

  {
    ZRelocateTask relocate_task(relocation_set, &_queue);
    workers()->run(&relocate_task);
  }

  if (relocation_set->generation()->is_young()) {
    ZRelocateAddRemsetForFlipPromoted task(relocation_set->flip_promoted_pages());
    workers()->run(&task);
  }
}

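// Computes the destination age for the given from-age: ages increase by one
// per young collection until the tenuring threshold is reached, at which
// point the destination is the old generation. Old stays old.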
ZPageAge ZRelocate::compute_to_age(ZPageAge from_age) {
  if (from_age == ZPageAge::old) {
    return ZPageAge::old;
  }

  const uint age = static_cast<uint>(from_age);
  if (age >= ZGeneration::young()->tenuring_threshold()) {
    return ZPageAge::old;
  }

  return static_cast<ZPageAge>(age + 1);
}

class ZFlipAgePagesTask : public ZTask {
private:
  ZArrayParallelIterator<ZPage*> _iter;

public:
  ZFlipAgePagesTask(const ZArray<ZPage*>* pages)
    : ZTask("ZPromotePagesTask"),
      _iter(pages) {}

  virtual void work() {
    SuspendibleThreadSetJoiner sts_joiner;
    ZArray<ZPage*> promoted_pages;

    for (ZPage* prev_page; _iter.next(&prev_page);) {
      const ZPageAge from_age = prev_page->age();
      const ZPageAge to_age = ZRelocate::compute_to_age(from_age);
      assert(from_age != ZPageAge::old, "invalid age for a young collection");

      // Figure out if this is proper promotion
      const bool promotion = to_age == ZPageAge::old;

      if (promotion) {
        // Before promoting an object (and before relocate start), we must ensure that all
        // contained zpointers are store good. The marking code ensures that for non-null
        // pointers, but null pointers are ignored. This code ensures that even null pointers
        // are made store good for the promoted objects.
        prev_page->object_iterate([&](oop obj) {
          ZIterator::basic_oop_iterate_safe(obj, ZBarrier::promote_barrier_on_young_oop_field);
        });
      }

      // Logging
      prev_page->log_msg(promotion ? " (flip promoted)" : " (flip survived)");

      // Setup to-space page
      ZPage* const new_page = promotion ? prev_page->clone_limited() : prev_page;

      // Reset page for flip aging
      new_page->reset(to_age);
      new_page->reset_livemap();
      if (promotion) {
        new_page->remset_alloc();
      }

      if (promotion) {
        ZGeneration::young()->flip_promote(prev_page, new_page);
        // Defer promoted page registration to reduce the number of times the lock is taken
        promoted_pages.push(prev_page);
      }

      SuspendibleThreadSet::yield();
    }

    ZGeneration::young()->register_flip_promoted(promoted_pages);
  }
};

void ZRelocate::flip_age_pages(const ZArray<ZPage*>* pages) {
  ZFlipAgePagesTask flip_age_task(pages);
  workers()->run(&flip_age_task);
}

void ZRelocate::synchronize() {
  _queue.synchronize();
}

void ZRelocate::desynchronize() {
  _queue.desynchronize();
}

ZRelocateQueue* ZRelocate::queue() {
  return &_queue;
}

bool ZRelocate::is_queue_active() const {
  return _queue.is_active();
}