1 /*
   2  * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "gc/shared/gc_globals.hpp"
  26 #include "gc/shared/suspendibleThreadSet.hpp"
  27 #include "gc/z/zAbort.inline.hpp"
  28 #include "gc/z/zAddress.inline.hpp"
  29 #include "gc/z/zAllocator.inline.hpp"
  30 #include "gc/z/zBarrier.inline.hpp"
  31 #include "gc/z/zCollectedHeap.hpp"
  32 #include "gc/z/zForwarding.inline.hpp"
  33 #include "gc/z/zGeneration.inline.hpp"
  34 #include "gc/z/zHeap.inline.hpp"
  35 #include "gc/z/zIndexDistributor.inline.hpp"
  36 #include "gc/z/zIterator.inline.hpp"
  37 #include "gc/z/zPage.inline.hpp"
  38 #include "gc/z/zPageAge.hpp"
  39 #include "gc/z/zRelocate.hpp"
  40 #include "gc/z/zRelocationSet.inline.hpp"
  41 #include "gc/z/zRootsIterator.hpp"
  42 #include "gc/z/zStackWatermark.hpp"
  43 #include "gc/z/zStat.hpp"
  44 #include "gc/z/zTask.hpp"
  45 #include "gc/z/zUncoloredRoot.inline.hpp"
  46 #include "gc/z/zVerify.hpp"
  47 #include "gc/z/zWorkers.hpp"
  48 #include "prims/jvmtiTagMap.hpp"
  49 #include "runtime/atomic.hpp"
  50 #include "utilities/debug.hpp"
  51 
  52 static const ZStatCriticalPhase ZCriticalPhaseRelocationStall("Relocation Stall");
  53 static const ZStatSubPhase ZSubPhaseConcurrentRelocateRememberedSetFlipPromotedYoung("Concurrent Relocate Remset FP", ZGenerationId::young);
  54 
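     // Helpers for the per-page forwarding table. A from-space offset is turned
     // into a dense index by subtracting the page start and shifting by the
     // object alignment. Illustrative example: with an object alignment shift of
     // 3 (8-byte alignment), objects at page-relative offsets 0, 8 and 16 map to
     // forwarding indices 0, 1 and 2.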
  55 static uintptr_t forwarding_index(ZForwarding* forwarding, zoffset from_offset) {
  56   return (from_offset - forwarding->start()) >> forwarding->object_alignment_shift();
  57 }
  58 
  59 static zaddress forwarding_find(ZForwarding* forwarding, zoffset from_offset, ZForwardingCursor* cursor) {
  60   const uintptr_t from_index = forwarding_index(forwarding, from_offset);
  61   const ZForwardingEntry entry = forwarding->find(from_index, cursor);
  62   return entry.populated() ? ZOffset::address(to_zoffset(entry.to_offset())) : zaddress::null;
  63 }
  64 
  65 static zaddress forwarding_find(ZForwarding* forwarding, zaddress_unsafe from_addr, ZForwardingCursor* cursor) {
  66   return forwarding_find(forwarding, ZAddress::offset(from_addr), cursor);
  67 }
  68 
  69 static zaddress forwarding_find(ZForwarding* forwarding, zaddress from_addr, ZForwardingCursor* cursor) {
  70   return forwarding_find(forwarding, ZAddress::offset(from_addr), cursor);
  71 }
  72 
  73 static zaddress forwarding_insert(ZForwarding* forwarding, zoffset from_offset, zaddress to_addr, ZForwardingCursor* cursor) {
  74   const uintptr_t from_index = forwarding_index(forwarding, from_offset);
  75   const zoffset to_offset = ZAddress::offset(to_addr);
  76   const zoffset to_offset_final = forwarding->insert(from_index, to_offset, cursor);
  77   return ZOffset::address(to_offset_final);
  78 }
  79 
  80 static zaddress forwarding_insert(ZForwarding* forwarding, zaddress from_addr, zaddress to_addr, ZForwardingCursor* cursor) {
  81   return forwarding_insert(forwarding, ZAddress::offset(from_addr), to_addr, cursor);
  82 }
  83 
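     // The relocation queue is how non-worker threads (e.g. Java threads that
     // must relocate an object through a barrier but fail to do so themselves)
     // hand a page over to the GC workers. add_and_wait() publishes the
     // forwarding and blocks until a worker has finished relocating that page,
     // while workers pick up queued work via synchronize_poll(). The
     // _needs_attention counter keeps the common case (empty queue, no
     // synchronization requested) down to a single atomic load on the worker side.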
  84 ZRelocateQueue::ZRelocateQueue()
  85   : _lock(),
  86     _queue(),
  87     _nworkers(0),
  88     _nsynchronized(0),
  89     _synchronize(false),
  90     _needs_attention(0) {}
  91 
  92 bool ZRelocateQueue::needs_attention() const {
  93   return Atomic::load(&_needs_attention) != 0;
  94 }
  95 
  96 void ZRelocateQueue::inc_needs_attention() {
  97   const int needs_attention = Atomic::add(&_needs_attention, 1);
  98   assert(needs_attention == 1 || needs_attention == 2, "Invalid state");
  99 }
 100 
 101 void ZRelocateQueue::dec_needs_attention() {
 102   const int needs_attention = Atomic::sub(&_needs_attention, 1);
 103   assert(needs_attention == 0 || needs_attention == 1, "Invalid state");
 104 }
 105 
 106 void ZRelocateQueue::join(uint nworkers) {
 107   assert(nworkers != 0, "Must request at least one worker");
 108   assert(_nworkers == 0, "Invalid state");
 109   assert(_nsynchronized == 0, "Invalid state");
 110 
 111   log_debug(gc, reloc)("Joining workers: %u", nworkers);
 112 
 113   _nworkers = nworkers;
 114 }
 115 
 116 void ZRelocateQueue::resize_workers(uint nworkers) {
 117   assert(nworkers != 0, "Must request at least one worker");
 118   assert(_nworkers == 0, "Invalid state");
 119   assert(_nsynchronized == 0, "Invalid state");
 120 
 121   log_debug(gc, reloc)("Resize workers: %u", nworkers);
 122 
 123   ZLocker<ZConditionLock> locker(&_lock);
 124   _nworkers = nworkers;
 125 }
 126 
 127 void ZRelocateQueue::leave() {
 128   ZLocker<ZConditionLock> locker(&_lock);
 129   _nworkers--;
 130 
 131   assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);
 132 
 133   log_debug(gc, reloc)("Leaving workers: left: %u _synchronize: %d _nsynchronized: %u", _nworkers, _synchronize, _nsynchronized);
 134 
 135   // Prune done forwardings
 136   const bool forwardings_done = prune();
 137 
 138   // Check if all workers synchronized
 139   const bool last_synchronized = _synchronize && _nworkers == _nsynchronized;
 140 
 141   if (forwardings_done || last_synchronized) {
 142     _lock.notify_all();
 143   }
 144 }
 145 
 146 void ZRelocateQueue::add_and_wait(ZForwarding* forwarding) {
 147   ZStatTimer timer(ZCriticalPhaseRelocationStall);
 148   ZLocker<ZConditionLock> locker(&_lock);
 149 
 150   if (forwarding->is_done()) {
 151     return;
 152   }
 153 
 154   _queue.append(forwarding);
 155   if (_queue.length() == 1) {
 156     // Queue became non-empty
 157     inc_needs_attention();
 158     _lock.notify_all();
 159   }
 160 
 161   while (!forwarding->is_done()) {
 162     _lock.wait();
 163   }
 164 }
 165 
 166 bool ZRelocateQueue::prune() {
 167   if (_queue.is_empty()) {
 168     return false;
 169   }
 170 
 171   bool done = false;
 172 
 173   for (int i = 0; i < _queue.length();) {
 174     const ZForwarding* const forwarding = _queue.at(i);
 175     if (forwarding->is_done()) {
 176       done = true;
 177 
 178       _queue.delete_at(i);
 179     } else {
 180       i++;
 181     }
 182   }
 183 
 184   if (_queue.is_empty()) {
 185     dec_needs_attention();
 186   }
 187 
 188   return done;
 189 }
 190 
 191 ZForwarding* ZRelocateQueue::prune_and_claim() {
 192   if (prune()) {
 193     _lock.notify_all();
 194   }
 195 
 196   for (int i = 0; i < _queue.length(); i++) {
 197     ZForwarding* const forwarding = _queue.at(i);
 198     if (forwarding->claim()) {
 199       return forwarding;
 200     }
 201   }
 202 
 203   return nullptr;
 204 }
 205 
 206 class ZRelocateQueueSynchronizeThread {
 207 private:
 208   ZRelocateQueue* const _queue;
 209 
 210 public:
 211   ZRelocateQueueSynchronizeThread(ZRelocateQueue* queue)
 212     : _queue(queue) {
 213     _queue->synchronize_thread();
 214   }
 215 
 216   ~ZRelocateQueueSynchronizeThread() {
 217     _queue->desynchronize_thread();
 218   }
 219 };
 220 
 221 void ZRelocateQueue::synchronize_thread() {
 222   _nsynchronized++;
 223 
 224   log_debug(gc, reloc)("Synchronize worker _nsynchronized %u", _nsynchronized);
 225 
 226   assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);
 227   if (_nsynchronized == _nworkers) {
 228     // All workers synchronized
 229     _lock.notify_all();
 230   }
 231 }
 232 
 233 void ZRelocateQueue::desynchronize_thread() {
 234   _nsynchronized--;
 235 
 236   log_debug(gc, reloc)("Desynchronize worker _nsynchronized %u", _nsynchronized);
 237 
 238   assert(_nsynchronized < _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);
 239 }
 240 
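     // Polled by relocation workers. The fast path is a single atomic load of
     // _needs_attention. The slow path prunes finished forwardings, claims queued
     // work, and, if synchronize() has been requested, parks the worker until
     // either desynchronize() is called or new queued work arrives.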
 241 ZForwarding* ZRelocateQueue::synchronize_poll() {
 242   // Fast path avoids locking
 243   if (!needs_attention()) {
 244     return nullptr;
 245   }
 246 
 247   // Slow path to get the next forwarding and/or synchronize
 248   ZLocker<ZConditionLock> locker(&_lock);
 249 
 250   {
 251     ZForwarding* const forwarding = prune_and_claim();
 252     if (forwarding != nullptr) {
 253       // Don't become synchronized while there are elements in the queue
 254       return forwarding;
 255     }
 256   }
 257 
 258   if (!_synchronize) {
 259     return nullptr;
 260   }
 261 
 262   ZRelocateQueueSynchronizeThread rqst(this);
 263 
 264   do {
 265     _lock.wait();
 266 
 267     ZForwarding* const forwarding = prune_and_claim();
 268     if (forwarding != nullptr) {
 269       return forwarding;
 270     }
 271   } while (_synchronize);
 272 
 273   return nullptr;
 274 }
 275 
 276 void ZRelocateQueue::clear() {
 277   assert(_nworkers == 0, "Invalid state");
 278 
 279   if (_queue.is_empty()) {
 280     return;
 281   }
 282 
 283   ZArrayIterator<ZForwarding*> iter(&_queue);
 284   for (ZForwarding* forwarding; iter.next(&forwarding);) {
 285     assert(forwarding->is_done(), "All should be done");
 286   }
 287 
 288   assert(false, "Clear was not empty");
 289 
 290   _queue.clear();
 291   dec_needs_attention();
 292 }
 293 
 294 void ZRelocateQueue::synchronize() {
 295   ZLocker<ZConditionLock> locker(&_lock);
 296   _synchronize = true;
 297 
 298   inc_needs_attention();
 299 
 300   log_debug(gc, reloc)("Synchronize all workers 1 _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);
 301 
 302   while (_nworkers != _nsynchronized) {
 303     _lock.wait();
 304     log_debug(gc, reloc)("Synchronize all workers 2 _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);
 305   }
 306 }
 307 
 308 void ZRelocateQueue::desynchronize() {
 309   ZLocker<ZConditionLock> locker(&_lock);
 310   _synchronize = false;
 311 
 312   log_debug(gc, reloc)("Desynchronize all workers _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);
 313 
 314   assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);
 315 
 316   dec_needs_attention();
 317 
 318   _lock.notify_all();
 319 }
 320 
 321 ZRelocate::ZRelocate(ZGeneration* generation)
 322   : _generation(generation),
 323     _queue() {}
 324 
 325 ZWorkers* ZRelocate::workers() const {
 326   return _generation->workers();
 327 }
 328 
 329 void ZRelocate::start() {
 330   _queue.join(workers()->active_workers());
 331 }
 332 
 333 void ZRelocate::add_remset(volatile zpointer* p) {
 334   ZGeneration::young()->remember(p);
 335 }
 336 
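     // Relocation of a single object on behalf of a non-worker thread: allocate
     // a copy from the relocation allocator for the target age, copy the object,
     // then try to publish the copy in the forwarding table. If another thread
     // won the race and inserted a different forwarding entry, the speculative
     // copy is undone and the winner's address is returned.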
 337 static zaddress relocate_object_inner(ZForwarding* forwarding, zaddress from_addr, ZForwardingCursor* cursor) {
 338   assert(ZHeap::heap()->is_object_live(from_addr), "Should be live");
 339 
 340   // Allocate object
 341   const size_t size = ZUtils::object_size(from_addr);
 342 
 343   ZAllocatorForRelocation* allocator = ZAllocator::relocation(forwarding->to_age());
 344 
 345   const zaddress to_addr = allocator->alloc_object(size);
 346 
 347   if (is_null(to_addr)) {
 348     // Allocation failed
 349     return zaddress::null;
 350   }
 351 
 352   // Copy object
 353   ZUtils::object_copy_disjoint(from_addr, to_addr, size);
 354 
 355   // Insert forwarding
 356   const zaddress to_addr_final = forwarding_insert(forwarding, from_addr, to_addr, cursor);
 357 
 358   if (to_addr_final != to_addr) {
 359     // Already relocated, try undo allocation
 360     allocator->undo_alloc_object(to_addr, size);
 361   }
 362 
 363   return to_addr_final;
 364 }
 365 
 366 zaddress ZRelocate::relocate_object(ZForwarding* forwarding, zaddress_unsafe from_addr) {
 367   ZForwardingCursor cursor;
 368 
 369   // Lookup forwarding
 370   zaddress to_addr = forwarding_find(forwarding, from_addr, &cursor);
 371   if (!is_null(to_addr)) {
 372     // Already relocated
 373     return to_addr;
 374   }
 375 
 376   // Relocate object
 377   if (forwarding->retain_page(&_queue)) {
 378     assert(_generation->is_phase_relocate(), "Must be");
 379     to_addr = relocate_object_inner(forwarding, safe(from_addr), &cursor);
 380     forwarding->release_page();
 381 
 382     if (!is_null(to_addr)) {
 383       // Success
 384       return to_addr;
 385     }
 386 
 387     // Failed to relocate object. Signal and wait for a worker thread to
 388     // complete relocation of this page, and then forward the object.
 389     _queue.add_and_wait(forwarding);
 390   }
 391 
 392   // Forward object
 393   return forward_object(forwarding, from_addr);
 394 }
 395 
 396 zaddress ZRelocate::forward_object(ZForwarding* forwarding, zaddress_unsafe from_addr) {
 397   ZForwardingCursor cursor;
 398   const zaddress to_addr = forwarding_find(forwarding, from_addr, &cursor);
 399   assert(!is_null(to_addr), "Should be forwarded: " PTR_FORMAT, untype(from_addr));
 400   return to_addr;
 401 }
 402 
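     // Target pages are allocated non-blocking with the GC-relocation flag set.
     // A failed allocation is not an error; it is the trigger for relocating the
     // current page in-place (and ZStressRelocateInPlace forces that path).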
 403 static ZPage* alloc_page(ZAllocatorForRelocation* allocator, ZPageType type, size_t size) {
 404   if (ZStressRelocateInPlace) {
 405     // Simulate failure to allocate a new page. This will
 406     // cause the page being relocated to be relocated in-place.
 407     return nullptr;
 408   }
 409 
 410   ZAllocationFlags flags;
 411   flags.set_non_blocking();
 412   flags.set_gc_relocation();
 413 
 414   return allocator->alloc_page_for_relocation(type, size, flags);
 415 }
 416 
 417 static void retire_target_page(ZGeneration* generation, ZPage* page) {
 418   if (generation->is_young() && page->is_old()) {
 419     generation->increase_promoted(page->used());
 420   } else {
 421     generation->increase_compacted(page->used());
 422   }
 423 
 424   // Free target page if it is empty. We can end up with an empty target
 425   // page if we allocated a new target page, and then lost the race to
 426   // relocate the remaining objects, leaving the target page empty when
 427   // relocation completed.
 428   if (page->used() == 0) {
 429     ZHeap::heap()->free_page(page);
 430   }
 431 }
 432 
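     // Allocator used when relocating small pages. Each ZRelocateWork instance
     // keeps its own target pages, so objects can be allocated with the
     // non-atomic ZPage::alloc_object(). in_place_count() feeds the relocation
     // statistics reported when the relocation task finishes.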
 433 class ZRelocateSmallAllocator {
 434 private:
 435   ZGeneration* const _generation;
 436   volatile size_t    _in_place_count;
 437 
 438 public:
 439   ZRelocateSmallAllocator(ZGeneration* generation)
 440     : _generation(generation),
 441       _in_place_count(0) {}
 442 
 443   ZPage* alloc_and_retire_target_page(ZForwarding* forwarding, ZPage* target) {
 444     ZAllocatorForRelocation* const allocator = ZAllocator::relocation(forwarding->to_age());
 445     ZPage* const page = alloc_page(allocator, forwarding->type(), forwarding->size());
 446     if (page == nullptr) {
 447       Atomic::inc(&_in_place_count);
 448     }
 449 
 450     if (target != nullptr) {
 451       // Retire the old target page
 452       retire_target_page(_generation, target);
 453     }
 454 
 455     return page;
 456   }
 457 
 458   void share_target_page(ZPage* page) {
 459     // Does nothing
 460   }
 461 
 462   void free_target_page(ZPage* page) {
 463     if (page != nullptr) {
 464       retire_target_page(_generation, page);
 465     }
 466   }
 467 
 468   zaddress alloc_object(ZPage* page, size_t size) const {
 469     return (page != nullptr) ? page->alloc_object(size) : zaddress::null;
 470   }
 471 
 472   void undo_alloc_object(ZPage* page, zaddress addr, size_t size) const {
 473     page->undo_alloc_object(addr, size);
 474   }
 475 
 476   const size_t in_place_count() const {
 477     return _in_place_count;
 478   }
 479 };
 480 
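     // Allocator used when relocating medium pages. The target pages (one per
     // target age) are shared by all workers and protected by _lock, so objects
     // are allocated with the atomic ZPage::alloc_object_atomic(). While one
     // worker relocates a medium page in-place, _in_place keeps the other
     // workers from installing a new shared target page until
     // share_target_page() is called.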
 481 class ZRelocateMediumAllocator {
 482 private:
 483   ZGeneration* const _generation;
 484   ZConditionLock     _lock;
 485   ZPage*             _shared[ZAllocator::_relocation_allocators];
 486   bool               _in_place;
 487   volatile size_t    _in_place_count;
 488 
 489 public:
 490   ZRelocateMediumAllocator(ZGeneration* generation)
 491     : _generation(generation),
 492       _lock(),
 493       _shared(),
 494       _in_place(false),
 495       _in_place_count(0) {}
 496 
 497   ~ZRelocateMediumAllocator() {
 498     for (uint i = 0; i < ZAllocator::_relocation_allocators; ++i) {
 499       if (_shared[i] != nullptr) {
 500         retire_target_page(_generation, _shared[i]);
 501       }
 502     }
 503   }
 504 
 505   ZPage* shared(ZPageAge age) {
 506     return _shared[static_cast<uint>(age) - 1];
 507   }
 508 
 509   void set_shared(ZPageAge age, ZPage* page) {
 510     _shared[static_cast<uint>(age) - 1] = page;
 511   }
 512 
 513   ZPage* alloc_and_retire_target_page(ZForwarding* forwarding, ZPage* target) {
 514     ZLocker<ZConditionLock> locker(&_lock);
 515 
 516     // Wait for any ongoing in-place relocation to complete
 517     while (_in_place) {
 518       _lock.wait();
 519     }
 520 
 521     // Allocate a new page only if the shared page is the same as the
 522     // current target page. The shared page will be different from the
 523     // current target page if another thread shared a page, or allocated
 524     // a new page.
 525     const ZPageAge to_age = forwarding->to_age();
 526     if (shared(to_age) == target) {
 527       ZAllocatorForRelocation* const allocator = ZAllocator::relocation(forwarding->to_age());
 528       ZPage* const to_page = alloc_page(allocator, forwarding->type(), forwarding->size());
 529       set_shared(to_age, to_page);
 530       if (to_page == nullptr) {
 531         Atomic::inc(&_in_place_count);
 532         _in_place = true;
 533       }
 534 
 535       // This thread is responsible for retiring the shared target page
 536       if (target != nullptr) {
 537         retire_target_page(_generation, target);
 538       }
 539     }
 540 
 541     return shared(to_age);
 542   }
 543 
 544   void share_target_page(ZPage* page) {
 545     const ZPageAge age = page->age();
 546 
 547     ZLocker<ZConditionLock> locker(&_lock);
 548     assert(_in_place, "Invalid state");
 549     assert(shared(age) == nullptr, "Invalid state");
 550     assert(page != nullptr, "Invalid page");
 551 
 552     set_shared(age, page);
 553     _in_place = false;
 554 
 555     _lock.notify_all();
 556   }
 557 
 558   void free_target_page(ZPage* page) {
 559     // Does nothing
 560   }
 561 
 562   zaddress alloc_object(ZPage* page, size_t size) const {
 563     return (page != nullptr) ? page->alloc_object_atomic(size) : zaddress::null;
 564   }
 565 
 566   void undo_alloc_object(ZPage* page, zaddress addr, size_t size) const {
 567     page->undo_alloc_object_atomic(addr, size);
 568   }
 569 
 570   const size_t in_place_count() const {
 571     return _in_place_count;
 572   }
 573 };
 574 
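     // Per-worker relocation state, parameterized on the small or medium
     // allocator. It tracks the forwarding currently being processed, the
     // target pages indexed by target age, and the number of bytes that turned
     // out to have been relocated by other (non-worker) threads, which is
     // reported on their behalf in the destructor.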
 575 template <typename Allocator>
 576 class ZRelocateWork : public StackObj {
 577 private:
 578   Allocator* const   _allocator;
 579   ZForwarding*       _forwarding;
 580   ZPage*             _target[ZAllocator::_relocation_allocators];
 581   ZGeneration* const _generation;
 582   size_t             _other_promoted;
 583   size_t             _other_compacted;
 584 
 585   ZPage* target(ZPageAge age) {
 586     return _target[static_cast<uint>(age) - 1];
 587   }
 588 
 589   void set_target(ZPageAge age, ZPage* page) {
 590     _target[static_cast<uint>(age) - 1] = page;
 591   }
 592 
 593   size_t object_alignment() const {
 594     return (size_t)1 << _forwarding->object_alignment_shift();
 595   }
 596 
 597   void increase_other_forwarded(size_t unaligned_object_size) {
 598     const size_t aligned_size = align_up(unaligned_object_size, object_alignment());
 599     if (_forwarding->is_promotion()) {
 600       _other_promoted += aligned_size;
 601     } else {
 602       _other_compacted += aligned_size;
 603     }
 604   }
 605 
 606   zaddress try_relocate_object_inner(zaddress from_addr) {
 607     ZForwardingCursor cursor;
 608 
 609     ZPage* const to_page = target(_forwarding->to_age());
 610 
 611     // Lookup forwarding
 612     {
 613       const zaddress to_addr = forwarding_find(_forwarding, from_addr, &cursor);
 614       if (!is_null(to_addr)) {
 615         // Already relocated
 616         const size_t size = ZUtils::object_size(to_addr);
 617         increase_other_forwarded(size);
 618         return to_addr;
 619       }
 620     }
 621 
 622     // Allocate object
 623     const size_t size = ZUtils::object_size(from_addr);
 624     const zaddress allocated_addr = _allocator->alloc_object(to_page, size);
 625     if (is_null(allocated_addr)) {
 626       // Allocation failed
 627       return zaddress::null;
 628     }
 629 
 630     // Copy object. Use conjoint copying if we are relocating
 631     // in-place and the new object overlaps with the old object.
 632     if (_forwarding->in_place_relocation() && allocated_addr + size > from_addr) {
 633       ZUtils::object_copy_conjoint(from_addr, allocated_addr, size);
 634     } else {
 635       ZUtils::object_copy_disjoint(from_addr, allocated_addr, size);
 636     }
 637 
 638     // Insert forwarding
 639     const zaddress to_addr = forwarding_insert(_forwarding, from_addr, allocated_addr, &cursor);
 640     if (to_addr != allocated_addr) {
 641       // Already relocated, undo allocation
 642       _allocator->undo_alloc_object(to_page, to_addr, size);
 643       increase_other_forwarded(size);
 644     }
 645 
 646     return to_addr;
 647   }
 648 
 649   void update_remset_old_to_old(zaddress from_addr, zaddress to_addr) const {
 650     // Old-to-old relocation - move existing remset bits
 651 
 652     // If this is called for an in-place relocated page, then this code has the
 653     // responsibility to clear the old remset bits. Extra care is needed because:
 654     //
 655     // 1) The to-object copy can overlap with the from-object copy
 656     // 2) Remset bits of old objects need to be cleared
 657     //
 658     // A watermark is used to keep track of how far the old remset bits have been removed.
 659 
 660     const bool in_place = _forwarding->in_place_relocation();
 661     ZPage* const from_page = _forwarding->page();
 662     const uintptr_t from_local_offset = from_page->local_offset(from_addr);
 663 
 664     // Note: even with in-place relocation, the to_page could be another page
 665     ZPage* const to_page = ZHeap::heap()->page(to_addr);
 666 
 667     // Uses _relaxed version to handle that in-place relocation resets _top
 668     assert(ZHeap::heap()->is_in_page_relaxed(from_page, from_addr), "Must be");
 669     assert(to_page->is_in(to_addr), "Must be");
 670 
 672     // Read the size from the to-object, since the from-object
 673     // could have been overwritten during in-place relocation.
 674     const size_t size = ZUtils::object_size(to_addr);
 675 
 676     // If a young generation collection started while the old generation
 677     // relocated objects, the remembered set bits were flipped from "current"
 678     // to "previous".
 679     //
 680     // We need to select the correct remembered set bitmap to ensure that the
 681     // old remset bits are found.
 682     //
 683     // Note that if the young generation marking (remset scanning) finishes
 684     // before the old generation relocation has relocated this page, then the
 685     // young generation will visit this page's previous remembered set bits and
 686     // move them over to the current bitmap.
 687     //
 688     // If the young generation runs multiple cycles while the old generation is
 689     // relocating, then the first cycle will have consumed the old remset bits
 690     // and moved the associated objects to a new old page. The old relocation
 691     // could find either of the two bitmaps. So, either it will find the original
 692     // remset bits for the page, or it will find an empty bitmap for the page. It
 693     // doesn't matter for correctness, because the young generation marking has
 694     // already taken care of the bits.
 695 
 696     const bool active_remset_is_current = ZGeneration::old()->active_remset_is_current();
 697 
 698     // When in-place relocation is done and the old remset bits are located in
 699     // the bitmap that is going to be used for the new remset bits, then we
 700     // need to clear the old bits before the new bits are inserted.
 701     const bool iterate_current_remset = active_remset_is_current && !in_place;
 702 
 703     BitMap::Iterator iter = iterate_current_remset
 704         ? from_page->remset_iterator_limited_current(from_local_offset, size)
 705         : from_page->remset_iterator_limited_previous(from_local_offset, size);
 706 
 707     for (BitMap::idx_t field_bit : iter) {
 708       const uintptr_t field_local_offset = ZRememberedSet::to_offset(field_bit);
 709 
 710       // Add remset entry in the to-page
 711       const uintptr_t offset = field_local_offset - from_local_offset;
 712       const zaddress to_field = to_addr + offset;
 713       log_trace(gc, reloc)("Remember: from: " PTR_FORMAT " to: " PTR_FORMAT " current: %d marking: %d page: " PTR_FORMAT " remset: " PTR_FORMAT,
 714           untype(from_page->start() + field_local_offset), untype(to_field), active_remset_is_current, ZGeneration::young()->is_phase_mark(), p2i(to_page), p2i(to_page->remset_current()));
 715 
 716       volatile zpointer* const p = (volatile zpointer*)to_field;
 717 
 718       if (ZGeneration::young()->is_phase_mark()) {
 719         // Young generation remembered set scanning needs to know about this
 720         // field. It will take responsibility to add a new remembered set entry if needed.
 721         _forwarding->relocated_remembered_fields_register(p);
 722       } else {
 723         to_page->remember(p);
 724         if (in_place) {
 725           assert(to_page->is_remembered(p), "p: " PTR_FORMAT, p2i(p));
 726         }
 727       }
 728     }
 729   }
 730 
 731   static bool add_remset_if_young(volatile zpointer* p, zaddress addr) {
 732     if (ZHeap::heap()->is_young(addr)) {
 733       ZRelocate::add_remset(p);
 734       return true;
 735     }
 736 
 737     return false;
 738   }
 739 
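       // Applied to every oop field of an object that was promoted to the old
       // generation. Fields that are already store good have a remset entry.
       // Otherwise, fields that still point into the young generation get a new
       // remset entry, null and old-generation pointers are eagerly remapped so
       // that no remset entry is needed for them, and fields pointing at young
       // objects that have not been relocated yet only get a remset entry (we
       // never trigger relocation from here).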
 740   static void update_remset_promoted_filter_and_remap_per_field(volatile zpointer* p) {
 741     const zpointer ptr = Atomic::load(p);
 742 
 743     assert(ZPointer::is_old_load_good(ptr), "Should be at least old load good: " PTR_FORMAT, untype(ptr));
 744 
 745     if (ZPointer::is_store_good(ptr)) {
 746       // Already has a remset entry
 747       return;
 748     }
 749 
 750     if (ZPointer::is_load_good(ptr)) {
 751       if (!is_null_any(ptr)) {
 752         const zaddress addr = ZPointer::uncolor(ptr);
 753         add_remset_if_young(p, addr);
 754       }
 755       // No need to remap, it is already load good
 756       return;
 757     }
 758 
 759     if (is_null_any(ptr)) {
 760       // Eagerly remap to skip adding a remset entry just to get deferred remapping
 761       ZBarrier::remap_young_relocated(p, ptr);
 762       return;
 763     }
 764 
 765     const zaddress_unsafe addr_unsafe = ZPointer::uncolor_unsafe(ptr);
 766     ZForwarding* const forwarding = ZGeneration::young()->forwarding(addr_unsafe);
 767 
 768     if (forwarding == nullptr) {
 769       // Object isn't being relocated
 770       const zaddress addr = safe(addr_unsafe);
 771       if (!add_remset_if_young(p, addr)) {
 772         // Not young - eagerly remap to skip adding a remset entry just to get deferred remapping
 773         ZBarrier::remap_young_relocated(p, ptr);
 774       }
 775       return;
 776     }
 777 
 778     const zaddress addr = forwarding->find(addr_unsafe);
 779 
 780     if (!is_null(addr)) {
 781       // Object has already been relocated
 782       if (!add_remset_if_young(p, addr)) {
 783         // Not young - eagerly remap to skip adding a remset entry just to get deferred remapping
 784         ZBarrier::remap_young_relocated(p, ptr);
 785       }
 786       return;
 787     }
 788 
 789     // Object has not been relocated yet
 790     // Don't want to eagerly relocate objects, so just add a remset entry
 791     ZRelocate::add_remset(p);
 792     return;
 793   }
 794 
 795   void update_remset_promoted(zaddress to_addr) const {
 796     ZIterator::basic_oop_iterate(to_oop(to_addr), update_remset_promoted_filter_and_remap_per_field);
 797   }
 798 
 799   void update_remset_for_fields(zaddress from_addr, zaddress to_addr) const {
 800     if (_forwarding->to_age() != ZPageAge::old) {
 801       // No remembered set in young pages
 802       return;
 803     }
 804 
 805     // Need to deal with remset when moving objects to the old generation
 806     if (_forwarding->from_age() == ZPageAge::old) {
 807       update_remset_old_to_old(from_addr, to_addr);
 808       return;
 809     }
 810 
 811     // Normal promotion
 812     update_remset_promoted(to_addr);
 813   }
 814 
 815   bool try_relocate_object(zaddress from_addr) {
 816     const zaddress to_addr = try_relocate_object_inner(from_addr);
 817 
 818     if (is_null(to_addr)) {
 819       return false;
 820     }
 821 
 822     update_remset_for_fields(from_addr, to_addr);
 823 
 824     return true;
 825   }
 826 
 827   void start_in_place_relocation_prepare_remset(ZPage* from_page) {
 828     if (_forwarding->from_age() != ZPageAge::old) {
 829       // Only old pages use remset bits
 830       return;
 831     }
 832 
 833     if (ZGeneration::old()->active_remset_is_current()) {
 834       // We want to iterate over and clear the remset bits of the from-space page,
 835       // and insert current bits in the to-space page. However, with in-place
 836       // relocation, the from-space and to-space pages are the same. Clearing
 837       // is destructive, and is difficult to perform before or during the iteration.
 838       // However, clearing of the current bits has to be done before exposing the
 839       // to-space objects in the forwarding table.
 840       //
 841       // To solve this tricky dependency problem, we start by stashing away the
 842       // current bits in the previous bits, and clearing the current bits
 843       // (implemented by swapping the bits). This way, the current bits are
 844       // cleared before copying the objects (like a normal to-space page),
 845       // and the previous bits represent a copy of the current bits
 846       // of the from-space page, and are used for iteration.
 847       from_page->swap_remset_bitmaps();
 848     }
 849   }
 850 
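       // Turns the page being relocated into its own target page. For
       // promotions this happens through a cloned page object reset to the
       // target age; otherwise the page itself is reset. The relocated
       // watermark is the offset of the object that triggered the switch to
       // in-place mode, i.e. objects before it had already been relocated to
       // other target pages.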
 851   ZPage* start_in_place_relocation(zoffset relocated_watermark) {
 852     _forwarding->in_place_relocation_claim_page();
 853     _forwarding->in_place_relocation_start(relocated_watermark);
 854 
 855     ZPage* const from_page = _forwarding->page();
 856 
 857     const ZPageAge to_age = _forwarding->to_age();
 858     const bool promotion = _forwarding->is_promotion();
 859 
 860     // Promotions happen through a new cloned page
 861     ZPage* const to_page = promotion ? from_page->clone_limited() : from_page;
 862     to_page->reset(to_age, ZPageResetType::InPlaceRelocation);
 863 
 864     // Clear remset bits for all objects that were relocated
 865     // before this page became an in-place relocated page.
 866     start_in_place_relocation_prepare_remset(from_page);
 867 
 868     if (promotion) {
 869       // Register the promotion
 870       ZGeneration::young()->in_place_relocate_promote(from_page, to_page);
 871       ZGeneration::young()->register_in_place_relocate_promoted(from_page);
 872     }
 873 
 874     return to_page;
 875   }
 876 
 877   void relocate_object(oop obj) {
 878     const zaddress addr = to_zaddress(obj);
 879     assert(ZHeap::heap()->is_object_live(addr), "Should be live");
 880 
 881     while (!try_relocate_object(addr)) {
 882       // Allocate a new target page, or if that fails, use the page being
 883       // relocated as the new target, which will cause it to be relocated
 884       // in-place.
 885       const ZPageAge to_age = _forwarding->to_age();
 886       ZPage* to_page = _allocator->alloc_and_retire_target_page(_forwarding, target(to_age));
 887       set_target(to_age, to_page);
 888       if (to_page != nullptr) {
 889         continue;
 890       }
 891 
 892       // Start in-place relocation to block other threads from accessing
 893       // the page, or its forwarding table, until it has been released
 894       // (relocation completed).
 895       to_page = start_in_place_relocation(ZAddress::offset(addr));
 896       set_target(to_age, to_page);
 897     }
 898   }
 899 
 900 public:
 901   ZRelocateWork(Allocator* allocator, ZGeneration* generation)
 902     : _allocator(allocator),
 903       _forwarding(nullptr),
 904       _target(),
 905       _generation(generation),
 906       _other_promoted(0),
 907       _other_compacted(0) {}
 908 
 909   ~ZRelocateWork() {
 910     for (uint i = 0; i < ZAllocator::_relocation_allocators; ++i) {
 911       _allocator->free_target_page(_target[i]);
 912     }
 913     // Report statistics on behalf of non-worker threads
 914     _generation->increase_promoted(_other_promoted);
 915     _generation->increase_compacted(_other_compacted);
 916   }
 917 
 918   bool active_remset_is_current() const {
 919     // Normal old-to-old relocation can treat the from-page remset as a
 920     // read-only copy, and then copy over the appropriate remset bits to the
 921     // cleared to-page's 'current' remset bitmap.
 922     //
 923     // In-place relocation is more complicated. Since the same page is both
 924     // a from-page and a to-page, we need to remove the old remset bits, and
 925     // add remset bits that correspond to the new locations of the relocated
 926     // objects.
 927     //
 928     // Depending on how long ago the page was allocated (in terms of the
 929     // number of young GCs and the current young GC's phase), the active
 930     // remembered set will be in either the 'current' or the 'previous' bitmap.
 931     //
 932     // If the active bits are in the 'previous' bitmap, we know that the
 933     // 'current' bitmap was cleared at some earlier point in time, and we can
 934     // simply set new bits in the 'current' bitmap, and later, when relocation
 935     // has read all the old remset bits, we can just clear the 'previous' remset
 936     // bitmap.
 937     //
 938     // If, on the other hand, the active bits are in the 'current' bitmap, then
 939     // that bitmap will be used both to read the old remset bits, and as the
 940     // destination for the remset bits that we copy when an object is copied
 941     // to its new location within the page. We need to *carefully* remove
 942     // all old remset bits, without clearing out the newly set bits.
 943     return ZGeneration::old()->active_remset_is_current();
 944   }
 945 
 946   void clear_remset_before_reuse(ZPage* page, bool in_place) {
 947     if (_forwarding->from_age() != ZPageAge::old) {
 948       // No remset bits
 949       return;
 950     }
 951 
 952     if (in_place) {
 953       // Clear 'previous' remset bits. For in-place relocated pages, the previous
 954       // remset bits are always used, even when active_remset_is_current().
 955       page->clear_remset_previous();
 956 
 957       return;
 958     }
 959 
 960     // Normal relocate
 961 
 962     // Clear active remset bits
 963     if (active_remset_is_current()) {
 964       page->clear_remset_current();
 965     } else {
 966       page->clear_remset_previous();
 967     }
 968 
 969     // Verify that inactive remset bits are all cleared
 970     if (active_remset_is_current()) {
 971       page->verify_remset_cleared_previous();
 972     } else {
 973       page->verify_remset_cleared_current();
 974     }
 975   }
 976 
 977   void finish_in_place_relocation() {
 978     // We are done with the from_space copy of the page
 979     _forwarding->in_place_relocation_finish();
 980   }
 981 
 982   void do_forwarding(ZForwarding* forwarding) {
 983     _forwarding = forwarding;
 984 
 985     _forwarding->page()->log_msg(" (relocate page)");
 986 
 987     ZVerify::before_relocation(_forwarding);
 988 
 989     // Relocate objects
 990     _forwarding->object_iterate([&](oop obj) { relocate_object(obj); });
 991 
 992     ZVerify::after_relocation(_forwarding);
 993 
 994     // Verify
 995     if (ZVerifyForwarding) {
 996       _forwarding->verify();
 997     }
 998 
 999     _generation->increase_freed(_forwarding->page()->size());
1000 
1001     // Deal with in-place relocation
1002     const bool in_place = _forwarding->in_place_relocation();
1003     if (in_place) {
1004       finish_in_place_relocation();
1005     }
1006 
1007     // Old from-space pages need to deal with remset bits
1008     if (_forwarding->from_age() == ZPageAge::old) {
1009       _forwarding->relocated_remembered_fields_after_relocate();
1010     }
1011 
1012     // Release relocated page
1013     _forwarding->release_page();
1014 
1015     if (in_place) {
1016       // Wait for all other threads to call release_page
1017       ZPage* const page = _forwarding->detach_page();
1018 
1019       // Ensure that previous remset bits are cleared
1020       clear_remset_before_reuse(page, true /* in_place */);
1021 
1022       page->log_msg(" (relocate page done in-place)");
1023 
1024       // Different pages when promoting
1025       ZPage* const target_page = target(_forwarding->to_age());
1026       _allocator->share_target_page(target_page);
1027 
1028     } else {
1029       // Wait for all other threads to call release_page
1030       ZPage* const page = _forwarding->detach_page();
1031 
1032       // Ensure that all remset bits are cleared
1033       // Note: cleared after detach_page, when we know that
1034       // the young generation isn't scanning the remset.
1035       clear_remset_before_reuse(page, false /* in_place */);
1036 
1037       page->log_msg(" (relocate page done normal)");
1038 
1039       // Free page
1040       ZHeap::heap()->free_page(page);
1041     }
1042   }
1043 };
1044 
1045 class ZRelocateStoreBufferInstallBasePointersThreadClosure : public ThreadClosure {
1046 public:
1047   virtual void do_thread(Thread* thread) {
1048     JavaThread* const jt = JavaThread::cast(thread);
1049     ZStoreBarrierBuffer* buffer = ZThreadLocalData::store_barrier_buffer(jt);
1050     buffer->install_base_pointers();
1051   }
1052 };
1053 
1054 // Installs the object base pointers (object starts) for the fields written
1055 // in the store buffer. The code that searches for the object start uses the
1056 // liveness information stored in the pages. That information is lost when
1057 // the pages have been relocated and then destroyed.
1058 class ZRelocateStoreBufferInstallBasePointersTask : public ZTask {
1059 private:
1060   ZJavaThreadsIterator _threads_iter;
1061 
1062 public:
1063   ZRelocateStoreBufferInstallBasePointersTask(ZGeneration* generation)
1064     : ZTask("ZRelocateStoreBufferInstallBasePointersTask"),
1065       _threads_iter(generation->id_optional()) {}
1066 
1067   virtual void work() {
1068     ZRelocateStoreBufferInstallBasePointersThreadClosure fix_store_buffer_cl;
1069     _threads_iter.apply(&fix_store_buffer_cl);
1070   }
1071 };
1072 
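     // The main relocation task. Each worker first drains the relocation queue
     // (threads stalled in add_and_wait() are waiting for those pages) and then
     // claims forwardings from the parallel relocation set iterator. The task is
     // restartable so that the number of workers can be resized between work()
     // invocations.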
1073 class ZRelocateTask : public ZRestartableTask {
1074 private:
1075   ZRelocationSetParallelIterator _iter;
1076   ZGeneration* const             _generation;
1077   ZRelocateQueue* const          _queue;
1078   ZRelocateSmallAllocator        _small_allocator;
1079   ZRelocateMediumAllocator       _medium_allocator;
1080 
1081 public:
1082   ZRelocateTask(ZRelocationSet* relocation_set, ZRelocateQueue* queue)
1083     : ZRestartableTask("ZRelocateTask"),
1084       _iter(relocation_set),
1085       _generation(relocation_set->generation()),
1086       _queue(queue),
1087       _small_allocator(_generation),
1088       _medium_allocator(_generation) {}
1089 
1090   ~ZRelocateTask() {
1091     _generation->stat_relocation()->at_relocate_end(_small_allocator.in_place_count(), _medium_allocator.in_place_count());
1092   }
1093 
1094   virtual void work() {
1095     ZRelocateWork<ZRelocateSmallAllocator> small(&_small_allocator, _generation);
1096     ZRelocateWork<ZRelocateMediumAllocator> medium(&_medium_allocator, _generation);
1097 
1098     const auto do_forwarding = [&](ZForwarding* forwarding) {
1099       ZPage* const page = forwarding->page();
1100       if (page->is_small()) {
1101         small.do_forwarding(forwarding);
1102       } else {
1103         medium.do_forwarding(forwarding);
1104       }
1105 
1106       // Absolute last thing done while relocating a page.
1107       //
1108       // We don't use the SuspendibleThreadSet when relocating pages.
1109       // Instead the ZRelocateQueue is used as a pseudo STS joiner/leaver.
1110       //
1111       // After the mark_done call, a safepoint could be reached and a
1112       // new GC phase could be entered.
1113       forwarding->mark_done();
1114     };
1115 
1116     const auto claim_and_do_forwarding = [&](ZForwarding* forwarding) {
1117       if (forwarding->claim()) {
1118         do_forwarding(forwarding);
1119       }
1120     };
1121 
1122     const auto do_forwarding_one_from_iter = [&]() {
1123       ZForwarding* forwarding;
1124 
1125       if (_iter.next(&forwarding)) {
1126         claim_and_do_forwarding(forwarding);
1127         return true;
1128       }
1129 
1130       return false;
1131     };
1132 
1133     for (;;) {
1134       // As long as there are requests in the relocate queue, there are threads
1135       // waiting in a VM state that does not allow them to be blocked. The
1136       // worker thread needs to finish relocating these pages, and allow the
1137       // other threads to continue and proceed to a blocking state. After that,
1138       // the worker threads are allowed to safepoint synchronize.
1139       for (ZForwarding* forwarding; (forwarding = _queue->synchronize_poll()) != nullptr;) {
1140         do_forwarding(forwarding);
1141       }
1142 
1143       if (!do_forwarding_one_from_iter()) {
1144         // No more work
1145         break;
1146       }
1147 
1148       if (_generation->should_worker_resize()) {
1149         break;
1150       }
1151     }
1152 
1153     _queue->leave();
1154   }
1155 
1156   virtual void resize_workers(uint nworkers) {
1157     _queue->resize_workers(nworkers);
1158   }
1159 };
1160 
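     // Applied to every oop field in flip-promoted pages, i.e. pages whose age
     // was flipped to old without moving the objects (see
     // ZRelocateAddRemsetForFlipPromoted below).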
1161 static void remap_and_maybe_add_remset(volatile zpointer* p) {
1162   const zpointer ptr = Atomic::load(p);
1163 
1164   if (ZPointer::is_store_good(ptr)) {
1165     // Already has a remset entry
1166     return;
1167   }
1168 
1169   // Remset entries are used for two reasons:
1170   // 1) Young marking old-to-young pointer roots
1171   // 2) Deferred remapping of stale old-to-young pointers
1172   //
1173   // This load barrier will up-front perform the remapping of (2),
1174   // and the code below only has to make sure we register up-to-date
1175   // old-to-young pointers for (1).
1176   const zaddress addr = ZBarrier::load_barrier_on_oop_field_preloaded(p, ptr);
1177 
1178   if (is_null(addr)) {
1179     // No need for remset entries for null pointers
1180     return;
1181   }
1182 
1183   if (ZHeap::heap()->is_old(addr)) {
1184     // No need for remset entries for pointers to old gen
1185     return;
1186   }
1187 
1188   ZRelocate::add_remset(p);
1189 }
1190 
1191 class ZRelocateAddRemsetForFlipPromoted : public ZRestartableTask {
1192 private:
1193   ZStatTimerYoung                _timer;
1194   ZArrayParallelIterator<ZPage*> _iter;
1195 
1196 public:
1197   ZRelocateAddRemsetForFlipPromoted(ZArray<ZPage*>* pages)
1198     : ZRestartableTask("ZRelocateAddRemsetForFlipPromoted"),
1199       _timer(ZSubPhaseConcurrentRelocateRememberedSetFlipPromotedYoung),
1200       _iter(pages) {}
1201 
1202   virtual void work() {
1203     SuspendibleThreadSetJoiner sts_joiner;
1204 
1205     for (ZPage* page; _iter.next(&page);) {
1206       page->object_iterate([&](oop obj) {
1207         ZIterator::basic_oop_iterate_safe(obj, remap_and_maybe_add_remset);
1208       });
1209 
1210       SuspendibleThreadSet::yield();
1211       if (ZGeneration::young()->should_worker_resize()) {
1212         return;
1213       }
1214     }
1215   }
1216 };
1217 
1218 void ZRelocate::relocate(ZRelocationSet* relocation_set) {
1219   {
1220     // Install the store buffer's base pointers before the
1221     // relocate task destroys the liveness information in
1222     // the relocated pages.
1223     ZRelocateStoreBufferInstallBasePointersTask buffer_task(_generation);
1224     workers()->run(&buffer_task);
1225   }
1226 
1227   {
1228     ZRelocateTask relocate_task(relocation_set, &_queue);
1229     workers()->run(&relocate_task);
1230   }
1231 
1232   if (relocation_set->generation()->is_young()) {
1233     ZRelocateAddRemsetForFlipPromoted task(relocation_set->flip_promoted_pages());
1234     workers()->run(&task);
1235   }
1236 
1237   _queue.clear();
1238 }
1239 
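     // Computes the age that surviving objects on a page will be relocated to.
     // Young ages advance one step per collection until the tenuring threshold
     // is reached, at which point the objects are promoted to old. Illustrative
     // trace: with tenuring_threshold() == 2, age 0 (eden) goes to age 1, age 1
     // goes to age 2, and age 2 (>= threshold) goes to old.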
1240 ZPageAge ZRelocate::compute_to_age(ZPageAge from_age) {
1241   if (from_age == ZPageAge::old) {
1242     return ZPageAge::old;
1243   }
1244 
1245   const uint age = static_cast<uint>(from_age);
1246   if (age >= ZGeneration::young()->tenuring_threshold()) {
1247     return ZPageAge::old;
1248   }
1249 
1250   return static_cast<ZPageAge>(age + 1);
1251 }
1252 
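     // Ages pages in place, without relocating their objects. Pages that reach
     // old age this way are "flip promoted": their zpointers are first made
     // store good, the page is reset to old age, and the page is registered so
     // that remset entries can be added afterwards (see
     // ZRelocateAddRemsetForFlipPromoted).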
1253 class ZFlipAgePagesTask : public ZTask {
1254 private:
1255   ZArrayParallelIterator<ZPage*> _iter;
1256 
1257 public:
1258   ZFlipAgePagesTask(const ZArray<ZPage*>* pages)
1259     : ZTask("ZPromotePagesTask"),
1260       _iter(pages) {}
1261 
1262   virtual void work() {
1263     SuspendibleThreadSetJoiner sts_joiner;
1264     ZArray<ZPage*> promoted_pages;
1265 
1266     for (ZPage* prev_page; _iter.next(&prev_page);) {
1267       const ZPageAge from_age = prev_page->age();
1268       const ZPageAge to_age = ZRelocate::compute_to_age(from_age);
1269       assert(from_age != ZPageAge::old, "invalid age for a young collection");
1270 
1271       // Figure out if this is a proper promotion
1272       const bool promotion = to_age == ZPageAge::old;
1273 
1274       if (promotion) {
1275         // Before promoting an object (and before relocate start), we must ensure that all
1276         // contained zpointers are store good. The marking code ensures that for non-null
1277         // pointers, but null pointers are ignored. This code ensures that even null pointers
1278         // are made store good for the promoted objects.
1279         prev_page->object_iterate([&](oop obj) {
1280           ZIterator::basic_oop_iterate_safe(obj, ZBarrier::promote_barrier_on_young_oop_field);
1281         });
1282       }
1283 
1284       // Logging
1285       prev_page->log_msg(promotion ? " (flip promoted)" : " (flip survived)");
1286 
1287       // Setup to-space page
1288       ZPage* const new_page = promotion ? prev_page->clone_limited_promote_flipped() : prev_page;
1289       new_page->reset(to_age, ZPageResetType::FlipAging);
1290 
1291       if (promotion) {
1292         ZGeneration::young()->flip_promote(prev_page, new_page);
1293         // Defer promoted page registration to reduce the number of times the lock is taken
1294         promoted_pages.push(prev_page);
1295       }
1296 
1297       SuspendibleThreadSet::yield();
1298     }
1299 
1300     ZGeneration::young()->register_flip_promoted(promoted_pages);
1301   }
1302 };
1303 
1304 void ZRelocate::flip_age_pages(const ZArray<ZPage*>* pages) {
1305   ZFlipAgePagesTask flip_age_task(pages);
1306   workers()->run(&flip_age_task);
1307 }
1308 
1309 void ZRelocate::synchronize() {
1310   _queue.synchronize();
1311 }
1312 
1313 void ZRelocate::desynchronize() {
1314   _queue.desynchronize();
1315 }
1316 
1317 ZRelocateQueue* ZRelocate::queue() {
1318   return &_queue;
1319 }