/*
 * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zAbort.inline.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zAllocator.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zForwarding.inline.hpp"
#include "gc/z/zGeneration.inline.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zIndexDistributor.inline.hpp"
#include "gc/z/zIterator.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageAge.hpp"
#include "gc/z/zRelocate.hpp"
#include "gc/z/zRelocationSet.inline.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStackWatermark.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zUncoloredRoot.inline.hpp"
#include "gc/z/zVerify.hpp"
#include "gc/z/zWorkers.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/atomic.hpp"
#include "utilities/debug.hpp"

static const ZStatCriticalPhase ZCriticalPhaseRelocationStall("Relocation Stall");
static const ZStatSubPhase ZSubPhaseConcurrentRelocateRememberedSetFlipPromotedYoung("Concurrent Relocate Remset FP", ZGenerationId::young);

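// Helpers for accessing a page's forwarding table. A from-address is mapped
// to a table index based on its offset from the start of the page:
//
//   index = (from_offset - forwarding->start()) >> object_alignment_shift
//
// find() returns a null address if no entry has been installed for the index
// yet, while insert() returns the winning to-address, which may differ from
// the caller's copy if another thread installed an entry first.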
static uintptr_t forwarding_index(ZForwarding* forwarding, zoffset from_offset) {
  return (from_offset - forwarding->start()) >> forwarding->object_alignment_shift();
}

static zaddress forwarding_find(ZForwarding* forwarding, zoffset from_offset, ZForwardingCursor* cursor) {
  const uintptr_t from_index = forwarding_index(forwarding, from_offset);
  const ZForwardingEntry entry = forwarding->find(from_index, cursor);
  return entry.populated() ? ZOffset::address(to_zoffset(entry.to_offset())) : zaddress::null;
}

static zaddress forwarding_find(ZForwarding* forwarding, zaddress_unsafe from_addr, ZForwardingCursor* cursor) {
  return forwarding_find(forwarding, ZAddress::offset(from_addr), cursor);
}

static zaddress forwarding_find(ZForwarding* forwarding, zaddress from_addr, ZForwardingCursor* cursor) {
  return forwarding_find(forwarding, ZAddress::offset(from_addr), cursor);
}

static zaddress forwarding_insert(ZForwarding* forwarding, zoffset from_offset, zaddress to_addr, ZForwardingCursor* cursor) {
  const uintptr_t from_index = forwarding_index(forwarding, from_offset);
  const zoffset to_offset = ZAddress::offset(to_addr);
  const zoffset to_offset_final = forwarding->insert(from_index, to_offset, cursor);
  return ZOffset::address(to_offset_final);
}

static zaddress forwarding_insert(ZForwarding* forwarding, zaddress from_addr, zaddress to_addr, ZForwardingCursor* cursor) {
  return forwarding_insert(forwarding, ZAddress::offset(from_addr), to_addr, cursor);
}

ZRelocateQueue::ZRelocateQueue()
  : _lock(),
    _queue(),
    _nworkers(0),
    _nsynchronized(0),
    _synchronize(false),
    _is_active(false),
    _needs_attention(0) {}

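// _needs_attention counts the number of reasons for workers to take the slow
// path in synchronize_poll(): a non-empty queue and/or a pending request to
// synchronize. It is therefore always in the range [0, 2], which the asserts
// in inc/dec below check.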
bool ZRelocateQueue::needs_attention() const {
  return Atomic::load(&_needs_attention) != 0;
}

void ZRelocateQueue::inc_needs_attention() {
  const int needs_attention = Atomic::add(&_needs_attention, 1);
  assert(needs_attention == 1 || needs_attention == 2, "Invalid state");
}

void ZRelocateQueue::dec_needs_attention() {
  const int needs_attention = Atomic::sub(&_needs_attention, 1);
  assert(needs_attention == 0 || needs_attention == 1, "Invalid state");
}

void ZRelocateQueue::activate(uint nworkers) {
  _is_active = true;
  join(nworkers);
}

void ZRelocateQueue::deactivate() {
  Atomic::store(&_is_active, false);
  clear();
}

bool ZRelocateQueue::is_active() const {
  return Atomic::load(&_is_active);
}

void ZRelocateQueue::join(uint nworkers) {
  assert(nworkers != 0, "Must request at least one worker");
  assert(_nworkers == 0, "Invalid state");
  assert(_nsynchronized == 0, "Invalid state");

  log_debug(gc, reloc)("Joining workers: %u", nworkers);

  _nworkers = nworkers;
}

void ZRelocateQueue::resize_workers(uint nworkers) {
  assert(nworkers != 0, "Must request at least one worker");
  assert(_nworkers == 0, "Invalid state");
  assert(_nsynchronized == 0, "Invalid state");

  log_debug(gc, reloc)("Resize workers: %u", nworkers);

  ZLocker<ZConditionLock> locker(&_lock);
  _nworkers = nworkers;
}

void ZRelocateQueue::leave() {
  ZLocker<ZConditionLock> locker(&_lock);
  _nworkers--;

  assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);

  log_debug(gc, reloc)("Leaving workers: left: %u _synchronize: %d _nsynchronized: %u", _nworkers, _synchronize, _nsynchronized);

  // Prune done forwardings
  const bool forwardings_done = prune();

  // Check if all workers synchronized
  const bool last_synchronized = _synchronize && _nworkers == _nsynchronized;

  if (forwardings_done || last_synchronized) {
    _lock.notify_all();
  }
}

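// Called by a thread that failed to relocate an object itself, typically
// because it could not allocate a to-space address. The forwarding is queued
// so that a worker completes the page with priority, and the caller blocks
// until the page is done and the object can simply be forwarded. The time
// spent waiting here is reported under "Relocation Stall".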
void ZRelocateQueue::add_and_wait(ZForwarding* forwarding) {
  ZStatTimer timer(ZCriticalPhaseRelocationStall);
  ZLocker<ZConditionLock> locker(&_lock);

  if (forwarding->is_done()) {
    return;
  }

  _queue.append(forwarding);
  if (_queue.length() == 1) {
    // Queue became non-empty
    inc_needs_attention();
    _lock.notify_all();
  }

  while (!forwarding->is_done()) {
    _lock.wait();
  }
}

bool ZRelocateQueue::prune() {
  if (_queue.is_empty()) {
    return false;
  }

  bool done = false;

  for (int i = 0; i < _queue.length();) {
    const ZForwarding* const forwarding = _queue.at(i);
    if (forwarding->is_done()) {
      done = true;

      _queue.delete_at(i);
    } else {
      i++;
    }
  }

  if (_queue.is_empty()) {
    dec_needs_attention();
  }

  return done;
}

ZForwarding* ZRelocateQueue::prune_and_claim() {
  if (prune()) {
    _lock.notify_all();
  }

  for (int i = 0; i < _queue.length(); i++) {
    ZForwarding* const forwarding = _queue.at(i);
    if (forwarding->claim()) {
      return forwarding;
    }
  }

  return nullptr;
}

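// Scoped helper used by synchronize_poll(). While a worker waits on the queue
// lock it is counted as synchronized; once all active workers are synchronized
// the thread that requested synchronization (see synchronize()) is notified.
// Leaving the scope decrements the count again, so a worker that picks up new
// work is no longer considered synchronized.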
class ZRelocateQueueSynchronizeThread {
private:
  ZRelocateQueue* const _queue;

public:
  ZRelocateQueueSynchronizeThread(ZRelocateQueue* queue)
    : _queue(queue) {
    _queue->synchronize_thread();
  }

  ~ZRelocateQueueSynchronizeThread() {
    _queue->desynchronize_thread();
  }
};

void ZRelocateQueue::synchronize_thread() {
  _nsynchronized++;

  log_debug(gc, reloc)("Synchronize worker _nsynchronized %u", _nsynchronized);

  assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);
  if (_nsynchronized == _nworkers) {
    // All workers synchronized
    _lock.notify_all();
  }
}

void ZRelocateQueue::desynchronize_thread() {
  _nsynchronized--;

  log_debug(gc, reloc)("Desynchronize worker _nsynchronized %u", _nsynchronized);

  assert(_nsynchronized < _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);
}

ZForwarding* ZRelocateQueue::synchronize_poll() {
  // Fast path avoids locking
  if (!needs_attention()) {
    return nullptr;
  }

  // Slow path to get the next forwarding and/or synchronize
  ZLocker<ZConditionLock> locker(&_lock);

  {
    ZForwarding* const forwarding = prune_and_claim();
    if (forwarding != nullptr) {
      // Don't become synchronized while there are elements in the queue
      return forwarding;
    }
  }

  if (!_synchronize) {
    return nullptr;
  }

  ZRelocateQueueSynchronizeThread rqst(this);

  do {
    _lock.wait();

    ZForwarding* const forwarding = prune_and_claim();
    if (forwarding != nullptr) {
      return forwarding;
    }
  } while (_synchronize);

  return nullptr;
}

void ZRelocateQueue::clear() {
  assert(_nworkers == 0, "Invalid state");

  if (_queue.is_empty()) {
    return;
  }

  ZArrayIterator<ZForwarding*> iter(&_queue);
  for (ZForwarding* forwarding; iter.next(&forwarding);) {
    assert(forwarding->is_done(), "All should be done");
  }

  assert(false, "Clear was not empty");

  _queue.clear();
  dec_needs_attention();
}

void ZRelocateQueue::synchronize() {
  ZLocker<ZConditionLock> locker(&_lock);
  _synchronize = true;

  inc_needs_attention();

  log_debug(gc, reloc)("Synchronize all workers 1 _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);

  while (_nworkers != _nsynchronized) {
    _lock.wait();
    log_debug(gc, reloc)("Synchronize all workers 2 _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);
  }
}

void ZRelocateQueue::desynchronize() {
  ZLocker<ZConditionLock> locker(&_lock);
  _synchronize = false;

  log_debug(gc, reloc)("Desynchronize all workers _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);

  assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);

  dec_needs_attention();

  _lock.notify_all();
}

ZRelocate::ZRelocate(ZGeneration* generation)
  : _generation(generation),
    _queue() {}

ZWorkers* ZRelocate::workers() const {
  return _generation->workers();
}

void ZRelocate::start() {
  _queue.activate(workers()->active_workers());
}

void ZRelocate::add_remset(volatile zpointer* p) {
  ZGeneration::young()->remember(p);
}

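// Relocates a single object: the object is first copied to a newly allocated
// to-space address, and only then is the forwarding entry installed. If
// another thread won the race and installed an entry first, the copy made
// here is discarded by undoing the allocation, and the winner's to-address is
// returned instead.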
static zaddress relocate_object_inner(ZForwarding* forwarding, zaddress from_addr, ZForwardingCursor* cursor) {
  assert(ZHeap::heap()->is_object_live(from_addr), "Should be live");

  // Allocate object
  const size_t size = ZUtils::object_size(from_addr);

  ZAllocatorForRelocation* allocator = ZAllocator::relocation(forwarding->to_age());

  const zaddress to_addr = allocator->alloc_object(size);

  if (is_null(to_addr)) {
    // Allocation failed
    return zaddress::null;
  }

  // Copy object
  ZUtils::object_copy_disjoint(from_addr, to_addr, size);

  // Insert forwarding
  const zaddress to_addr_final = forwarding_insert(forwarding, from_addr, to_addr, cursor);

  if (to_addr_final != to_addr) {
    // Already relocated, try undo allocation
    allocator->undo_alloc_object(to_addr, size);
  }

  return to_addr_final;
}

zaddress ZRelocate::relocate_object(ZForwarding* forwarding, zaddress_unsafe from_addr) {
  ZForwardingCursor cursor;

  // Lookup forwarding
  zaddress to_addr = forwarding_find(forwarding, from_addr, &cursor);
  if (!is_null(to_addr)) {
    // Already relocated
    return to_addr;
  }

  // Relocate object
  if (forwarding->retain_page(&_queue)) {
    assert(_generation->is_phase_relocate(), "Must be");
    to_addr = relocate_object_inner(forwarding, safe(from_addr), &cursor);
    forwarding->release_page();

    if (!is_null(to_addr)) {
      // Success
      return to_addr;
    }

    // Failed to relocate object. Signal and wait for a worker thread to
    // complete relocation of this page, and then forward the object.
    _queue.add_and_wait(forwarding);
  }

  // Forward object
  return forward_object(forwarding, from_addr);
}

zaddress ZRelocate::forward_object(ZForwarding* forwarding, zaddress_unsafe from_addr) {
  ZForwardingCursor cursor;
  const zaddress to_addr = forwarding_find(forwarding, from_addr, &cursor);
  assert(!is_null(to_addr), "Should be forwarded: " PTR_FORMAT, untype(from_addr));
  return to_addr;
}

static ZPage* alloc_page(ZAllocatorForRelocation* allocator, ZPageType type, size_t size) {
  if (ZStressRelocateInPlace) {
    // Simulate failure to allocate a new page. This will
    // cause the page being relocated to be relocated in-place.
    return nullptr;
  }

  ZAllocationFlags flags;
  flags.set_non_blocking();
  flags.set_gc_relocation();

  return allocator->alloc_page_for_relocation(type, size, flags);
}

static void retire_target_page(ZGeneration* generation, ZPage* page) {
  if (generation->is_young() && page->is_old()) {
    generation->increase_promoted(page->used());
  } else {
    generation->increase_compacted(page->used());
  }

  // Free target page if it is empty. We can end up with an empty target
  // page if we allocated a new target page, and then lost the race to
  // relocate the remaining objects, leaving the target page empty when
  // relocation completed.
  if (page->used() == 0) {
    ZHeap::heap()->free_page(page);
  }
}

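// Target page allocators used when relocating pages. Each small-page
// forwarding is claimed and relocated by a single thread, so
// ZRelocateSmallAllocator hands out a private target page per worker and can
// use non-atomic object allocation. For medium pages all workers share one
// target page per age, since several workers may be relocating different
// medium pages at the same time; ZRelocateMediumAllocator therefore uses
// atomic object allocation and a lock when switching target pages. Both keep
// a count of the pages that ended up being relocated in-place because no new
// target page could be allocated.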
class ZRelocateSmallAllocator {
private:
  ZGeneration* const _generation;
  volatile size_t    _in_place_count;

public:
  ZRelocateSmallAllocator(ZGeneration* generation)
    : _generation(generation),
      _in_place_count(0) {}

  ZPage* alloc_and_retire_target_page(ZForwarding* forwarding, ZPage* target) {
    ZAllocatorForRelocation* const allocator = ZAllocator::relocation(forwarding->to_age());
    ZPage* const page = alloc_page(allocator, forwarding->type(), forwarding->size());
    if (page == nullptr) {
      Atomic::inc(&_in_place_count);
    }

    if (target != nullptr) {
      // Retire the old target page
      retire_target_page(_generation, target);
    }

    return page;
  }

  void share_target_page(ZPage* page) {
    // Does nothing
  }

  void free_target_page(ZPage* page) {
    if (page != nullptr) {
      retire_target_page(_generation, page);
    }
  }

  zaddress alloc_object(ZPage* page, size_t size) const {
    return (page != nullptr) ? page->alloc_object(size) : zaddress::null;
  }

  void undo_alloc_object(ZPage* page, zaddress addr, size_t size) const {
    page->undo_alloc_object(addr, size);
  }

  size_t in_place_count() const {
    return _in_place_count;
  }
};

class ZRelocateMediumAllocator {
private:
  ZGeneration* const _generation;
  ZConditionLock     _lock;
  ZPage*             _shared[ZAllocator::_relocation_allocators];
  bool               _in_place;
  volatile size_t    _in_place_count;

public:
  ZRelocateMediumAllocator(ZGeneration* generation)
    : _generation(generation),
      _lock(),
      _shared(),
      _in_place(false),
      _in_place_count(0) {}

  ~ZRelocateMediumAllocator() {
    for (uint i = 0; i < ZAllocator::_relocation_allocators; ++i) {
      if (_shared[i] != nullptr) {
        retire_target_page(_generation, _shared[i]);
      }
    }
  }

  ZPage* shared(ZPageAge age) {
    return _shared[static_cast<uint>(age) - 1];
  }

  void set_shared(ZPageAge age, ZPage* page) {
    _shared[static_cast<uint>(age) - 1] = page;
  }

  ZPage* alloc_and_retire_target_page(ZForwarding* forwarding, ZPage* target) {
    ZLocker<ZConditionLock> locker(&_lock);

    // Wait for any ongoing in-place relocation to complete
    while (_in_place) {
      _lock.wait();
    }

    // Allocate a new page only if the shared page is the same as the
    // current target page. The shared page will be different from the
    // current target page if another thread shared a page, or allocated
    // a new page.
    const ZPageAge to_age = forwarding->to_age();
    if (shared(to_age) == target) {
      ZAllocatorForRelocation* const allocator = ZAllocator::relocation(forwarding->to_age());
      ZPage* const to_page = alloc_page(allocator, forwarding->type(), forwarding->size());
      set_shared(to_age, to_page);
      if (to_page == nullptr) {
        Atomic::inc(&_in_place_count);
        _in_place = true;
      }

      // This thread is responsible for retiring the shared target page
      if (target != nullptr) {
        retire_target_page(_generation, target);
      }
    }

    return shared(to_age);
  }

  void share_target_page(ZPage* page) {
    const ZPageAge age = page->age();

    ZLocker<ZConditionLock> locker(&_lock);
    assert(_in_place, "Invalid state");
    assert(shared(age) == nullptr, "Invalid state");
    assert(page != nullptr, "Invalid page");

    set_shared(age, page);
    _in_place = false;

    _lock.notify_all();
  }

  void free_target_page(ZPage* page) {
    // Does nothing
  }

  zaddress alloc_object(ZPage* page, size_t size) const {
    return (page != nullptr) ? page->alloc_object_atomic(size) : zaddress::null;
  }

  void undo_alloc_object(ZPage* page, zaddress addr, size_t size) const {
    page->undo_alloc_object_atomic(addr, size);
  }

  size_t in_place_count() const {
    return _in_place_count;
  }
};

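// Per-worker relocation state. A worker keeps one target page per relocation
// age, obtained from the shared Allocator, and relocates the live objects of
// its claimed forwardings into those pages. Bytes for objects that turned out
// to already have been relocated by other threads are accumulated in
// _other_promoted/_other_compacted and reported to the generation when the
// worker is done (see the destructor).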
template <typename Allocator>
class ZRelocateWork : public StackObj {
private:
  Allocator* const   _allocator;
  ZForwarding*       _forwarding;
  ZPage*             _target[ZAllocator::_relocation_allocators];
  ZGeneration* const _generation;
  size_t             _other_promoted;
  size_t             _other_compacted;

  ZPage* target(ZPageAge age) {
    return _target[static_cast<uint>(age) - 1];
  }

  void set_target(ZPageAge age, ZPage* page) {
    _target[static_cast<uint>(age) - 1] = page;
  }

  size_t object_alignment() const {
    return (size_t)1 << _forwarding->object_alignment_shift();
  }

  void increase_other_forwarded(size_t unaligned_object_size) {
    const size_t aligned_size = align_up(unaligned_object_size, object_alignment());
    if (_forwarding->is_promotion()) {
      _other_promoted += aligned_size;
    } else {
      _other_compacted += aligned_size;
    }
  }

  zaddress try_relocate_object_inner(zaddress from_addr) {
    ZForwardingCursor cursor;

    const size_t size = ZUtils::object_size(from_addr);
    ZPage* const to_page = target(_forwarding->to_age());

    // Lookup forwarding
    {
      const zaddress to_addr = forwarding_find(_forwarding, from_addr, &cursor);
      if (!is_null(to_addr)) {
        // Already relocated
        increase_other_forwarded(size);
        return to_addr;
      }
    }

    // Allocate object
    const zaddress allocated_addr = _allocator->alloc_object(to_page, size);
    if (is_null(allocated_addr)) {
      // Allocation failed
      return zaddress::null;
    }

    // Copy object. Use conjoint copying if we are relocating
    // in-place and the new object overlaps with the old object.
    if (_forwarding->in_place_relocation() && allocated_addr + size > from_addr) {
      ZUtils::object_copy_conjoint(from_addr, allocated_addr, size);
    } else {
      ZUtils::object_copy_disjoint(from_addr, allocated_addr, size);
    }

    // Insert forwarding
    const zaddress to_addr = forwarding_insert(_forwarding, from_addr, allocated_addr, &cursor);
    if (to_addr != allocated_addr) {
      // Already relocated, undo allocation
      _allocator->undo_alloc_object(to_page, to_addr, size);
      increase_other_forwarded(size);
    }

    return to_addr;
  }

  void update_remset_old_to_old(zaddress from_addr, zaddress to_addr) const {
    // Old-to-old relocation - move existing remset bits

    // If this is called for an in-place relocated page, then this code has the
    // responsibility to clear the old remset bits. Extra care is needed because:
    //
    // 1) The to-object copy can overlap with the from-object copy
    // 2) Remset bits of old objects need to be cleared
    //
    // A watermark is used to keep track of how far the old remset bits have been removed.

    const bool in_place = _forwarding->in_place_relocation();
    ZPage* const from_page = _forwarding->page();
    const uintptr_t from_local_offset = from_page->local_offset(from_addr);

    // Note: even with in-place relocation, the to_page could be another page
    ZPage* const to_page = ZHeap::heap()->page(to_addr);

    // Uses _relaxed version to handle that in-place relocation resets _top
    assert(ZHeap::heap()->is_in_page_relaxed(from_page, from_addr), "Must be");
    assert(to_page->is_in(to_addr), "Must be");

    // Read the size from the to-object, since the from-object
    // could have been overwritten during in-place relocation.
    const size_t size = ZUtils::object_size(to_addr);

    // If a young generation collection started while the old generation
    // relocated objects, the remembered set bits were flipped from "current"
    // to "previous".
    //
    // We need to select the correct remembered set bitmap to ensure that the
    // old remset bits are found.
    //
    // Note that if the young generation marking (remset scanning) finishes
    // before the old generation relocation has relocated this page, then the
    // young generation will visit this page's previous remembered set bits and
    // move them over to the current bitmap.
    //
    // If the young generation runs multiple cycles while the old generation is
    // relocating, then the first cycle will have consumed the old remset bits
    // and moved the associated objects to a new old page. The old relocation
    // could find either of the two bitmaps. So, either it will find the original
    // remset bits for the page, or it will find an empty bitmap for the page. It
    // doesn't matter for correctness, because the young generation marking has
    // already taken care of the bits.

    const bool active_remset_is_current = ZGeneration::old()->active_remset_is_current();

    // When in-place relocation is done and the old remset bits are located in
    // the bitmap that is going to be used for the new remset bits, then we
    // need to clear the old bits before the new bits are inserted.
    const bool iterate_current_remset = active_remset_is_current && !in_place;

    BitMap::Iterator iter = iterate_current_remset
        ? from_page->remset_iterator_limited_current(from_local_offset, size)
        : from_page->remset_iterator_limited_previous(from_local_offset, size);

    for (BitMap::idx_t field_bit : iter) {
      const uintptr_t field_local_offset = ZRememberedSet::to_offset(field_bit);

      // Add remset entry in the to-page
      const uintptr_t offset = field_local_offset - from_local_offset;
      const zaddress to_field = to_addr + offset;
      log_trace(gc, reloc)("Remember: from: " PTR_FORMAT " to: " PTR_FORMAT " current: %d marking: %d page: " PTR_FORMAT " remset: " PTR_FORMAT,
          untype(from_page->start() + field_local_offset), untype(to_field), active_remset_is_current, ZGeneration::young()->is_phase_mark(), p2i(to_page), p2i(to_page->remset_current()));

      volatile zpointer* const p = (volatile zpointer*)to_field;

      if (ZGeneration::young()->is_phase_mark()) {
        // Young generation remembered set scanning needs to know about this
        // field. It will take responsibility to add a new remembered set entry if needed.
        _forwarding->relocated_remembered_fields_register(p);
      } else {
        to_page->remember(p);
        if (in_place) {
          assert(to_page->is_remembered(p), "p: " PTR_FORMAT, p2i(p));
        }
      }
    }
  }

  static bool add_remset_if_young(volatile zpointer* p, zaddress addr) {
    if (ZHeap::heap()->is_young(addr)) {
      ZRelocate::add_remset(p);
      return true;
    }

    return false;
  }

  static void update_remset_promoted_filter_and_remap_per_field(volatile zpointer* p) {
    const zpointer ptr = Atomic::load(p);

    assert(ZPointer::is_old_load_good(ptr), "Should be at least old load good: " PTR_FORMAT, untype(ptr));

    if (ZPointer::is_store_good(ptr)) {
      // Already has a remset entry
      return;
    }

    if (ZPointer::is_load_good(ptr)) {
      if (!is_null_any(ptr)) {
        const zaddress addr = ZPointer::uncolor(ptr);
        add_remset_if_young(p, addr);
      }
      // No need to remap, it is already load good
      return;
    }

    if (is_null_any(ptr)) {
      // Eagerly remap to skip adding a remset entry just to get deferred remapping
      ZBarrier::remap_young_relocated(p, ptr);
      return;
    }

    const zaddress_unsafe addr_unsafe = ZPointer::uncolor_unsafe(ptr);
    ZForwarding* const forwarding = ZGeneration::young()->forwarding(addr_unsafe);

    if (forwarding == nullptr) {
      // Object isn't being relocated
      const zaddress addr = safe(addr_unsafe);
      if (!add_remset_if_young(p, addr)) {
        // Not young - eagerly remap to skip adding a remset entry just to get deferred remapping
        ZBarrier::remap_young_relocated(p, ptr);
      }
      return;
    }

    const zaddress addr = forwarding->find(addr_unsafe);

    if (!is_null(addr)) {
      // Object has already been relocated
      if (!add_remset_if_young(p, addr)) {
        // Not young - eagerly remap to skip adding a remset entry just to get deferred remapping
        ZBarrier::remap_young_relocated(p, ptr);
      }
      return;
    }

    // Object has not been relocated yet
    // Don't want to eagerly relocate objects, so just add a remset
    ZRelocate::add_remset(p);
    return;
  }

  void update_remset_promoted(zaddress to_addr) const {
    ZIterator::basic_oop_iterate(to_oop(to_addr), update_remset_promoted_filter_and_remap_per_field);
  }

  void update_remset_for_fields(zaddress from_addr, zaddress to_addr) const {
    if (_forwarding->to_age() != ZPageAge::old) {
      // No remembered set in young pages
      return;
    }

    // Need to deal with remset when moving objects to the old generation
    if (_forwarding->from_age() == ZPageAge::old) {
      update_remset_old_to_old(from_addr, to_addr);
      return;
    }

    // Normal promotion
    update_remset_promoted(to_addr);
  }

  bool try_relocate_object(zaddress from_addr) {
    const zaddress to_addr = try_relocate_object_inner(from_addr);

    if (is_null(to_addr)) {
      return false;
    }

    update_remset_for_fields(from_addr, to_addr);

    return true;
  }

  void start_in_place_relocation_prepare_remset(ZPage* from_page) {
    if (_forwarding->from_age() != ZPageAge::old) {
      // Only old pages have remset bits
      return;
    }

    if (ZGeneration::old()->active_remset_is_current()) {
      // We want to iterate over and clear the remset bits of the from-space page,
      // and insert current bits in the to-space page. However, with in-place
      // relocation, the from-space and to-space pages are the same. Clearing
      // is destructive, and is difficult to perform before or during the iteration.
      // However, clearing of the current bits has to be done before exposing the
      // to-space objects in the forwarding table.
      //
      // To solve this tricky dependency problem, we start by stashing away the
      // current bits in the previous bits, and clearing the current bits
      // (implemented by swapping the bits). This way, the current bits are
      // cleared before copying the objects (like a normal to-space page),
      // and the previous bits represent a copy of the current bits
      // of the from-space page, and are used for iteration.
      from_page->swap_remset_bitmaps();
    }
  }

  ZPage* start_in_place_relocation(zoffset relocated_watermark) {
    _forwarding->in_place_relocation_claim_page();
    _forwarding->in_place_relocation_start(relocated_watermark);

    ZPage* const from_page = _forwarding->page();

    const ZPageAge to_age = _forwarding->to_age();
    const bool promotion = _forwarding->is_promotion();

    // Promotions happen through a new cloned page
    ZPage* const to_page = promotion ? from_page->clone_limited() : from_page;
    to_page->reset(to_age, ZPageResetType::InPlaceRelocation);

    // Clear remset bits for all objects that were relocated
    // before this page became an in-place relocated page.
    start_in_place_relocation_prepare_remset(from_page);

    if (promotion) {
      // Register the promotion
      ZGeneration::young()->in_place_relocate_promote(from_page, to_page);
      ZGeneration::young()->register_in_place_relocate_promoted(from_page);
    }

    return to_page;
  }

  void relocate_object(oop obj) {
    const zaddress addr = to_zaddress(obj);
    assert(ZHeap::heap()->is_object_live(addr), "Should be live");

    while (!try_relocate_object(addr)) {
      // Allocate a new target page, or if that fails, use the page being
      // relocated as the new target, which will cause it to be relocated
      // in-place.
      const ZPageAge to_age = _forwarding->to_age();
      ZPage* to_page = _allocator->alloc_and_retire_target_page(_forwarding, target(to_age));
      set_target(to_age, to_page);
      if (to_page != nullptr) {
        continue;
      }

      // Start in-place relocation to block other threads from accessing
      // the page, or its forwarding table, until it has been released
      // (relocation completed).
      to_page = start_in_place_relocation(ZAddress::offset(addr));
      set_target(to_age, to_page);
    }
  }

public:
  ZRelocateWork(Allocator* allocator, ZGeneration* generation)
    : _allocator(allocator),
      _forwarding(nullptr),
      _target(),
      _generation(generation),
      _other_promoted(0),
      _other_compacted(0) {}

  ~ZRelocateWork() {
    for (uint i = 0; i < ZAllocator::_relocation_allocators; ++i) {
      _allocator->free_target_page(_target[i]);
    }
    // Report statistics on behalf of non-worker threads
    _generation->increase_promoted(_other_promoted);
    _generation->increase_compacted(_other_compacted);
  }

  bool active_remset_is_current() const {
    // Normal old-to-old relocation can treat the from-page remset as a
    // read-only copy, and then copy over the appropriate remset bits to the
    // cleared to-page's 'current' remset bitmap.
    //
    // In-place relocation is more complicated. Since the same page is both
    // a from-page and a to-page, we need to remove the old remset bits, and
    // add remset bits that correspond to the new locations of the relocated
    // objects.
    //
    // Depending on how long ago the page was allocated (in terms of the number
    // of young GCs and the current young GC's phase), the active remembered
    // set will be in either the 'current' or 'previous' bitmap.
    //
    // If the active bits are in the 'previous' bitmap, we know that the
    // 'current' bitmap was cleared at some earlier point in time, and we can
    // simply set new bits in the 'current' bitmap, and later, when relocation
    // has read all the old remset bits, just clear the 'previous' remset
    // bitmap.
    //
    // If, on the other hand, the active bits are in the 'current' bitmap, then
    // that bitmap will be used both to read the old remset bits and as the
    // destination for the remset bits that we copy when an object is copied
    // to its new location within the page. We need to *carefully* remove
    // all old remset bits, without clearing out the newly set bits.
    return ZGeneration::old()->active_remset_is_current();
  }

  void clear_remset_before_reuse(ZPage* page, bool in_place) {
    if (_forwarding->from_age() != ZPageAge::old) {
      // No remset bits
      return;
    }

    if (in_place) {
      // Clear 'previous' remset bits. For in-place relocated pages, the previous
      // remset bits are always used, even when active_remset_is_current().
      page->clear_remset_previous();

      return;
    }

    // Normal relocate

    // Clear active remset bits
    if (active_remset_is_current()) {
      page->clear_remset_current();
    } else {
      page->clear_remset_previous();
    }

    // Verify that inactive remset bits are all cleared
    if (active_remset_is_current()) {
      page->verify_remset_cleared_previous();
    } else {
      page->verify_remset_cleared_current();
    }
  }

  void finish_in_place_relocation() {
    // We are done with the from_space copy of the page
    _forwarding->in_place_relocation_finish();
  }

  void do_forwarding(ZForwarding* forwarding) {
    _forwarding = forwarding;

    _forwarding->page()->log_msg(" (relocate page)");

    ZVerify::before_relocation(_forwarding);

    // Relocate objects
    _forwarding->object_iterate([&](oop obj) { relocate_object(obj); });

    ZVerify::after_relocation(_forwarding);

    // Verify
    if (ZVerifyForwarding) {
      _forwarding->verify();
    }

    _generation->increase_freed(_forwarding->page()->size());

    // Deal with in-place relocation
    const bool in_place = _forwarding->in_place_relocation();
    if (in_place) {
      finish_in_place_relocation();
    }

    // Old from-space pages need to deal with remset bits
    if (_forwarding->from_age() == ZPageAge::old) {
      _forwarding->relocated_remembered_fields_after_relocate();
    }

    // Release relocated page
    _forwarding->release_page();

    if (in_place) {
      // Wait for all other threads to call release_page
      ZPage* const page = _forwarding->detach_page();

      // Ensure that previous remset bits are cleared
      clear_remset_before_reuse(page, true /* in_place */);

      page->log_msg(" (relocate page done in-place)");

      // Different pages when promoting
      ZPage* const target_page = target(_forwarding->to_age());
      _allocator->share_target_page(target_page);

    } else {
      // Wait for all other threads to call release_page
      ZPage* const page = _forwarding->detach_page();

      // Ensure that all remset bits are cleared
      // Note: cleared after detach_page, when we know that
      // the young generation isn't scanning the remset.
      clear_remset_before_reuse(page, false /* in_place */);

      page->log_msg(" (relocate page done normal)");

      // Free page
      ZHeap::heap()->free_page(page);
    }
  }
};

class ZRelocateStoreBufferInstallBasePointersThreadClosure : public ThreadClosure {
public:
  virtual void do_thread(Thread* thread) {
    JavaThread* const jt = JavaThread::cast(thread);
    ZStoreBarrierBuffer* buffer = ZThreadLocalData::store_barrier_buffer(jt);
    buffer->install_base_pointers();
  }
};

// Installs the object base pointers (object starts) for the fields written
// in the store buffer. The code that searches for the object start uses the
// liveness information stored in the pages. That information is lost when
// the pages have been relocated and then destroyed.
class ZRelocateStoreBufferInstallBasePointersTask : public ZTask {
private:
  ZJavaThreadsIterator _threads_iter;

public:
  ZRelocateStoreBufferInstallBasePointersTask(ZGeneration* generation)
    : ZTask("ZRelocateStoreBufferInstallBasePointersTask"),
      _threads_iter(generation->id_optional()) {}

  virtual void work() {
    ZRelocateStoreBufferInstallBasePointersThreadClosure fix_store_buffer_cl;
    _threads_iter.apply(&fix_store_buffer_cl);
  }
};

class ZRelocateTask : public ZRestartableTask {
private:
  ZRelocationSetParallelIterator _iter;
  ZGeneration* const             _generation;
  ZRelocateQueue* const          _queue;
  ZRelocateSmallAllocator        _small_allocator;
  ZRelocateMediumAllocator       _medium_allocator;

public:
  ZRelocateTask(ZRelocationSet* relocation_set, ZRelocateQueue* queue)
    : ZRestartableTask("ZRelocateTask"),
      _iter(relocation_set),
      _generation(relocation_set->generation()),
      _queue(queue),
      _small_allocator(_generation),
      _medium_allocator(_generation) {}

  ~ZRelocateTask() {
    _generation->stat_relocation()->at_relocate_end(_small_allocator.in_place_count(), _medium_allocator.in_place_count());

    // Signal that we're not using the queue anymore. Used mostly for asserts.
    _queue->deactivate();
  }

  virtual void work() {
    ZRelocateWork<ZRelocateSmallAllocator> small(&_small_allocator, _generation);
    ZRelocateWork<ZRelocateMediumAllocator> medium(&_medium_allocator, _generation);

    const auto do_forwarding = [&](ZForwarding* forwarding) {
      ZPage* const page = forwarding->page();
      if (page->is_small()) {
        small.do_forwarding(forwarding);
      } else {
        medium.do_forwarding(forwarding);
      }

      // Absolute last thing done while relocating a page.
      //
      // We don't use the SuspendibleThreadSet when relocating pages.
      // Instead the ZRelocateQueue is used as a pseudo STS joiner/leaver.
      //
      // After the mark_done call, a safepoint could occur and a new
      // GC phase could be entered.
      forwarding->mark_done();
    };

    const auto claim_and_do_forwarding = [&](ZForwarding* forwarding) {
      if (forwarding->claim()) {
        do_forwarding(forwarding);
      }
    };

    const auto do_forwarding_one_from_iter = [&]() {
      ZForwarding* forwarding;

      if (_iter.next(&forwarding)) {
        claim_and_do_forwarding(forwarding);
        return true;
      }

      return false;
    };

    for (;;) {
      // As long as there are requests in the relocate queue, there are threads
      // waiting in a VM state that does not allow them to be blocked. The
      // worker threads need to finish relocating these pages, and allow the
      // other threads to continue and proceed to a blocking state. After that,
      // the worker threads are allowed to safepoint synchronize.
      for (ZForwarding* forwarding; (forwarding = _queue->synchronize_poll()) != nullptr;) {
        do_forwarding(forwarding);
      }

      if (!do_forwarding_one_from_iter()) {
        // No more work
        break;
      }

      if (_generation->should_worker_resize()) {
        break;
      }
    }

    _queue->leave();
  }

  virtual void resize_workers(uint nworkers) {
    _queue->resize_workers(nworkers);
  }
};

static void remap_and_maybe_add_remset(volatile zpointer* p) {
  const zpointer ptr = Atomic::load(p);

  if (ZPointer::is_store_good(ptr)) {
    // Already has a remset entry
    return;
  }

  // Remset entries are used for two reasons:
  // 1) Young marking old-to-young pointer roots
  // 2) Deferred remapping of stale old-to-young pointers
  //
  // This load barrier will up-front perform the remapping of (2),
  // and the code below only has to make sure we register up-to-date
  // old-to-young pointers for (1).
  const zaddress addr = ZBarrier::load_barrier_on_oop_field_preloaded(p, ptr);

  if (is_null(addr)) {
    // No need for remset entries for null pointers
    return;
  }

  if (ZHeap::heap()->is_old(addr)) {
    // No need for remset entries for pointers to old gen
    return;
  }

  ZRelocate::add_remset(p);
}

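// Flip-promoted pages are promoted to the old generation without moving any
// objects, so no relocation pass has had the chance to create remembered set
// entries for their old-to-young fields. This task iterates over all objects
// in the flip-promoted pages and adds the missing entries, remapping stale
// pointers along the way (see remap_and_maybe_add_remset above).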
class ZRelocateAddRemsetForFlipPromoted : public ZRestartableTask {
private:
  ZStatTimerYoung                _timer;
  ZArrayParallelIterator<ZPage*> _iter;

public:
  ZRelocateAddRemsetForFlipPromoted(ZArray<ZPage*>* pages)
    : ZRestartableTask("ZRelocateAddRemsetForFlipPromoted"),
      _timer(ZSubPhaseConcurrentRelocateRememberedSetFlipPromotedYoung),
      _iter(pages) {}

  virtual void work() {
    SuspendibleThreadSetJoiner sts_joiner;

    for (ZPage* page; _iter.next(&page);) {
      page->object_iterate([&](oop obj) {
        ZIterator::basic_oop_iterate_safe(obj, remap_and_maybe_add_remset);
      });

      SuspendibleThreadSet::yield();
      if (ZGeneration::young()->should_worker_resize()) {
        return;
      }
    }
  }
};

void ZRelocate::relocate(ZRelocationSet* relocation_set) {
  {
    // Install the store buffer's base pointers before the
    // relocate task destroys the liveness information in
    // the relocated pages.
    ZRelocateStoreBufferInstallBasePointersTask buffer_task(_generation);
    workers()->run(&buffer_task);
  }

  {
    ZRelocateTask relocate_task(relocation_set, &_queue);
    workers()->run(&relocate_task);
  }

  if (relocation_set->generation()->is_young()) {
    ZRelocateAddRemsetForFlipPromoted task(relocation_set->flip_promoted_pages());
    workers()->run(&task);
  }
}

ZPageAge ZRelocate::compute_to_age(ZPageAge from_age) {
  if (from_age == ZPageAge::old) {
    return ZPageAge::old;
  }

  const uint age = static_cast<uint>(from_age);
  if (age >= ZGeneration::young()->tenuring_threshold()) {
    return ZPageAge::old;
  }

  return static_cast<ZPageAge>(age + 1);
}
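// For example, with a tenuring threshold of 2, an eden page survives to
// survivor1, a survivor1 page to survivor2, and a survivor2 (or older) page
// is promoted to old. Old pages always stay old.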

class ZFlipAgePagesTask : public ZTask {
private:
  ZArrayParallelIterator<ZPage*> _iter;

public:
  ZFlipAgePagesTask(const ZArray<ZPage*>* pages)
    : ZTask("ZPromotePagesTask"),
      _iter(pages) {}

  virtual void work() {
    SuspendibleThreadSetJoiner sts_joiner;
    ZArray<ZPage*> promoted_pages;

    for (ZPage* prev_page; _iter.next(&prev_page);) {
      const ZPageAge from_age = prev_page->age();
      const ZPageAge to_age = ZRelocate::compute_to_age(from_age);
      assert(from_age != ZPageAge::old, "invalid age for a young collection");

      // Figure out if this is proper promotion
      const bool promotion = to_age == ZPageAge::old;

      if (promotion) {
        // Before promoting an object (and before relocate start), we must ensure that all
        // contained zpointers are store good. The marking code ensures that for non-null
        // pointers, but null pointers are ignored. This code ensures that even null pointers
        // are made store good, for the promoted objects.
        prev_page->object_iterate([&](oop obj) {
          ZIterator::basic_oop_iterate_safe(obj, ZBarrier::promote_barrier_on_young_oop_field);
        });
      }

      // Logging
      prev_page->log_msg(promotion ? " (flip promoted)" : " (flip survived)");

      // Setup to-space page
      ZPage* const new_page = promotion ? prev_page->clone_limited_promote_flipped() : prev_page;
      new_page->reset(to_age, ZPageResetType::FlipAging);

      if (promotion) {
        ZGeneration::young()->flip_promote(prev_page, new_page);
        // Defer promoted page registration to limit the number of times the lock is taken
        promoted_pages.push(prev_page);
      }

      SuspendibleThreadSet::yield();
    }

    ZGeneration::young()->register_flip_promoted(promoted_pages);
  }
};

void ZRelocate::flip_age_pages(const ZArray<ZPage*>* pages) {
  ZFlipAgePagesTask flip_age_task(pages);
  workers()->run(&flip_age_task);
}

void ZRelocate::synchronize() {
  _queue.synchronize();
}

void ZRelocate::desynchronize() {
  _queue.desynchronize();
}

ZRelocateQueue* ZRelocate::queue() {
  return &_queue;
}

bool ZRelocate::is_queue_active() const {
  return _queue.is_active();
}