/*
 * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "gc/shared/gc_globals.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zAbort.inline.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zAllocator.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zForwarding.inline.hpp"
#include "gc/z/zGeneration.inline.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zIndexDistributor.inline.hpp"
#include "gc/z/zIterator.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageAge.hpp"
#include "gc/z/zRelocate.hpp"
#include "gc/z/zRelocationSet.inline.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStackWatermark.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zUncoloredRoot.inline.hpp"
#include "gc/z/zVerify.hpp"
#include "gc/z/zWorkers.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/atomic.hpp"
#include "utilities/debug.hpp"

static const ZStatCriticalPhase ZCriticalPhaseRelocationStall("Relocation Stall");
static const ZStatSubPhase ZSubPhaseConcurrentRelocateRememberedSetFlipPromotedYoung("Concurrent Relocate Remset FP", ZGenerationId::young);

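// The relocation queue is a rendezvous between threads that fail to relocate
// an object themselves (typically mutators stalling on a relocation barrier)
// and the GC worker threads. A stalled thread adds the page's forwarding to
// the queue and waits in add_and_wait(), while workers poll the queue and
// prioritize queued forwardings over their normal relocation work.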
ZRelocateQueue::ZRelocateQueue()
  : _lock(),
    _queue(),
    _nworkers(0),
    _nsynchronized(0),
    _synchronize(false),
    _is_active(false),
    _needs_attention(0) {}

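// _needs_attention counts the reasons for workers to take the slow path in
// synchronize_poll(): a non-empty queue and a pending synchronization
// request. At most one of each can be outstanding, hence the value is
// always 0, 1 or 2.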
bool ZRelocateQueue::needs_attention() const {
  return Atomic::load(&_needs_attention) != 0;
}

void ZRelocateQueue::inc_needs_attention() {
  const int needs_attention = Atomic::add(&_needs_attention, 1);
  assert(needs_attention == 1 || needs_attention == 2, "Invalid state");
}

void ZRelocateQueue::dec_needs_attention() {
  const int needs_attention = Atomic::sub(&_needs_attention, 1);
  assert(needs_attention == 0 || needs_attention == 1, "Invalid state");
}

void ZRelocateQueue::activate(uint nworkers) {
  _is_active = true;
  join(nworkers);
}

void ZRelocateQueue::deactivate() {
  Atomic::store(&_is_active, false);
  clear();
}

bool ZRelocateQueue::is_active() const {
  return Atomic::load(&_is_active);
}

void ZRelocateQueue::join(uint nworkers) {
  assert(nworkers != 0, "Must request at least one worker");
  assert(_nworkers == 0, "Invalid state");
  assert(_nsynchronized == 0, "Invalid state");

  log_debug(gc, reloc)("Joining workers: %u", nworkers);

  _nworkers = nworkers;
}

void ZRelocateQueue::resize_workers(uint nworkers) {
  assert(nworkers != 0, "Must request at least one worker");
  assert(_nworkers == 0, "Invalid state");
  assert(_nsynchronized == 0, "Invalid state");

  log_debug(gc, reloc)("Resize workers: %u", nworkers);

  ZLocker<ZConditionLock> locker(&_lock);
  _nworkers = nworkers;
}

void ZRelocateQueue::leave() {
  ZLocker<ZConditionLock> locker(&_lock);
  _nworkers--;

  assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);

  log_debug(gc, reloc)("Leaving workers: left: %u _synchronize: %d _nsynchronized: %u", _nworkers, _synchronize, _nsynchronized);

  // Prune done forwardings
  const bool forwardings_done = prune();

  // Check if all workers synchronized
  const bool last_synchronized = _synchronize && _nworkers == _nsynchronized;

  if (forwardings_done || last_synchronized) {
    _lock.notify_all();
  }
}

void ZRelocateQueue::add_and_wait(ZForwarding* forwarding) {
  ZStatTimer timer(ZCriticalPhaseRelocationStall);
  ZLocker<ZConditionLock> locker(&_lock);

  if (forwarding->is_done()) {
    return;
  }

  _queue.append(forwarding);
  if (_queue.length() == 1) {
    // Queue became non-empty
    inc_needs_attention();
    _lock.notify_all();
  }

  while (!forwarding->is_done()) {
    _lock.wait();
  }
}

bool ZRelocateQueue::prune() {
  if (_queue.is_empty()) {
    return false;
  }

  bool done = false;

  for (int i = 0; i < _queue.length();) {
    const ZForwarding* const forwarding = _queue.at(i);
    if (forwarding->is_done()) {
      done = true;

      _queue.delete_at(i);
    } else {
      i++;
    }
  }

  if (_queue.is_empty()) {
    dec_needs_attention();
  }

  return done;
}

ZForwarding* ZRelocateQueue::prune_and_claim() {
  if (prune()) {
    _lock.notify_all();
  }

  for (int i = 0; i < _queue.length(); i++) {
    ZForwarding* const forwarding = _queue.at(i);
    if (forwarding->claim()) {
      return forwarding;
    }
  }

  return nullptr;
}

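// RAII helper that counts the current worker as synchronized while it waits
// inside synchronize_poll().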
class ZRelocateQueueSynchronizeThread {
private:
  ZRelocateQueue* const _queue;

public:
  ZRelocateQueueSynchronizeThread(ZRelocateQueue* queue)
    : _queue(queue) {
    _queue->synchronize_thread();
  }

  ~ZRelocateQueueSynchronizeThread() {
    _queue->desynchronize_thread();
  }
};

void ZRelocateQueue::synchronize_thread() {
  _nsynchronized++;

  log_debug(gc, reloc)("Synchronize worker _nsynchronized %u", _nsynchronized);

  assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);
  if (_nsynchronized == _nworkers) {
    // All workers synchronized
    _lock.notify_all();
  }
}

void ZRelocateQueue::desynchronize_thread() {
  _nsynchronized--;

  log_debug(gc, reloc)("Desynchronize worker _nsynchronized %u", _nsynchronized);

  assert(_nsynchronized < _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);
}

ZForwarding* ZRelocateQueue::synchronize_poll() {
  // Fast path avoids locking
  if (!needs_attention()) {
    return nullptr;
  }

  // Slow path to get the next forwarding and/or synchronize
  ZLocker<ZConditionLock> locker(&_lock);

  {
    ZForwarding* const forwarding = prune_and_claim();
    if (forwarding != nullptr) {
      // Don't become synchronized while there are elements in the queue
      return forwarding;
    }
  }

  if (!_synchronize) {
    return nullptr;
  }

  ZRelocateQueueSynchronizeThread rqst(this);

  do {
    _lock.wait();

    ZForwarding* const forwarding = prune_and_claim();
    if (forwarding != nullptr) {
      return forwarding;
    }
  } while (_synchronize);

  return nullptr;
}

void ZRelocateQueue::clear() {
  assert(_nworkers == 0, "Invalid state");

  if (_queue.is_empty()) {
    return;
  }

  ZArrayIterator<ZForwarding*> iter(&_queue);
  for (ZForwarding* forwarding; iter.next(&forwarding);) {
    assert(forwarding->is_done(), "All should be done");
  }

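  // We only get here if the queue was non-empty at deactivation, which is not
  // expected. In release builds, clean up anyway by clearing the queue and
  // dropping the needs-attention count it contributed.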
  assert(false, "Clear was not empty");

  _queue.clear();
  dec_needs_attention();
}

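// Requests that all relocation workers park in synchronize_poll() and blocks
// until every joined worker has done so. The workers are released again by a
// matching call to desynchronize().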
void ZRelocateQueue::synchronize() {
  ZLocker<ZConditionLock> locker(&_lock);
  _synchronize = true;

  inc_needs_attention();

  log_debug(gc, reloc)("Synchronize all workers 1 _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);

  while (_nworkers != _nsynchronized) {
    _lock.wait();
    log_debug(gc, reloc)("Synchronize all workers 2 _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);
  }
}

void ZRelocateQueue::desynchronize() {
  ZLocker<ZConditionLock> locker(&_lock);
  _synchronize = false;

  log_debug(gc, reloc)("Desynchronize all workers _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);

  assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);

  dec_needs_attention();

  _lock.notify_all();
}

ZRelocate::ZRelocate(ZGeneration* generation)
  : _generation(generation),
    _queue() {}

ZWorkers* ZRelocate::workers() const {
  return _generation->workers();
}

void ZRelocate::start() {
  _queue.activate(workers()->active_workers());
}

void ZRelocate::add_remset(volatile zpointer* p) {
  ZGeneration::young()->remember(p);
}

static zaddress relocate_object_inner(ZForwarding* forwarding, zaddress from_addr, ZForwardingCursor* cursor) {
  assert(ZHeap::heap()->is_object_live(from_addr), "Should be live");

  // Allocate object
  const size_t size = ZUtils::object_size(from_addr);

  ZAllocatorForRelocation* allocator = ZAllocator::relocation(forwarding->to_age());

  const zaddress to_addr = allocator->alloc_object(size);

  if (is_null(to_addr)) {
    // Allocation failed
    return zaddress::null;
  }

  // Copy object
  ZUtils::object_copy_disjoint(from_addr, to_addr, size);

  // Insert forwarding
  const zaddress to_addr_final = forwarding->insert(from_addr, to_addr, cursor);

  if (to_addr_final != to_addr) {
    // Already relocated, try to undo the allocation
    allocator->undo_alloc_object(to_addr, size);
  }

  return to_addr_final;
}

zaddress ZRelocate::relocate_object(ZForwarding* forwarding, zaddress_unsafe from_addr) {
  ZForwardingCursor cursor;

  // Lookup forwarding
  zaddress to_addr = forwarding->find(from_addr, &cursor);
  if (!is_null(to_addr)) {
    // Already relocated
    return to_addr;
  }

  // Relocate object
  if (forwarding->retain_page(&_queue)) {
    assert(_generation->is_phase_relocate(), "Must be");
    to_addr = relocate_object_inner(forwarding, safe(from_addr), &cursor);
    forwarding->release_page();

    if (!is_null(to_addr)) {
      // Success
      return to_addr;
    }

    // Failed to relocate object. Signal and wait for a worker thread to
    // complete relocation of this page, and then forward the object.
    _queue.add_and_wait(forwarding);
  }

  // Forward object
  return forward_object(forwarding, from_addr);
}

zaddress ZRelocate::forward_object(ZForwarding* forwarding, zaddress_unsafe from_addr) {
  const zaddress to_addr = forwarding->find(from_addr);
  assert(!is_null(to_addr), "Should be forwarded: " PTR_FORMAT, untype(from_addr));
  return to_addr;
}

static ZPage* alloc_page(ZAllocatorForRelocation* allocator, ZPageType type, size_t size) {
  if (ZStressRelocateInPlace) {
    // Simulate failure to allocate a new page. This will
    // cause the page being relocated to be relocated in-place.
    return nullptr;
  }

  ZAllocationFlags flags;
  flags.set_non_blocking();
  flags.set_gc_relocation();

  return allocator->alloc_page_for_relocation(type, size, flags);
}

static void retire_target_page(ZGeneration* generation, ZPage* page) {
  if (generation->is_young() && page->is_old()) {
    generation->increase_promoted(page->used());
  } else {
    generation->increase_compacted(page->used());
  }

  // Free target page if it is empty. We can end up with an empty target
  // page if we allocated a new target page, and then lost the race to
  // relocate the remaining objects, leaving the target page empty when
  // relocation completed.
  if (page->used() == 0) {
    ZHeap::heap()->free_page(page, true /* allow_defragment */);
  }
}

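// Allocator used when relocating small pages. Each claimed forwarding is
// processed by a single worker, so the target page is worker-local and
// objects can be allocated with the non-atomic alloc_object().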
class ZRelocateSmallAllocator {
private:
  ZGeneration* const _generation;
  volatile size_t    _in_place_count;

public:
  ZRelocateSmallAllocator(ZGeneration* generation)
    : _generation(generation),
      _in_place_count(0) {}

  ZPage* alloc_and_retire_target_page(ZForwarding* forwarding, ZPage* target) {
    ZAllocatorForRelocation* const allocator = ZAllocator::relocation(forwarding->to_age());
    ZPage* const page = alloc_page(allocator, forwarding->type(), forwarding->size());
    if (page == nullptr) {
      Atomic::inc(&_in_place_count);
    }

    if (target != nullptr) {
      // Retire the old target page
      retire_target_page(_generation, target);
    }

    return page;
  }

  void share_target_page(ZPage* page) {
    // Does nothing
  }

  void free_target_page(ZPage* page) {
    if (page != nullptr) {
      retire_target_page(_generation, page);
    }
  }

  zaddress alloc_object(ZPage* page, size_t size) const {
    return (page != nullptr) ? page->alloc_object(size) : zaddress::null;
  }

  void undo_alloc_object(ZPage* page, zaddress addr, size_t size) const {
    page->undo_alloc_object(addr, size);
  }

  size_t in_place_count() const {
    return _in_place_count;
  }
};

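// Allocator used when relocating medium pages. All workers share one target
// page per age, so allocations use the atomic alloc_object_atomic() and the
// shared pages are guarded by a lock. While a worker performs an in-place
// relocation of a medium page, other workers wait before installing a new
// shared target page.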
class ZRelocateMediumAllocator {
private:
  ZGeneration* const _generation;
  ZConditionLock     _lock;
  ZPage*             _shared[ZAllocator::_relocation_allocators];
  bool               _in_place;
  volatile size_t    _in_place_count;

public:
  ZRelocateMediumAllocator(ZGeneration* generation)
    : _generation(generation),
      _lock(),
      _shared(),
      _in_place(false),
      _in_place_count(0) {}

  ~ZRelocateMediumAllocator() {
    for (uint i = 0; i < ZAllocator::_relocation_allocators; ++i) {
      if (_shared[i] != nullptr) {
        retire_target_page(_generation, _shared[i]);
      }
    }
  }

  ZPage* shared(ZPageAge age) {
    return _shared[static_cast<uint>(age) - 1];
  }

  void set_shared(ZPageAge age, ZPage* page) {
    _shared[static_cast<uint>(age) - 1] = page;
  }

  ZPage* alloc_and_retire_target_page(ZForwarding* forwarding, ZPage* target) {
    ZLocker<ZConditionLock> locker(&_lock);

    // Wait for any ongoing in-place relocation to complete
    while (_in_place) {
      _lock.wait();
    }

    // Allocate a new page only if the shared page is the same as the
    // current target page. The shared page will be different from the
    // current target page if another thread shared a page, or allocated
    // a new page.
    const ZPageAge to_age = forwarding->to_age();
    if (shared(to_age) == target) {
      ZAllocatorForRelocation* const allocator = ZAllocator::relocation(forwarding->to_age());
      ZPage* const to_page = alloc_page(allocator, forwarding->type(), forwarding->size());
      set_shared(to_age, to_page);
      if (to_page == nullptr) {
        Atomic::inc(&_in_place_count);
        _in_place = true;
      }

      // This thread is responsible for retiring the shared target page
      if (target != nullptr) {
        retire_target_page(_generation, target);
      }
    }

    return shared(to_age);
  }

  void share_target_page(ZPage* page) {
    const ZPageAge age = page->age();

    ZLocker<ZConditionLock> locker(&_lock);
    assert(_in_place, "Invalid state");
    assert(shared(age) == nullptr, "Invalid state");
    assert(page != nullptr, "Invalid page");

    set_shared(age, page);
    _in_place = false;

    _lock.notify_all();
  }

  void free_target_page(ZPage* page) {
    // Does nothing
  }

  zaddress alloc_object(ZPage* page, size_t size) const {
    return (page != nullptr) ? page->alloc_object_atomic(size) : zaddress::null;
  }

  void undo_alloc_object(ZPage* page, zaddress addr, size_t size) const {
    page->undo_alloc_object_atomic(addr, size);
  }

  size_t in_place_count() const {
    return _in_place_count;
  }
};

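// Worker-local state for relocating pages of one size class. Keeps track of
// the forwarding currently being processed and the target pages, one per
// possible to-age. Sizes relocated on behalf of other threads are gathered
// and reported to the generation when the worker is done.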
template <typename Allocator>
class ZRelocateWork : public StackObj {
private:
  Allocator* const   _allocator;
  ZForwarding*       _forwarding;
  ZPage*             _target[ZAllocator::_relocation_allocators];
  ZGeneration* const _generation;
  size_t             _other_promoted;
  size_t             _other_compacted;

  ZPage* target(ZPageAge age) {
    return _target[static_cast<uint>(age) - 1];
  }

  void set_target(ZPageAge age, ZPage* page) {
    _target[static_cast<uint>(age) - 1] = page;
  }

  size_t object_alignment() const {
    return (size_t)1 << _forwarding->object_alignment_shift();
  }

  void increase_other_forwarded(size_t unaligned_object_size) {
    const size_t aligned_size = align_up(unaligned_object_size, object_alignment());
    if (_forwarding->is_promotion()) {
      _other_promoted += aligned_size;
    } else {
      _other_compacted += aligned_size;
    }
  }

  zaddress try_relocate_object_inner(zaddress from_addr) {
    ZForwardingCursor cursor;

    const size_t size = ZUtils::object_size(from_addr);
    ZPage* const to_page = target(_forwarding->to_age());

    // Lookup forwarding
    {
      const zaddress to_addr = _forwarding->find(from_addr, &cursor);
      if (!is_null(to_addr)) {
        // Already relocated
        increase_other_forwarded(size);
        return to_addr;
      }
    }

    // Allocate object
    const zaddress allocated_addr = _allocator->alloc_object(to_page, size);
    if (is_null(allocated_addr)) {
      // Allocation failed
      return zaddress::null;
    }

    // Copy object. Use conjoint copying if we are relocating
    // in-place and the new object overlaps with the old object.
    if (_forwarding->in_place_relocation() && allocated_addr + size > from_addr) {
      ZUtils::object_copy_conjoint(from_addr, allocated_addr, size);
    } else {
      ZUtils::object_copy_disjoint(from_addr, allocated_addr, size);
    }

    // Insert forwarding
    const zaddress to_addr = _forwarding->insert(from_addr, allocated_addr, &cursor);
    if (to_addr != allocated_addr) {
      // Already relocated, undo our own allocation
      _allocator->undo_alloc_object(to_page, allocated_addr, size);
      increase_other_forwarded(size);
    }

    return to_addr;
  }

  void update_remset_old_to_old(zaddress from_addr, zaddress to_addr) const {
    // Old-to-old relocation - move existing remset bits

    // If this is called for an in-place relocated page, then this code has the
    // responsibility to clear the old remset bits. Extra care is needed because:
    //
    // 1) The to-object copy can overlap with the from-object copy
    // 2) Remset bits of old objects need to be cleared
    //
    // A watermark is used to keep track of how far the old remset bits have been removed.

    const bool in_place = _forwarding->in_place_relocation();
    ZPage* const from_page = _forwarding->page();
    const uintptr_t from_local_offset = from_page->local_offset(from_addr);

    // Note: even with in-place relocation, the to_page could be another page
    ZPage* const to_page = ZHeap::heap()->page(to_addr);

    // Use the _relaxed version to handle the fact that in-place relocation resets _top
    assert(ZHeap::heap()->is_in_page_relaxed(from_page, from_addr), "Must be");
    assert(to_page->is_in(to_addr), "Must be");

    // Read the size from the to-object, since the from-object
    // could have been overwritten during in-place relocation.
    const size_t size = ZUtils::object_size(to_addr);

    // If a young generation collection started while the old generation was
    // relocating objects, the remembered set bits were flipped from "current"
    // to "previous".
    //
    // We need to select the correct remembered set bitmap to ensure that the
    // old remset bits are found.
    //
    // Note that if the young generation marking (remset scanning) finishes
    // before the old generation relocation has relocated this page, then the
    // young generation will visit this page's previous remembered set bits and
    // move them over to the current bitmap.
    //
    // If the young generation runs multiple cycles while the old generation is
    // relocating, then the first cycle will have consumed the old remset bits
    // and moved the associated objects to a new old page. The old relocation
    // could find either of the two bitmaps. So, either it will find the original
    // remset bits for the page, or it will find an empty bitmap for the page. It
    // doesn't matter for correctness, because the young generation marking has
    // already taken care of the bits.

    const bool active_remset_is_current = ZGeneration::old()->active_remset_is_current();

    // When in-place relocation is done and the old remset bits are located in
    // the bitmap that is going to be used for the new remset bits, then we
    // need to clear the old bits before the new bits are inserted.
    const bool iterate_current_remset = active_remset_is_current && !in_place;

    BitMap::Iterator iter = iterate_current_remset
        ? from_page->remset_iterator_limited_current(from_local_offset, size)
        : from_page->remset_iterator_limited_previous(from_local_offset, size);

    for (BitMap::idx_t field_bit : iter) {
      const uintptr_t field_local_offset = ZRememberedSet::to_offset(field_bit);

      // Add remset entry in the to-page
      const uintptr_t offset = field_local_offset - from_local_offset;
      const zaddress to_field = to_addr + offset;
      log_trace(gc, reloc)("Remember: from: " PTR_FORMAT " to: " PTR_FORMAT " current: %d marking: %d page: " PTR_FORMAT " remset: " PTR_FORMAT,
          untype(from_page->start() + field_local_offset), untype(to_field), active_remset_is_current, ZGeneration::young()->is_phase_mark(), p2i(to_page), p2i(to_page->remset_current()));

      volatile zpointer* const p = (volatile zpointer*)to_field;

      if (ZGeneration::young()->is_phase_mark()) {
        // Young generation remembered set scanning needs to know about this
        // field. It will take responsibility for adding a new remembered set
        // entry if needed.
        _forwarding->relocated_remembered_fields_register(p);
      } else {
        to_page->remember(p);
        if (in_place) {
          assert(to_page->is_remembered(p), "p: " PTR_FORMAT, p2i(p));
        }
      }
    }
  }

  static bool add_remset_if_young(volatile zpointer* p, zaddress addr) {
    if (ZHeap::heap()->is_young(addr)) {
      ZRelocate::add_remset(p);
      return true;
    }

    return false;
  }

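  // A promoted object's fields must either end up with a remembered set
  // entry (if they still refer to young objects) or be remapped. Where no
  // remset entry is needed, the pointer is remapped eagerly, to avoid adding
  // an entry whose only purpose would be deferred remapping.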
  static void update_remset_promoted_filter_and_remap_per_field(volatile zpointer* p) {
    const zpointer ptr = Atomic::load(p);

    assert(ZPointer::is_old_load_good(ptr), "Should be at least old load good: " PTR_FORMAT, untype(ptr));

    if (ZPointer::is_store_good(ptr)) {
      // Already has a remset entry
      return;
    }

    if (ZPointer::is_load_good(ptr)) {
      if (!is_null_any(ptr)) {
        const zaddress addr = ZPointer::uncolor(ptr);
        add_remset_if_young(p, addr);
      }
      // No need to remap, it is already load good
      return;
    }

    if (is_null_any(ptr)) {
      // Eagerly remap to skip adding a remset entry just to get deferred remapping
      ZBarrier::remap_young_relocated(p, ptr);
      return;
    }

    const zaddress_unsafe addr_unsafe = ZPointer::uncolor_unsafe(ptr);
    ZForwarding* const forwarding = ZGeneration::young()->forwarding(addr_unsafe);

    if (forwarding == nullptr) {
      // Object isn't being relocated
      const zaddress addr = safe(addr_unsafe);
      if (!add_remset_if_young(p, addr)) {
        // Not young - eagerly remap to skip adding a remset entry just to get deferred remapping
        ZBarrier::remap_young_relocated(p, ptr);
      }
      return;
    }

    const zaddress addr = forwarding->find(addr_unsafe);

    if (!is_null(addr)) {
      // Object has already been relocated
      if (!add_remset_if_young(p, addr)) {
        // Not young - eagerly remap to skip adding a remset entry just to get deferred remapping
        ZBarrier::remap_young_relocated(p, ptr);
      }
      return;
    }

    // Object has not been relocated yet
    // Don't want to eagerly relocate objects, so just add a remset entry
    ZRelocate::add_remset(p);
    return;
  }

  void update_remset_promoted(zaddress to_addr) const {
    ZIterator::basic_oop_iterate(to_oop(to_addr), update_remset_promoted_filter_and_remap_per_field);
  }

  void update_remset_for_fields(zaddress from_addr, zaddress to_addr) const {
    if (_forwarding->to_age() != ZPageAge::old) {
      // No remembered set in young pages
      return;
    }

    // Need to deal with remset when moving objects to the old generation
    if (_forwarding->from_age() == ZPageAge::old) {
      update_remset_old_to_old(from_addr, to_addr);
      return;
    }

    // Normal promotion
    update_remset_promoted(to_addr);
  }

  bool try_relocate_object(zaddress from_addr) {
    const zaddress to_addr = try_relocate_object_inner(from_addr);

    if (is_null(to_addr)) {
      return false;
    }

    update_remset_for_fields(from_addr, to_addr);

    return true;
  }

  void start_in_place_relocation_prepare_remset(ZPage* from_page) {
    if (_forwarding->from_age() != ZPageAge::old) {
      // Only old pages have remset bits
      return;
    }

    if (ZGeneration::old()->active_remset_is_current()) {
      // We want to iterate over and clear the remset bits of the from-space page,
      // and insert current bits in the to-space page. However, with in-place
      // relocation, the from-space and to-space pages are the same. Clearing
      // is destructive, and is difficult to perform before or during the iteration.
      // However, clearing of the current bits has to be done before exposing the
      // to-space objects in the forwarding table.
      //
      // To solve this tricky dependency problem, we start by stashing away the
      // current bits in the previous bits, and clearing the current bits
      // (implemented by swapping the bits). This way, the current bits are
      // cleared before copying the objects (like a normal to-space page),
      // and the previous bits are representing a copy of the current bits
      // of the from-space page, and are used for iteration.
      from_page->swap_remset_bitmaps();
    }
  }

  ZPage* start_in_place_relocation(zoffset relocated_watermark) {
    _forwarding->in_place_relocation_claim_page();
    _forwarding->in_place_relocation_start(relocated_watermark);

    ZPage* const from_page = _forwarding->page();

    const ZPageAge to_age = _forwarding->to_age();
    const bool promotion = _forwarding->is_promotion();

    // Promotions happen through a new cloned page
    ZPage* const to_page = promotion ? from_page->clone_limited() : from_page;

    // Reset page for in-place relocation
    to_page->reset(to_age);
    to_page->reset_top_for_allocation();
    if (promotion) {
      to_page->remset_alloc();
    }

    // Verify that the inactive remset is clear when resetting the page for
    // in-place relocation.
    if (from_page->age() == ZPageAge::old) {
      if (ZGeneration::old()->active_remset_is_current()) {
        to_page->verify_remset_cleared_previous();
      } else {
        to_page->verify_remset_cleared_current();
      }
    }

    // Clear remset bits for all objects that were relocated
    // before this page became an in-place relocated page.
    start_in_place_relocation_prepare_remset(from_page);

    if (promotion) {
      // Register the promotion
      ZGeneration::young()->in_place_relocate_promote(from_page, to_page);
      ZGeneration::young()->register_in_place_relocate_promoted(from_page);
    }

    return to_page;
  }

  void relocate_object(oop obj) {
    const zaddress addr = to_zaddress(obj);
    assert(ZHeap::heap()->is_object_live(addr), "Should be live");

    while (!try_relocate_object(addr)) {
      // Allocate a new target page, or if that fails, use the page being
      // relocated as the new target, which will cause it to be relocated
      // in-place.
      const ZPageAge to_age = _forwarding->to_age();
      ZPage* to_page = _allocator->alloc_and_retire_target_page(_forwarding, target(to_age));
      set_target(to_age, to_page);
      if (to_page != nullptr) {
        continue;
      }

      // Start in-place relocation to block other threads from accessing
      // the page, or its forwarding table, until it has been released
      // (relocation completed).
      to_page = start_in_place_relocation(ZAddress::offset(addr));
      set_target(to_age, to_page);
    }
  }

public:
  ZRelocateWork(Allocator* allocator, ZGeneration* generation)
    : _allocator(allocator),
      _forwarding(nullptr),
      _target(),
      _generation(generation),
      _other_promoted(0),
      _other_compacted(0) {}

  ~ZRelocateWork() {
    for (uint i = 0; i < ZAllocator::_relocation_allocators; ++i) {
      _allocator->free_target_page(_target[i]);
    }
    // Report statistics on behalf of non-worker threads
    _generation->increase_promoted(_other_promoted);
    _generation->increase_compacted(_other_compacted);
  }

  bool active_remset_is_current() const {
    // Normal old-to-old relocation can treat the from-page remset as a
    // read-only copy, and then copy over the appropriate remset bits to the
    // cleared to-page's 'current' remset bitmap.
    //
    // In-place relocation is more complicated, since the same page is both
    // a from-page and a to-page. We need to remove the old remset bits, and
    // add remset bits that correspond to the new locations of the relocated
    // objects.
    //
    // Depending on how long ago the page was allocated (in terms of the
    // number of young GCs and the current young GC's phase), the active
    // remembered set will be in either the 'current' or 'previous' bitmap.
    //
    // If the active bits are in the 'previous' bitmap, we know that the
    // 'current' bitmap was cleared at some earlier point in time, and we can
    // simply set new bits in the 'current' bitmap, and later, when relocation
    // has read all the old remset bits, we can simply clear the 'previous'
    // remset bitmap.
    //
    // If, on the other hand, the active bits are in the 'current' bitmap, then
    // that bitmap will be used both to read the old remset bits, and as the
    // destination for the remset bits that we copy when an object is copied
    // to its new location within the page. We need to *carefully* remove
    // all old remset bits, without clearing out the newly set bits.
    return ZGeneration::old()->active_remset_is_current();
  }

  void clear_remset_before_in_place_reuse(ZPage* page) {
    if (_forwarding->from_age() != ZPageAge::old) {
      // No remset bits
      return;
    }

    // Clear 'previous' remset bits. For in-place relocated pages, the previous
    // remset bits are always used, even when active_remset_is_current().
    page->clear_remset_previous();
  }

  void finish_in_place_relocation() {
    // We are done with the from_space copy of the page
    _forwarding->in_place_relocation_finish();
  }

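  // Relocates all live objects on the page associated with the given
  // forwarding, then releases the page. For in-place relocation, the reused
  // page is published via share_target_page() (a no-op for small pages);
  // otherwise the detached from-page is freed.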
  void do_forwarding(ZForwarding* forwarding) {
    _forwarding = forwarding;

    _forwarding->page()->log_msg(" (relocate page)");

    ZVerify::before_relocation(_forwarding);

    // Relocate objects
    _forwarding->object_iterate([&](oop obj) { relocate_object(obj); });

    ZVerify::after_relocation(_forwarding);

    // Verify
    if (ZVerifyForwarding) {
      _forwarding->verify();
    }

    _generation->increase_freed(_forwarding->page()->size());

    // Deal with in-place relocation
    const bool in_place = _forwarding->in_place_relocation();
    if (in_place) {
      finish_in_place_relocation();
    }

    // Old from-space pages need to deal with remset bits
    if (_forwarding->from_age() == ZPageAge::old) {
      _forwarding->relocated_remembered_fields_after_relocate();
    }

    // Release relocated page
    _forwarding->release_page();

    if (in_place) {
      // Wait for all other threads to call release_page
      ZPage* const page = _forwarding->detach_page();

      // Ensure that previous remset bits are cleared
      clear_remset_before_in_place_reuse(page);

      page->log_msg(" (relocate page done in-place)");

      // Different pages when promoting
      ZPage* const target_page = target(_forwarding->to_age());
      _allocator->share_target_page(target_page);
    } else {
      // Wait for all other threads to call release_page
      ZPage* const page = _forwarding->detach_page();

      page->log_msg(" (relocate page done normal)");

      // Free page
      ZHeap::heap()->free_page(page, true /* allow_defragment */);
    }
  }
};

class ZRelocateStoreBufferInstallBasePointersThreadClosure : public ThreadClosure {
public:
  virtual void do_thread(Thread* thread) {
    JavaThread* const jt = JavaThread::cast(thread);
    ZStoreBarrierBuffer* buffer = ZThreadLocalData::store_barrier_buffer(jt);
    buffer->install_base_pointers();
  }
};

// Installs the object base pointers (object starts) for the fields written
// in the store buffer. The code that searches for the object start uses the
// liveness information stored in the pages. That information is lost when
// the pages have been relocated and then destroyed.
class ZRelocateStoreBufferInstallBasePointersTask : public ZTask {
private:
  ZJavaThreadsIterator _threads_iter;

public:
  ZRelocateStoreBufferInstallBasePointersTask(ZGeneration* generation)
    : ZTask("ZRelocateStoreBufferInstallBasePointersTask"),
      _threads_iter(generation->id_optional()) {}

  virtual void work() {
    ZRelocateStoreBufferInstallBasePointersThreadClosure fix_store_buffer_cl;
    _threads_iter.apply(&fix_store_buffer_cl);
  }
};

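// Task that relocates the pages in the relocation set. Workers claim
// forwardings from a parallel iterator, but always give priority to
// forwardings queued by stalled threads via the ZRelocateQueue.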
class ZRelocateTask : public ZRestartableTask {
private:
  ZRelocationSetParallelIterator _iter;
  ZGeneration* const             _generation;
  ZRelocateQueue* const          _queue;
  ZRelocateSmallAllocator        _small_allocator;
  ZRelocateMediumAllocator       _medium_allocator;

public:
  ZRelocateTask(ZRelocationSet* relocation_set, ZRelocateQueue* queue)
    : ZRestartableTask("ZRelocateTask"),
      _iter(relocation_set),
      _generation(relocation_set->generation()),
      _queue(queue),
      _small_allocator(_generation),
      _medium_allocator(_generation) {}

  ~ZRelocateTask() {
    _generation->stat_relocation()->at_relocate_end(_small_allocator.in_place_count(), _medium_allocator.in_place_count());

    // Signal that we're not using the queue anymore. Used mostly for asserts.
    _queue->deactivate();
  }

  virtual void work() {
    ZRelocateWork<ZRelocateSmallAllocator> small(&_small_allocator, _generation);
    ZRelocateWork<ZRelocateMediumAllocator> medium(&_medium_allocator, _generation);

    const auto do_forwarding = [&](ZForwarding* forwarding) {
      ZPage* const page = forwarding->page();
      if (page->is_small()) {
        small.do_forwarding(forwarding);
      } else {
        medium.do_forwarding(forwarding);
      }

      // Absolute last thing done while relocating a page.
      //
      // We don't use the SuspendibleThreadSet when relocating pages.
      // Instead the ZRelocateQueue is used as a pseudo STS joiner/leaver.
      //
      // After the mark_done call, a safepoint could occur and a new
      // GC phase could be entered.
      forwarding->mark_done();
    };

    const auto claim_and_do_forwarding = [&](ZForwarding* forwarding) {
      if (forwarding->claim()) {
        do_forwarding(forwarding);
      }
    };

    const auto do_forwarding_one_from_iter = [&]() {
      ZForwarding* forwarding;

      if (_iter.next(&forwarding)) {
        claim_and_do_forwarding(forwarding);
        return true;
      }

      return false;
    };

    for (;;) {
      // As long as there are requests in the relocate queue, there are threads
      // waiting in a VM state that does not allow them to be blocked. The
      // worker threads need to finish relocating these pages, allowing the
      // waiting threads to continue and proceed to a blocking state. After
      // that, the worker threads are allowed to safepoint synchronize.
      for (ZForwarding* forwarding; (forwarding = _queue->synchronize_poll()) != nullptr;) {
        do_forwarding(forwarding);
      }

      if (!do_forwarding_one_from_iter()) {
        // No more work
        break;
      }

      if (_generation->should_worker_resize()) {
        break;
      }
    }

    _queue->leave();
  }

  virtual void resize_workers(uint nworkers) {
    _queue->resize_workers(nworkers);
  }
};


static void remap_and_maybe_add_remset(volatile zpointer* p) {
  const zpointer ptr = Atomic::load(p);

  if (ZPointer::is_store_good(ptr)) {
    // Already has a remset entry
    return;
  }

  // Remset entries are used for two reasons:
  // 1) Young marking of old-to-young pointer roots
  // 2) Deferred remapping of stale old-to-young pointers
  //
  // This load barrier will up-front perform the remapping of (2),
  // and the code below only has to make sure we register up-to-date
  // old-to-young pointers for (1).
  const zaddress addr = ZBarrier::load_barrier_on_oop_field_preloaded(p, ptr);

  if (is_null(addr)) {
    // No need for remset entries for null pointers
    return;
  }

  if (ZHeap::heap()->is_old(addr)) {
    // No need for remset entries for pointers to old gen
    return;
  }

  ZRelocate::add_remset(p);
}

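// Task that walks the objects on flip-promoted pages and gives each object
// field either an up-to-date remembered set entry or a remapped pointer,
// using remap_and_maybe_add_remset() above.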
class ZRelocateAddRemsetForFlipPromoted : public ZRestartableTask {
private:
  ZStatTimerYoung                _timer;
  ZArrayParallelIterator<ZPage*> _iter;

public:
  ZRelocateAddRemsetForFlipPromoted(ZArray<ZPage*>* pages)
    : ZRestartableTask("ZRelocateAddRemsetForFlipPromoted"),
      _timer(ZSubPhaseConcurrentRelocateRememberedSetFlipPromotedYoung),
      _iter(pages) {}

  virtual void work() {
    SuspendibleThreadSetJoiner sts_joiner;

    for (ZPage* page; _iter.next(&page);) {
      page->object_iterate([&](oop obj) {
        ZIterator::basic_oop_iterate_safe(obj, remap_and_maybe_add_remset);
      });

      SuspendibleThreadSet::yield();
      if (ZGeneration::young()->should_worker_resize()) {
        return;
      }
    }
  }
};

void ZRelocate::relocate(ZRelocationSet* relocation_set) {
  {
    // Install the store buffer's base pointers before the
    // relocate task destroys the liveness information in
    // the relocated pages.
    ZRelocateStoreBufferInstallBasePointersTask buffer_task(_generation);
    workers()->run(&buffer_task);
  }

  {
    ZRelocateTask relocate_task(relocation_set, &_queue);
    workers()->run(&relocate_task);
  }

  if (relocation_set->generation()->is_young()) {
    ZRelocateAddRemsetForFlipPromoted task(relocation_set->flip_promoted_pages());
    workers()->run(&task);
  }
}

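// Young pages age by one step per collection until they reach the tenuring
// threshold, at which point their contents are promoted to the old
// generation. For example, with a tenuring threshold of 2 (ages numbered
// from eden == 0): eden -> survivor1, survivor1 -> survivor2, and
// survivor2 or older -> old. Old pages stay old.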
ZPageAge ZRelocate::compute_to_age(ZPageAge from_age) {
  if (from_age == ZPageAge::old) {
    return ZPageAge::old;
  }

  const uint age = static_cast<uint>(from_age);
  if (age >= ZGeneration::young()->tenuring_threshold()) {
    return ZPageAge::old;
  }

  return static_cast<ZPageAge>(age + 1);
}

class ZFlipAgePagesTask : public ZTask {
private:
  ZArrayParallelIterator<ZPage*> _iter;

public:
  ZFlipAgePagesTask(const ZArray<ZPage*>* pages)
    : ZTask("ZFlipAgePagesTask"),
      _iter(pages) {}

  virtual void work() {
    SuspendibleThreadSetJoiner sts_joiner;
    ZArray<ZPage*> promoted_pages;

    for (ZPage* prev_page; _iter.next(&prev_page);) {
      const ZPageAge from_age = prev_page->age();
      const ZPageAge to_age = ZRelocate::compute_to_age(from_age);
      assert(from_age != ZPageAge::old, "invalid age for a young collection");

      // Figure out if this is a proper promotion
      const bool promotion = to_age == ZPageAge::old;

      if (promotion) {
        // Before promoting an object (and before relocate start), we must ensure that all
        // contained zpointers are store good. The marking code ensures that for non-null
        // pointers, but null pointers are ignored. This code ensures that even null pointers
        // are made store good for the promoted objects.
        prev_page->object_iterate([&](oop obj) {
          ZIterator::basic_oop_iterate_safe(obj, ZBarrier::promote_barrier_on_young_oop_field);
        });
      }

      // Logging
      prev_page->log_msg(promotion ? " (flip promoted)" : " (flip survived)");

      // Setup to-space page
      ZPage* const new_page = promotion ? prev_page->clone_limited() : prev_page;

      // Reset page for flip aging
      new_page->reset(to_age);
      new_page->reset_livemap();
      if (promotion) {
        new_page->remset_alloc();
      }

      if (promotion) {
        ZGeneration::young()->flip_promote(prev_page, new_page);
        // Defer promoted page registration to limit the number of times the lock is taken
        promoted_pages.push(prev_page);
      }

      SuspendibleThreadSet::yield();
    }

    ZGeneration::young()->register_flip_promoted(promoted_pages);
  }
};

void ZRelocate::flip_age_pages(const ZArray<ZPage*>* pages) {
  ZFlipAgePagesTask flip_age_task(pages);
  workers()->run(&flip_age_task);
}

void ZRelocate::synchronize() {
  _queue.synchronize();
}

void ZRelocate::desynchronize() {
  _queue.desynchronize();
}

ZRelocateQueue* ZRelocate::queue() {
  return &_queue;
}

bool ZRelocate::is_queue_active() const {
  return _queue.is_active();
}