/*
 * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zAbort.inline.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zAllocator.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zForwarding.inline.hpp"
#include "gc/z/zGeneration.inline.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zIndexDistributor.inline.hpp"
#include "gc/z/zIterator.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageAge.hpp"
#include "gc/z/zRelocate.hpp"
#include "gc/z/zRelocationSet.inline.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStackWatermark.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zUncoloredRoot.inline.hpp"
#include "gc/z/zVerify.hpp"
#include "gc/z/zWorkers.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/atomic.hpp"
#include "utilities/debug.hpp"

static const ZStatCriticalPhase ZCriticalPhaseRelocationStall("Relocation Stall");
static const ZStatSubPhase ZSubPhaseConcurrentRelocateRememberedSetFlipPromotedYoung("Concurrent Relocate Remset FP", ZGenerationId::young);

ZRelocateQueue::ZRelocateQueue()
  : _lock(),
    _queue(),
    _nworkers(0),
    _nsynchronized(0),
    _synchronize(false),
    _is_active(false),
    _needs_attention(0) {}

bool ZRelocateQueue::needs_attention() const {
  return Atomic::load(&_needs_attention) != 0;
}

void ZRelocateQueue::inc_needs_attention() {
  const int needs_attention = Atomic::add(&_needs_attention, 1);
  assert(needs_attention == 1 || needs_attention == 2, "Invalid state");
}

void ZRelocateQueue::dec_needs_attention() {
  const int needs_attention = Atomic::sub(&_needs_attention, 1);
  assert(needs_attention == 0 || needs_attention == 1, "Invalid state");
}

void ZRelocateQueue::activate(uint nworkers) {
  _is_active = true;
  join(nworkers);
}

void ZRelocateQueue::deactivate() {
  Atomic::store(&_is_active, false);
  clear();
}

bool ZRelocateQueue::is_active() const {
  return Atomic::load(&_is_active);
}

void ZRelocateQueue::join(uint nworkers) {
  assert(nworkers != 0, "Must request at least one worker");
  assert(_nworkers == 0, "Invalid state");
  assert(_nsynchronized == 0, "Invalid state");

  log_debug(gc, reloc)("Joining workers: %u", nworkers);

  _nworkers = nworkers;
}
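// Worker lifecycle: activate()/join() registers the workers for a relocation
// round, each worker calls leave() when it is done, and resize_workers()
// re-registers the count after a concurrent worker resize. Note that
// _needs_attention counts two independent conditions, a non-empty queue and a
// pending synchronize request, which is why it is asserted to stay in [0, 2].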
void ZRelocateQueue::resize_workers(uint nworkers) {
  assert(nworkers != 0, "Must request at least one worker");
  assert(_nworkers == 0, "Invalid state");
  assert(_nsynchronized == 0, "Invalid state");

  log_debug(gc, reloc)("Resize workers: %u", nworkers);

  ZLocker<ZConditionLock> locker(&_lock);
  _nworkers = nworkers;
}

void ZRelocateQueue::leave() {
  ZLocker<ZConditionLock> locker(&_lock);
  _nworkers--;

  assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);

  log_debug(gc, reloc)("Leaving workers: left: %u _synchronize: %d _nsynchronized: %u", _nworkers, _synchronize, _nsynchronized);

  // Prune done forwardings
  const bool forwardings_done = prune();

  // Check if all workers synchronized
  const bool last_synchronized = _synchronize && _nworkers == _nsynchronized;

  if (forwardings_done || last_synchronized) {
    _lock.notify_all();
  }
}

void ZRelocateQueue::add_and_wait(ZForwarding* forwarding) {
  ZStatTimer timer(ZCriticalPhaseRelocationStall);
  ZLocker<ZConditionLock> locker(&_lock);

  if (forwarding->is_done()) {
    return;
  }

  _queue.append(forwarding);
  if (_queue.length() == 1) {
    // Queue became non-empty
    inc_needs_attention();
    _lock.notify_all();
  }

  while (!forwarding->is_done()) {
    _lock.wait();
  }
}

bool ZRelocateQueue::prune() {
  if (_queue.is_empty()) {
    return false;
  }

  bool done = false;

  for (int i = 0; i < _queue.length();) {
    const ZForwarding* const forwarding = _queue.at(i);
    if (forwarding->is_done()) {
      done = true;

      _queue.delete_at(i);
    } else {
      i++;
    }
  }

  if (_queue.is_empty()) {
    dec_needs_attention();
  }

  return done;
}

ZForwarding* ZRelocateQueue::prune_and_claim() {
  if (prune()) {
    _lock.notify_all();
  }

  for (int i = 0; i < _queue.length(); i++) {
    ZForwarding* const forwarding = _queue.at(i);
    if (forwarding->claim()) {
      return forwarding;
    }
  }

  return nullptr;
}

class ZRelocateQueueSynchronizeThread {
private:
  ZRelocateQueue* const _queue;

public:
  ZRelocateQueueSynchronizeThread(ZRelocateQueue* queue)
    : _queue(queue) {
    _queue->synchronize_thread();
  }

  ~ZRelocateQueueSynchronizeThread() {
    _queue->desynchronize_thread();
  }
};

void ZRelocateQueue::synchronize_thread() {
  _nsynchronized++;

  log_debug(gc, reloc)("Synchronize worker _nsynchronized %u", _nsynchronized);

  assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);
  if (_nsynchronized == _nworkers) {
    // All workers synchronized
    _lock.notify_all();
  }
}

void ZRelocateQueue::desynchronize_thread() {
  _nsynchronized--;

  log_debug(gc, reloc)("Desynchronize worker _nsynchronized %u", _nsynchronized);

  assert(_nsynchronized < _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);
}
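// synchronize_poll() is called by workers on each iteration of their
// relocation loop, before they claim more work from the relocation set
// (see ZRelocateTask::work below):
//
//   for (ZForwarding* forwarding; (forwarding = _queue->synchronize_poll()) != nullptr;) {
//     do_forwarding(forwarding);
//   }
//
// The fast path is a single atomic load; the lock is only taken when a
// stalled mutator has queued a forwarding or a synchronize request is pending.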
ZForwarding* ZRelocateQueue::synchronize_poll() {
  // Fast path avoids locking
  if (!needs_attention()) {
    return nullptr;
  }

  // Slow path to get the next forwarding and/or synchronize
  ZLocker<ZConditionLock> locker(&_lock);

  {
    ZForwarding* const forwarding = prune_and_claim();
    if (forwarding != nullptr) {
      // Don't become synchronized while there are elements in the queue
      return forwarding;
    }
  }

  if (!_synchronize) {
    return nullptr;
  }

  ZRelocateQueueSynchronizeThread rqst(this);

  do {
    _lock.wait();

    ZForwarding* const forwarding = prune_and_claim();
    if (forwarding != nullptr) {
      return forwarding;
    }
  } while (_synchronize);

  return nullptr;
}

void ZRelocateQueue::clear() {
  assert(_nworkers == 0, "Invalid state");

  if (_queue.is_empty()) {
    return;
  }

  ZArrayIterator<ZForwarding*> iter(&_queue);
  for (ZForwarding* forwarding; iter.next(&forwarding);) {
    assert(forwarding->is_done(), "All should be done");
  }

  assert(false, "Clear was not empty");

  _queue.clear();
  dec_needs_attention();
}

void ZRelocateQueue::synchronize() {
  ZLocker<ZConditionLock> locker(&_lock);
  _synchronize = true;

  inc_needs_attention();

  log_debug(gc, reloc)("Synchronize all workers 1 _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);

  while (_nworkers != _nsynchronized) {
    _lock.wait();
    log_debug(gc, reloc)("Synchronize all workers 2 _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);
  }
}

void ZRelocateQueue::desynchronize() {
  ZLocker<ZConditionLock> locker(&_lock);
  _synchronize = false;

  log_debug(gc, reloc)("Desynchronize all workers _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);

  assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);

  dec_needs_attention();

  _lock.notify_all();
}

ZRelocate::ZRelocate(ZGeneration* generation)
  : _generation(generation),
    _queue() {}

ZWorkers* ZRelocate::workers() const {
  return _generation->workers();
}

void ZRelocate::start() {
  _queue.activate(workers()->active_workers());
}

void ZRelocate::add_remset(volatile zpointer* p) {
  ZGeneration::young()->remember(p);
}

static zaddress relocate_object_inner(ZForwarding* forwarding, zaddress from_addr, ZForwardingCursor* cursor) {
  assert(ZHeap::heap()->is_object_live(from_addr), "Should be live");

  // Allocate object
  const size_t size = ZUtils::object_size(from_addr);

  ZAllocatorForRelocation* allocator = ZAllocator::relocation(forwarding->to_age());

  const zaddress to_addr = allocator->alloc_object(size);

  if (is_null(to_addr)) {
    // Allocation failed
    return zaddress::null;
  }

  // Copy object
  ZUtils::object_copy_disjoint(from_addr, to_addr, size);

  // Insert forwarding
  const zaddress to_addr_final = forwarding->insert(from_addr, to_addr, cursor);

  if (to_addr_final != to_addr) {
    // Already relocated, try undo allocation
    allocator->undo_alloc_object(to_addr, size);
  }

  return to_addr_final;
}
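// Note that the copy-then-insert protocol above allows two threads to race to
// relocate the same object: both may copy it, but only the first insert() into
// the forwarding table wins. The loser undoes its allocation (when possible)
// and adopts the winner's copy, so the copy itself needs no synchronization.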
zaddress ZRelocate::relocate_object(ZForwarding* forwarding, zaddress_unsafe from_addr) {
  ZForwardingCursor cursor;

  // Lookup forwarding
  zaddress to_addr = forwarding->find(from_addr, &cursor);
  if (!is_null(to_addr)) {
    // Already relocated
    return to_addr;
  }

  // Relocate object
  if (forwarding->retain_page(&_queue)) {
    assert(_generation->is_phase_relocate(), "Must be");
    to_addr = relocate_object_inner(forwarding, safe(from_addr), &cursor);
    forwarding->release_page();

    if (!is_null(to_addr)) {
      // Success
      return to_addr;
    }

    // Failed to relocate object. Signal and wait for a worker thread to
    // complete relocation of this page, and then forward the object.
    _queue.add_and_wait(forwarding);
  }

  // Forward object
  return forward_object(forwarding, from_addr);
}

zaddress ZRelocate::forward_object(ZForwarding* forwarding, zaddress_unsafe from_addr) {
  const zaddress to_addr = forwarding->find(from_addr);
  assert(!is_null(to_addr), "Should be forwarded: " PTR_FORMAT, untype(from_addr));
  return to_addr;
}

static ZPage* alloc_page(ZAllocatorForRelocation* allocator, ZPageType type, size_t size) {
  if (ZStressRelocateInPlace) {
    // Simulate failure to allocate a new page. This will
    // cause the page being relocated to be relocated in-place.
    return nullptr;
  }

  ZAllocationFlags flags;
  flags.set_non_blocking();
  flags.set_gc_relocation();

  return allocator->alloc_page_for_relocation(type, size, flags);
}

static void retire_target_page(ZGeneration* generation, ZPage* page) {
  if (generation->is_young() && page->is_old()) {
    generation->increase_promoted(page->used());
  } else {
    generation->increase_compacted(page->used());
  }

  // Free target page if it is empty. We can end up with an empty target
  // page if we allocated a new target page, and then lost the race to
  // relocate the remaining objects, leaving the target page empty when
  // relocation completed.
  if (page->used() == 0) {
    ZHeap::heap()->free_page(page);
  }
}
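// Two allocator flavors drive the relocation work below: the small allocator
// gives each worker a private target page and can therefore use non-atomic
// bump allocation, while the medium allocator shares one target page per age
// across all workers and must allocate atomically.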
class ZRelocateSmallAllocator {
private:
  ZGeneration* const _generation;
  volatile size_t    _in_place_count;

public:
  ZRelocateSmallAllocator(ZGeneration* generation)
    : _generation(generation),
      _in_place_count(0) {}

  ZPage* alloc_and_retire_target_page(ZForwarding* forwarding, ZPage* target) {
    ZAllocatorForRelocation* const allocator = ZAllocator::relocation(forwarding->to_age());
    ZPage* const page = alloc_page(allocator, forwarding->type(), forwarding->size());
    if (page == nullptr) {
      Atomic::inc(&_in_place_count);
    }

    if (target != nullptr) {
      // Retire the old target page
      retire_target_page(_generation, target);
    }

    return page;
  }

  void share_target_page(ZPage* page) {
    // Does nothing
  }

  void free_target_page(ZPage* page) {
    if (page != nullptr) {
      retire_target_page(_generation, page);
    }
  }

  zaddress alloc_object(ZPage* page, size_t size) const {
    return (page != nullptr) ? page->alloc_object(size) : zaddress::null;
  }

  void undo_alloc_object(ZPage* page, zaddress addr, size_t size) const {
    page->undo_alloc_object(addr, size);
  }

  size_t in_place_count() const {
    return _in_place_count;
  }
};
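// The medium allocator serializes target-page replacement: when a new target
// page cannot be allocated, _in_place is raised and other workers wait until
// the worker performing the in-place relocation publishes a reusable page via
// share_target_page().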
class ZRelocateMediumAllocator {
private:
  ZGeneration* const _generation;
  ZConditionLock     _lock;
  ZPage*             _shared[ZAllocator::_relocation_allocators];
  bool               _in_place;
  volatile size_t    _in_place_count;

public:
  ZRelocateMediumAllocator(ZGeneration* generation)
    : _generation(generation),
      _lock(),
      _shared(),
      _in_place(false),
      _in_place_count(0) {}

  ~ZRelocateMediumAllocator() {
    for (uint i = 0; i < ZAllocator::_relocation_allocators; ++i) {
      if (_shared[i] != nullptr) {
        retire_target_page(_generation, _shared[i]);
      }
    }
  }

  ZPage* shared(ZPageAge age) {
    return _shared[static_cast<uint>(age) - 1];
  }

  void set_shared(ZPageAge age, ZPage* page) {
    _shared[static_cast<uint>(age) - 1] = page;
  }

  ZPage* alloc_and_retire_target_page(ZForwarding* forwarding, ZPage* target) {
    ZLocker<ZConditionLock> locker(&_lock);

    // Wait for any ongoing in-place relocation to complete
    while (_in_place) {
      _lock.wait();
    }

    // Allocate a new page only if the shared page is the same as the
    // current target page. The shared page will be different from the
    // current target page if another thread shared a page, or allocated
    // a new page.
    const ZPageAge to_age = forwarding->to_age();
    if (shared(to_age) == target) {
      ZAllocatorForRelocation* const allocator = ZAllocator::relocation(forwarding->to_age());
      ZPage* const to_page = alloc_page(allocator, forwarding->type(), forwarding->size());
      set_shared(to_age, to_page);
      if (to_page == nullptr) {
        Atomic::inc(&_in_place_count);
        _in_place = true;
      }

      // This thread is responsible for retiring the shared target page
      if (target != nullptr) {
        retire_target_page(_generation, target);
      }
    }

    return shared(to_age);
  }
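  // Called by the worker that performed an in-place relocation, once the page
  // has been emptied and reset. Publishing it as the shared target page clears
  // _in_place and wakes any workers blocked in alloc_and_retire_target_page().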
  void share_target_page(ZPage* page) {
    const ZPageAge age = page->age();

    ZLocker<ZConditionLock> locker(&_lock);
    assert(_in_place, "Invalid state");
    assert(shared(age) == nullptr, "Invalid state");
    assert(page != nullptr, "Invalid page");

    set_shared(age, page);
    _in_place = false;

    _lock.notify_all();
  }

  void free_target_page(ZPage* page) {
    // Does nothing
  }

  zaddress alloc_object(ZPage* page, size_t size) const {
    return (page != nullptr) ? page->alloc_object_atomic(size) : zaddress::null;
  }

  void undo_alloc_object(ZPage* page, zaddress addr, size_t size) const {
    page->undo_alloc_object_atomic(addr, size);
  }

  size_t in_place_count() const {
    return _in_place_count;
  }
};

template <typename Allocator>
class ZRelocateWork : public StackObj {
private:
  Allocator* const   _allocator;
  ZForwarding*       _forwarding;
  ZPage*             _target[ZAllocator::_relocation_allocators];
  ZGeneration* const _generation;
  size_t             _other_promoted;
  size_t             _other_compacted;

  ZPage* target(ZPageAge age) {
    return _target[static_cast<uint>(age) - 1];
  }

  void set_target(ZPageAge age, ZPage* page) {
    _target[static_cast<uint>(age) - 1] = page;
  }

  size_t object_alignment() const {
    return (size_t)1 << _forwarding->object_alignment_shift();
  }

  void increase_other_forwarded(size_t unaligned_object_size) {
    const size_t aligned_size = align_up(unaligned_object_size, object_alignment());
    if (_forwarding->is_promotion()) {
      _other_promoted += aligned_size;
    } else {
      _other_compacted += aligned_size;
    }
  }

  zaddress try_relocate_object_inner(zaddress from_addr) {
    ZForwardingCursor cursor;

    ZPage* const to_page = target(_forwarding->to_age());

    // Lookup forwarding
    {
      const zaddress to_addr = _forwarding->find(from_addr, &cursor);
      if (!is_null(to_addr)) {
        // Already relocated
        const size_t size = ZUtils::object_size(to_addr);
        increase_other_forwarded(size);
        return to_addr;
      }
    }

    // Allocate object
    const size_t size = ZUtils::object_size(from_addr);
    const zaddress allocated_addr = _allocator->alloc_object(to_page, size);
    if (is_null(allocated_addr)) {
      // Allocation failed
      return zaddress::null;
    }

    // Copy object. Use conjoint copying if we are relocating
    // in-place and the new object overlaps with the old object.
    if (_forwarding->in_place_relocation() && allocated_addr + size > from_addr) {
      ZUtils::object_copy_conjoint(from_addr, allocated_addr, size);
    } else {
      ZUtils::object_copy_disjoint(from_addr, allocated_addr, size);
    }

    // Insert forwarding
    const zaddress to_addr = _forwarding->insert(from_addr, allocated_addr, &cursor);
    if (to_addr != allocated_addr) {
      // Already relocated, undo allocation
      _allocator->undo_alloc_object(to_page, to_addr, size);
      increase_other_forwarded(size);
    }

    return to_addr;
  }

  void update_remset_old_to_old(zaddress from_addr, zaddress to_addr) const {
    // Old-to-old relocation - move existing remset bits

    // If this is called for an in-place relocated page, then this code has the
    // responsibility to clear the old remset bits. Extra care is needed because:
    //
    // 1) The to-object copy can overlap with the from-object copy
    // 2) Remset bits of old objects need to be cleared
    //
    // A watermark is used to keep track of how far the old remset bits have been removed.

    const bool in_place = _forwarding->in_place_relocation();
    ZPage* const from_page = _forwarding->page();
    const uintptr_t from_local_offset = from_page->local_offset(from_addr);

    // Note: even with in-place relocation, the to_page could be another page
    ZPage* const to_page = ZHeap::heap()->page(to_addr);

    // Uses _relaxed version to handle that in-place relocation resets _top
    assert(ZHeap::heap()->is_in_page_relaxed(from_page, from_addr), "Must be");
    assert(to_page->is_in(to_addr), "Must be");

    // Read the size from the to-object, since the from-object
    // could have been overwritten during in-place relocation.
    const size_t size = ZUtils::object_size(to_addr);

    // If a young generation collection started while the old generation
    // relocated objects, the remembered set bits were flipped from "current"
    // to "previous".
    //
    // We need to select the correct remembered set bitmap to ensure that the
    // old remset bits are found.
    //
    // Note that if the young generation marking (remset scanning) finishes
    // before the old generation relocation has relocated this page, then the
    // young generation will visit this page's previous remembered set bits
    // and move them over to the current bitmap.
    //
    // If the young generation runs multiple cycles while the old generation is
    // relocating, then the first cycle will have consumed the old remset bits
    // and moved the associated objects to a new old page. The old relocation
    // could find either of the two bitmaps. So, either it will find the
    // original remset bits for the page, or it will find an empty bitmap for
    // the page. It doesn't matter for correctness, because the young
    // generation marking has already taken care of the bits.

    const bool active_remset_is_current = ZGeneration::old()->active_remset_is_current();

    // When in-place relocation is done and the old remset bits are located in
    // the bitmap that is going to be used for the new remset bits, then we
    // need to clear the old bits before the new bits are inserted.
    const bool iterate_current_remset = active_remset_is_current && !in_place;

    BitMap::Iterator iter = iterate_current_remset
        ? from_page->remset_iterator_limited_current(from_local_offset, size)
        : from_page->remset_iterator_limited_previous(from_local_offset, size);

    for (BitMap::idx_t field_bit : iter) {
      const uintptr_t field_local_offset = ZRememberedSet::to_offset(field_bit);

      // Add remset entry in the to-page
      const uintptr_t offset = field_local_offset - from_local_offset;
      const zaddress to_field = to_addr + offset;
      log_trace(gc, reloc)("Remember: from: " PTR_FORMAT " to: " PTR_FORMAT " current: %d marking: %d page: " PTR_FORMAT " remset: " PTR_FORMAT,
          untype(from_page->start() + field_local_offset), untype(to_field), active_remset_is_current, ZGeneration::young()->is_phase_mark(), p2i(to_page), p2i(to_page->remset_current()));

      volatile zpointer* const p = (volatile zpointer*)to_field;

      if (ZGeneration::young()->is_phase_mark()) {
        // Young generation remembered set scanning needs to know about this
        // field. It will take responsibility to add a new remembered set entry if needed.
        _forwarding->relocated_remembered_fields_register(p);
      } else {
        to_page->remember(p);
        if (in_place) {
          assert(to_page->is_remembered(p), "p: " PTR_FORMAT, p2i(p));
        }
      }
    }
  }
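  // Promotion differs from old-to-old relocation: a freshly promoted object
  // has no existing remset bits to move, so each of its fields is examined and
  // a remset entry is added only for fields that still point into the young
  // generation (see update_remset_promoted below).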
  static bool add_remset_if_young(volatile zpointer* p, zaddress addr) {
    if (ZHeap::heap()->is_young(addr)) {
      ZRelocate::add_remset(p);
      return true;
    }

    return false;
  }

  static void update_remset_promoted_filter_and_remap_per_field(volatile zpointer* p) {
    const zpointer ptr = Atomic::load(p);

    assert(ZPointer::is_old_load_good(ptr), "Should be at least old load good: " PTR_FORMAT, untype(ptr));

    if (ZPointer::is_store_good(ptr)) {
      // Already has a remset entry
      return;
    }

    if (ZPointer::is_load_good(ptr)) {
      if (!is_null_any(ptr)) {
        const zaddress addr = ZPointer::uncolor(ptr);
        add_remset_if_young(p, addr);
      }
      // No need to remap, it is already load good
      return;
    }

    if (is_null_any(ptr)) {
      // Eagerly remap to skip adding a remset entry just to get deferred remapping
      ZBarrier::remap_young_relocated(p, ptr);
      return;
    }

    const zaddress_unsafe addr_unsafe = ZPointer::uncolor_unsafe(ptr);
    ZForwarding* const forwarding = ZGeneration::young()->forwarding(addr_unsafe);

    if (forwarding == nullptr) {
      // Object isn't being relocated
      const zaddress addr = safe(addr_unsafe);
      if (!add_remset_if_young(p, addr)) {
        // Not young - eagerly remap to skip adding a remset entry just to get deferred remapping
        ZBarrier::remap_young_relocated(p, ptr);
      }
      return;
    }

    const zaddress addr = forwarding->find(addr_unsafe);

    if (!is_null(addr)) {
      // Object has already been relocated
      if (!add_remset_if_young(p, addr)) {
        // Not young - eagerly remap to skip adding a remset entry just to get deferred remapping
        ZBarrier::remap_young_relocated(p, ptr);
      }
      return;
    }

    // Object has not been relocated yet.
    // Don't want to eagerly relocate objects, so just add a remset.
    ZRelocate::add_remset(p);
  }

  void update_remset_promoted(zaddress to_addr) const {
    ZIterator::basic_oop_iterate(to_oop(to_addr), update_remset_promoted_filter_and_remap_per_field);
  }

  void update_remset_for_fields(zaddress from_addr, zaddress to_addr) const {
    if (_forwarding->to_age() != ZPageAge::old) {
      // No remembered set in young pages
      return;
    }

    // Need to deal with remset when moving objects to the old generation
    if (_forwarding->from_age() == ZPageAge::old) {
      update_remset_old_to_old(from_addr, to_addr);
      return;
    }

    // Normal promotion
    update_remset_promoted(to_addr);
  }

  bool try_relocate_object(zaddress from_addr) {
    const zaddress to_addr = try_relocate_object_inner(from_addr);

    if (is_null(to_addr)) {
      return false;
    }

    update_remset_for_fields(from_addr, to_addr);

    return true;
  }
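  // In-place relocation support. When no new target page can be allocated,
  // the page being relocated is reset and reused as its own target page; the
  // helpers below prepare its remembered set and set up the to-space page.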
  void start_in_place_relocation_prepare_remset(ZPage* from_page) {
    if (_forwarding->from_age() != ZPageAge::old) {
      // Only old pages use remset bits
      return;
    }

    if (ZGeneration::old()->active_remset_is_current()) {
      // We want to iterate over and clear the remset bits of the from-space
      // page, and insert current bits in the to-space page. However, with
      // in-place relocation, the from-space and to-space pages are the same.
      // Clearing is destructive, and is difficult to perform before or during
      // the iteration. However, clearing of the current bits has to be done
      // before exposing the to-space objects in the forwarding table.
      //
      // To solve this tricky dependency problem, we start by stashing away the
      // current bits in the previous bits, and clearing the current bits
      // (implemented by swapping the bits). This way, the current bits are
      // cleared before copying the objects (like a normal to-space page),
      // and the previous bits represent a copy of the current bits
      // of the from-space page, and are used for iteration.
      from_page->swap_remset_bitmaps();
    }
  }

  ZPage* start_in_place_relocation(zoffset relocated_watermark) {
    _forwarding->in_place_relocation_claim_page();
    _forwarding->in_place_relocation_start(relocated_watermark);

    ZPage* const from_page = _forwarding->page();

    const ZPageAge to_age = _forwarding->to_age();
    const bool promotion = _forwarding->is_promotion();

    // Promotions happen through a new cloned page
    ZPage* const to_page = promotion ? from_page->clone_limited() : from_page;
    to_page->reset(to_age, ZPageResetType::InPlaceRelocation);

    // Clear remset bits for all objects that were relocated
    // before this page became an in-place relocated page.
    start_in_place_relocation_prepare_remset(from_page);

    if (promotion) {
      // Register the promotion
      ZGeneration::young()->in_place_relocate_promote(from_page, to_page);
      ZGeneration::young()->register_in_place_relocate_promoted(from_page);
    }

    return to_page;
  }
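  // Relocates a single object, retrying with a freshly allocated target page
  // on allocation failure, and falling back to in-place relocation as the
  // last resort when no new page can be allocated.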
  void relocate_object(oop obj) {
    const zaddress addr = to_zaddress(obj);
    assert(ZHeap::heap()->is_object_live(addr), "Should be live");

    while (!try_relocate_object(addr)) {
      // Allocate a new target page, or if that fails, use the page being
      // relocated as the new target, which will cause it to be relocated
      // in-place.
      const ZPageAge to_age = _forwarding->to_age();
      ZPage* to_page = _allocator->alloc_and_retire_target_page(_forwarding, target(to_age));
      set_target(to_age, to_page);
      if (to_page != nullptr) {
        continue;
      }

      // Start in-place relocation to block other threads from accessing
      // the page, or its forwarding table, until it has been released
      // (relocation completed).
      to_page = start_in_place_relocation(ZAddress::offset(addr));
      set_target(to_age, to_page);
    }
  }

public:
  ZRelocateWork(Allocator* allocator, ZGeneration* generation)
    : _allocator(allocator),
      _forwarding(nullptr),
      _target(),
      _generation(generation),
      _other_promoted(0),
      _other_compacted(0) {}

  ~ZRelocateWork() {
    for (uint i = 0; i < ZAllocator::_relocation_allocators; ++i) {
      _allocator->free_target_page(_target[i]);
    }
    // Report statistics on behalf of non-worker threads
    _generation->increase_promoted(_other_promoted);
    _generation->increase_compacted(_other_compacted);
  }

  bool active_remset_is_current() const {
    // Normal old-to-old relocation can treat the from-page remset as a
    // read-only copy, and then copy over the appropriate remset bits to the
    // cleared to-page's 'current' remset bitmap.
    //
    // In-place relocation is more complicated. Since the same page is both
    // a from-page and a to-page, we need to remove the old remset bits, and
    // add remset bits that correspond to the new locations of the relocated
    // objects.
    //
    // Depending on how long ago the page was allocated (in terms of the
    // number of young GCs and the current young GC's phase), the active
    // remembered set will be in either the 'current' or 'previous' bitmap.
    //
    // If the active bits are in the 'previous' bitmap, we know that the
    // 'current' bitmap was cleared at some earlier point in time, and we can
    // simply set new bits in the 'current' bitmap, and later, when relocation
    // has read all the old remset bits, we can just clear the 'previous'
    // remset bitmap.
    //
    // If, on the other hand, the active bits are in the 'current' bitmap, then
    // that bitmap will be used both to read the old remset bits and as the
    // destination for the remset bits that we copy when an object is copied
    // to its new location within the page. We need to *carefully* remove
    // all old remset bits, without clearing out the newly set bits.
    return ZGeneration::old()->active_remset_is_current();
  }

  void clear_remset_before_reuse(ZPage* page, bool in_place) {
    if (_forwarding->from_age() != ZPageAge::old) {
      // No remset bits
      return;
    }

    if (in_place) {
      // Clear 'previous' remset bits. For in-place relocated pages, the
      // previous remset bits are always used, even when
      // active_remset_is_current().
      page->clear_remset_previous();

      return;
    }

    // Normal relocate

    // Clear active remset bits
    if (active_remset_is_current()) {
      page->clear_remset_current();
    } else {
      page->clear_remset_previous();
    }

    // Verify that inactive remset bits are all cleared
    if (active_remset_is_current()) {
      page->verify_remset_cleared_previous();
    } else {
      page->verify_remset_cleared_current();
    }
  }

  void finish_in_place_relocation() {
    // We are done with the from_space copy of the page
    _forwarding->in_place_relocation_finish();
  }
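  // Drives the relocation of one page: relocates all live objects, handles
  // the in-place case, releases the page, and finally recycles it, either by
  // freeing it or, after an in-place relocation, by sharing it as the next
  // target page.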
  void do_forwarding(ZForwarding* forwarding) {
    _forwarding = forwarding;

    _forwarding->page()->log_msg(" (relocate page)");

    ZVerify::before_relocation(_forwarding);

    // Relocate objects
    _forwarding->object_iterate([&](oop obj) { relocate_object(obj); });

    ZVerify::after_relocation(_forwarding);

    // Verify
    if (ZVerifyForwarding) {
      _forwarding->verify();
    }

    _generation->increase_freed(_forwarding->page()->size());

    // Deal with in-place relocation
    const bool in_place = _forwarding->in_place_relocation();
    if (in_place) {
      finish_in_place_relocation();
    }

    // Old from-space pages need to deal with remset bits
    if (_forwarding->from_age() == ZPageAge::old) {
      _forwarding->relocated_remembered_fields_after_relocate();
    }

    // Release relocated page
    _forwarding->release_page();

    if (in_place) {
      // Wait for all other threads to call release_page
      ZPage* const page = _forwarding->detach_page();

      // Ensure that previous remset bits are cleared
      clear_remset_before_reuse(page, true /* in_place */);

      page->log_msg(" (relocate page done in-place)");

      // Different pages when promoting
      ZPage* const target_page = target(_forwarding->to_age());
      _allocator->share_target_page(target_page);

    } else {
      // Wait for all other threads to call release_page
      ZPage* const page = _forwarding->detach_page();

      // Ensure that all remset bits are cleared
      // Note: cleared after detach_page, when we know that
      // the young generation isn't scanning the remset.
      clear_remset_before_reuse(page, false /* in_place */);

      page->log_msg(" (relocate page done normal)");

      // Free page
      ZHeap::heap()->free_page(page);
    }
  }
};

class ZRelocateStoreBufferInstallBasePointersThreadClosure : public ThreadClosure {
public:
  virtual void do_thread(Thread* thread) {
    JavaThread* const jt = JavaThread::cast(thread);
    ZStoreBarrierBuffer* buffer = ZThreadLocalData::store_barrier_buffer(jt);
    buffer->install_base_pointers();
  }
};

// Installs the object base pointers (object starts) for the fields written
// in the store buffer. The code that searches for the object start uses the
// liveness information stored in the pages. That information is lost when the
// pages have been relocated and then destroyed.
class ZRelocateStoreBufferInstallBasePointersTask : public ZTask {
private:
  ZJavaThreadsIterator _threads_iter;

public:
  ZRelocateStoreBufferInstallBasePointersTask(ZGeneration* generation)
    : ZTask("ZRelocateStoreBufferInstallBasePointersTask"),
      _threads_iter(generation->id_optional()) {}

  virtual void work() {
    ZRelocateStoreBufferInstallBasePointersThreadClosure fix_store_buffer_cl;
    _threads_iter.apply(&fix_store_buffer_cl);
  }
};
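// The main relocation task. Workers interleave two sources of work: stalled
// mutator requests taken from the ZRelocateQueue, which are serviced first
// because the waiting threads cannot block safely, and forwardings claimed
// from the relocation set itself.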
class ZRelocateTask : public ZRestartableTask {
private:
  ZRelocationSetParallelIterator _iter;
  ZGeneration* const             _generation;
  ZRelocateQueue* const          _queue;
  ZRelocateSmallAllocator        _small_allocator;
  ZRelocateMediumAllocator       _medium_allocator;

public:
  ZRelocateTask(ZRelocationSet* relocation_set, ZRelocateQueue* queue)
    : ZRestartableTask("ZRelocateTask"),
      _iter(relocation_set),
      _generation(relocation_set->generation()),
      _queue(queue),
      _small_allocator(_generation),
      _medium_allocator(_generation) {}

  ~ZRelocateTask() {
    _generation->stat_relocation()->at_relocate_end(_small_allocator.in_place_count(), _medium_allocator.in_place_count());

    // Signal that we're not using the queue anymore. Used mostly for asserts.
    _queue->deactivate();
  }

  virtual void work() {
    ZRelocateWork<ZRelocateSmallAllocator> small(&_small_allocator, _generation);
    ZRelocateWork<ZRelocateMediumAllocator> medium(&_medium_allocator, _generation);

    const auto do_forwarding = [&](ZForwarding* forwarding) {
      ZPage* const page = forwarding->page();
      if (page->is_small()) {
        small.do_forwarding(forwarding);
      } else {
        medium.do_forwarding(forwarding);
      }

      // Absolute last thing done while relocating a page.
      //
      // We don't use the SuspendibleThreadSet when relocating pages.
      // Instead the ZRelocateQueue is used as a pseudo STS joiner/leaver.
      //
      // After the mark_done call a safepoint could occur and a new
      // GC phase could be entered.
      forwarding->mark_done();
    };

    const auto claim_and_do_forwarding = [&](ZForwarding* forwarding) {
      if (forwarding->claim()) {
        do_forwarding(forwarding);
      }
    };

    const auto do_forwarding_one_from_iter = [&]() {
      ZForwarding* forwarding;

      if (_iter.next(&forwarding)) {
        claim_and_do_forwarding(forwarding);
        return true;
      }

      return false;
    };

    for (;;) {
      // As long as there are requests in the relocate queue, there are threads
      // waiting in a VM state that does not allow them to be blocked. The
      // worker thread needs to finish relocating these pages, and allow the
      // other threads to continue and proceed to a blocking state. After that,
      // the worker threads are allowed to safepoint synchronize.
      for (ZForwarding* forwarding; (forwarding = _queue->synchronize_poll()) != nullptr;) {
        do_forwarding(forwarding);
      }

      if (!do_forwarding_one_from_iter()) {
        // No more work
        break;
      }

      if (_generation->should_worker_resize()) {
        break;
      }
    }

    _queue->leave();
  }

  virtual void resize_workers(uint nworkers) {
    _queue->resize_workers(nworkers);
  }
};

static void remap_and_maybe_add_remset(volatile zpointer* p) {
  const zpointer ptr = Atomic::load(p);

  if (ZPointer::is_store_good(ptr)) {
    // Already has a remset entry
    return;
  }

  // Remset entries are used for two reasons:
  // 1) Young marking old-to-young pointer roots
  // 2) Deferred remapping of stale old-to-young pointers
  //
  // This load barrier will up-front perform the remapping of (2),
  // and the code below only has to make sure we register up-to-date
  // old-to-young pointers for (1).
  const zaddress addr = ZBarrier::load_barrier_on_oop_field_preloaded(p, ptr);

  if (is_null(addr)) {
    // No need for remset entries for null pointers
    return;
  }

  if (ZHeap::heap()->is_old(addr)) {
    // No need for remset entries for pointers to old gen
    return;
  }

  ZRelocate::add_remset(p);
}

class ZRelocateAddRemsetForFlipPromoted : public ZRestartableTask {
private:
  ZStatTimerYoung                _timer;
  ZArrayParallelIterator<ZPage*> _iter;

public:
  ZRelocateAddRemsetForFlipPromoted(ZArray<ZPage*>* pages)
    : ZRestartableTask("ZRelocateAddRemsetForFlipPromoted"),
      _timer(ZSubPhaseConcurrentRelocateRememberedSetFlipPromotedYoung),
      _iter(pages) {}

  virtual void work() {
    SuspendibleThreadSetJoiner sts_joiner;

    for (ZPage* page; _iter.next(&page);) {
      page->object_iterate([&](oop obj) {
        ZIterator::basic_oop_iterate_safe(obj, remap_and_maybe_add_remset);
      });

      SuspendibleThreadSet::yield();
      if (ZGeneration::young()->should_worker_resize()) {
        return;
      }
    }
  }
};

void ZRelocate::relocate(ZRelocationSet* relocation_set) {
  {
    // Install the store buffer's base pointers before the
    // relocate task destroys the liveness information in
    // the relocated pages.
    ZRelocateStoreBufferInstallBasePointersTask buffer_task(_generation);
    workers()->run(&buffer_task);
  }

  {
    ZRelocateTask relocate_task(relocation_set, &_queue);
    workers()->run(&relocate_task);
  }

  if (relocation_set->generation()->is_young()) {
    ZRelocateAddRemsetForFlipPromoted task(relocation_set->flip_promoted_pages());
    workers()->run(&task);
  }
}
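// Computes the age objects on a page will be relocated to. Ages advance one
// step per young collection until the tenuring threshold is reached, at which
// point the objects are promoted to old. For example, assuming the usual age
// ordering (eden, survivor1, survivor2, ...), a tenuring threshold of 2 gives
// the sequence eden -> survivor1 -> survivor2 -> old.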
ZPageAge ZRelocate::compute_to_age(ZPageAge from_age) {
  if (from_age == ZPageAge::old) {
    return ZPageAge::old;
  }

  const uint age = static_cast<uint>(from_age);
  if (age >= ZGeneration::young()->tenuring_threshold()) {
    return ZPageAge::old;
  }

  return static_cast<ZPageAge>(age + 1);
}

class ZFlipAgePagesTask : public ZTask {
private:
  ZArrayParallelIterator<ZPage*> _iter;

public:
  ZFlipAgePagesTask(const ZArray<ZPage*>* pages)
    : ZTask("ZPromotePagesTask"),
      _iter(pages) {}

  virtual void work() {
    SuspendibleThreadSetJoiner sts_joiner;
    ZArray<ZPage*> promoted_pages;

    for (ZPage* prev_page; _iter.next(&prev_page);) {
      const ZPageAge from_age = prev_page->age();
      const ZPageAge to_age = ZRelocate::compute_to_age(from_age);
      assert(from_age != ZPageAge::old, "invalid age for a young collection");

      // Figure out if this is proper promotion
      const bool promotion = to_age == ZPageAge::old;

      if (promotion) {
        // Before promoting an object (and before relocate start), we must ensure that all
        // contained zpointers are store good. The marking code ensures that for non-null
        // pointers, but null pointers are ignored. This code ensures that even null pointers
        // are made store good, for the promoted objects.
        prev_page->object_iterate([&](oop obj) {
          ZIterator::basic_oop_iterate_safe(obj, ZBarrier::promote_barrier_on_young_oop_field);
        });
      }

      // Logging
      prev_page->log_msg(promotion ? " (flip promoted)" : " (flip survived)");

      // Setup to-space page
      ZPage* const new_page = promotion ? prev_page->clone_limited_promote_flipped() : prev_page;
      new_page->reset(to_age, ZPageResetType::FlipAging);

      if (promotion) {
        ZGeneration::young()->flip_promote(prev_page, new_page);
        // Defer promoted page registration to limit the number of times the lock is taken
        promoted_pages.push(prev_page);
      }

      SuspendibleThreadSet::yield();
    }

    ZGeneration::young()->register_flip_promoted(promoted_pages);
  }
};

void ZRelocate::flip_age_pages(const ZArray<ZPage*>* pages) {
  ZFlipAgePagesTask flip_age_task(pages);
  workers()->run(&flip_age_task);
}

void ZRelocate::synchronize() {
  _queue.synchronize();
}

void ZRelocate::desynchronize() {
  _queue.desynchronize();
}

ZRelocateQueue* ZRelocate::queue() {
  return &_queue;
}

bool ZRelocate::is_queue_active() const {
  return _queue.is_active();
}