/*
 * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "gc/shared/gc_globals.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zAbort.inline.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zForwarding.inline.hpp"
#include "gc/z/zGeneration.inline.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zIndexDistributor.inline.hpp"
#include "gc/z/zIterator.inline.hpp"
#include "gc/z/zNUMA.inline.hpp"
#include "gc/z/zObjectAllocator.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageAge.inline.hpp"
#include "gc/z/zRelocate.hpp"
#include "gc/z/zRelocationSet.inline.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStackWatermark.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zStringDedup.inline.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zUncoloredRoot.inline.hpp"
#include "gc/z/zValue.inline.hpp"
#include "gc/z/zVerify.hpp"
#include "gc/z/zWorkers.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/debug.hpp"

static const ZStatCriticalPhase ZCriticalPhaseRelocationStall("Relocation Stall");
static const ZStatSubPhase ZSubPhaseConcurrentRelocateRememberedSetFlipPromotedYoung("Concurrent Relocate Remset FP", ZGenerationId::young);

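// The relocation queue allows threads that fail to relocate an object to hand
// the page's forwarding over to the GC worker threads. Workers poll the queue
// while relocating the relocation set and complete queued pages first, so that
// the waiting threads can make progress.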
ZRelocateQueue::ZRelocateQueue()
  : _lock(),
    _queue(),
    _nworkers(0),
    _nsynchronized(0),
    _synchronize(false),
    _is_active(false),
    _needs_attention(0) {}

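// _needs_attention is a lock-free flag checked on the workers' fast path in
// synchronize_poll(). It is incremented once when the queue becomes non-empty
// and once when synchronization is requested, hence the valid range 0-2.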
bool ZRelocateQueue::needs_attention() const {
  return AtomicAccess::load(&_needs_attention) != 0;
}

void ZRelocateQueue::inc_needs_attention() {
  const int needs_attention = AtomicAccess::add(&_needs_attention, 1);
  assert(needs_attention == 1 || needs_attention == 2, "Invalid state");
}

void ZRelocateQueue::dec_needs_attention() {
  const int needs_attention = AtomicAccess::sub(&_needs_attention, 1);
  assert(needs_attention == 0 || needs_attention == 1, "Invalid state");
}

void ZRelocateQueue::activate(uint nworkers) {
  _is_active = true;
  join(nworkers);
}

void ZRelocateQueue::deactivate() {
  AtomicAccess::store(&_is_active, false);
  clear();
}

bool ZRelocateQueue::is_active() const {
  return AtomicAccess::load(&_is_active);
}

void ZRelocateQueue::join(uint nworkers) {
  assert(nworkers != 0, "Must request at least one worker");
  assert(_nworkers == 0, "Invalid state");
  assert(_nsynchronized == 0, "Invalid state");

  log_debug(gc, reloc)("Joining workers: %u", nworkers);

  _nworkers = nworkers;
}

void ZRelocateQueue::resize_workers(uint nworkers) {
  assert(nworkers != 0, "Must request at least one worker");
  assert(_nworkers == 0, "Invalid state");
  assert(_nsynchronized == 0, "Invalid state");

  log_debug(gc, reloc)("Resize workers: %u", nworkers);

  ZLocker<ZConditionLock> locker(&_lock);
  _nworkers = nworkers;
}

void ZRelocateQueue::leave() {
  ZLocker<ZConditionLock> locker(&_lock);
  _nworkers--;

  assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);

  log_debug(gc, reloc)("Leaving workers: left: %u _synchronize: %d _nsynchronized: %u", _nworkers, _synchronize, _nsynchronized);

  // Prune done forwardings
  const bool forwardings_done = prune();

  // Check if all workers synchronized
  const bool last_synchronized = _synchronize && _nworkers == _nsynchronized;

  if (forwardings_done || last_synchronized) {
    _lock.notify_all();
  }
}

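// Called by a thread that failed to relocate an object. Adds the forwarding
// to the queue and blocks until a worker thread has completed relocation of
// the page. The wait time is accounted for as a relocation stall.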
void ZRelocateQueue::add_and_wait(ZForwarding* forwarding) {
  ZStatTimer timer(ZCriticalPhaseRelocationStall);
  ZLocker<ZConditionLock> locker(&_lock);

  if (forwarding->is_done()) {
    return;
  }

  _queue.append(forwarding);
  if (_queue.length() == 1) {
    // Queue became non-empty
    inc_needs_attention();
    _lock.notify_all();
  }

  while (!forwarding->is_done()) {
    _lock.wait();
  }
}

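// Removes completed forwardings from the queue. Returns true if at least one
// forwarding was done, in which case the caller should notify threads blocked
// in add_and_wait().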
bool ZRelocateQueue::prune() {
  if (_queue.is_empty()) {
    return false;
  }

  bool done = false;

  for (int i = 0; i < _queue.length();) {
    const ZForwarding* const forwarding = _queue.at(i);
    if (forwarding->is_done()) {
      done = true;

      _queue.delete_at(i);
    } else {
      i++;
    }
  }

  if (_queue.is_empty()) {
    dec_needs_attention();
  }

  return done;
}

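// Prunes completed forwardings and then tries to claim the next queued
// forwarding for the calling worker. Returns nullptr if there is no unclaimed
// forwarding in the queue.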
ZForwarding* ZRelocateQueue::prune_and_claim() {
  if (prune()) {
    _lock.notify_all();
  }

  for (int i = 0; i < _queue.length(); i++) {
    ZForwarding* const forwarding = _queue.at(i);
    if (forwarding->claim()) {
      return forwarding;
    }
  }

  return nullptr;
}

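// Scoped helper that counts the calling worker as synchronized for the
// duration of the wait loop in synchronize_poll().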
class ZRelocateQueueSynchronizeThread {
private:
  ZRelocateQueue* const _queue;

public:
  ZRelocateQueueSynchronizeThread(ZRelocateQueue* queue)
    : _queue(queue) {
    _queue->synchronize_thread();
  }

  ~ZRelocateQueueSynchronizeThread() {
    _queue->desynchronize_thread();
  }
};

void ZRelocateQueue::synchronize_thread() {
  _nsynchronized++;

  log_debug(gc, reloc)("Synchronize worker _nsynchronized %u", _nsynchronized);

  assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);
  if (_nsynchronized == _nworkers) {
    // All workers synchronized
    _lock.notify_all();
  }
}

void ZRelocateQueue::desynchronize_thread() {
  _nsynchronized--;

  log_debug(gc, reloc)("Desynchronize worker _nsynchronized %u", _nsynchronized);

  assert(_nsynchronized < _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);
}

ZForwarding* ZRelocateQueue::synchronize_poll() {
  // Fast path avoids locking
  if (!needs_attention()) {
    return nullptr;
  }

  // Slow path to get the next forwarding and/or synchronize
  ZLocker<ZConditionLock> locker(&_lock);

  {
    ZForwarding* const forwarding = prune_and_claim();
    if (forwarding != nullptr) {
      // Don't become synchronized while there are elements in the queue
      return forwarding;
    }
  }

  if (!_synchronize) {
    return nullptr;
  }

  ZRelocateQueueSynchronizeThread rqst(this);

  do {
    _lock.wait();

    ZForwarding* const forwarding = prune_and_claim();
    if (forwarding != nullptr) {
      return forwarding;
    }
  } while (_synchronize);

  return nullptr;
}

void ZRelocateQueue::clear() {
  assert(_nworkers == 0, "Invalid state");

  if (_queue.is_empty()) {
    return;
  }

  ZArrayIterator<ZForwarding*> iter(&_queue);
  for (ZForwarding* forwarding; iter.next(&forwarding);) {
    assert(forwarding->is_done(), "All should be done");
  }

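  // The queue is expected to be empty at this point. All left-over forwardings
  // checked above are done, so flag the unexpected state in debug builds and
  // clean up.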
  assert(false, "Clear was not empty");

  _queue.clear();
  dec_needs_attention();
}

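// Requests that all joined relocation workers park in synchronize_poll() and
// blocks until every worker has checked in. The workers stay parked until
// desynchronize() is called.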
void ZRelocateQueue::synchronize() {
  ZLocker<ZConditionLock> locker(&_lock);
  _synchronize = true;

  inc_needs_attention();

  log_debug(gc, reloc)("Synchronize all workers 1 _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);

  while (_nworkers != _nsynchronized) {
    _lock.wait();
    log_debug(gc, reloc)("Synchronize all workers 2 _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);
  }
}

void ZRelocateQueue::desynchronize() {
  ZLocker<ZConditionLock> locker(&_lock);
  _synchronize = false;

  log_debug(gc, reloc)("Desynchronize all workers _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);

  assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);

  dec_needs_attention();

  _lock.notify_all();
}

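// Holds the current relocation target page for each NUMA partition and
// relocation age. The age index is offset by one since the to-space age is
// never eden (see ZRelocate::compute_to_age).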
ZRelocationTargets::ZRelocationTargets()
  : _targets() {}

ZPage* ZRelocationTargets::get(uint32_t partition_id, ZPageAge age) {
  return _targets.get(partition_id)[untype(age) - 1];
}

void ZRelocationTargets::set(uint32_t partition_id, ZPageAge age, ZPage* page) {
  _targets.get(partition_id)[untype(age) - 1] = page;
}

template <typename Function>
void ZRelocationTargets::apply_and_clear_targets(Function function) {
  ZPerNUMAIterator<TargetArray> iter(&_targets);
  for (TargetArray* targets; iter.next(&targets);) {
    for (size_t i = 0; i < ZNumRelocationAges; i++) {
      // Apply function
      function((*targets)[i]);

      // Clear target
      (*targets)[i] = nullptr;
    }
  }
}

ZRelocate::ZRelocate(ZGeneration* generation)
  : _generation(generation),
    _queue(),
    _iters(),
    _small_targets(),
    _medium_targets(),
    _shared_medium_targets() {}

ZWorkers* ZRelocate::workers() const {
  return _generation->workers();
}

void ZRelocate::start() {
  _queue.activate(workers()->active_workers());
}

void ZRelocate::add_remset(volatile zpointer* p) {
  ZGeneration::young()->remember(p);
}

static zaddress relocate_object_inner(ZForwarding* forwarding, zaddress from_addr, ZForwardingCursor* cursor) {
  assert(ZHeap::heap()->is_object_live(from_addr), "Should be live");

  // Allocate object
  const size_t size = ZUtils::object_size(from_addr);
  const ZPageAge to_age = forwarding->to_age();

  const zaddress to_addr = ZHeap::heap()->alloc_object_for_relocation(size, to_age);

  if (is_null(to_addr)) {
    // Allocation failed
    return zaddress::null;
  }

  // Copy object
  ZUtils::object_copy_disjoint(from_addr, to_addr, size);

  // Insert forwarding
  const zaddress to_addr_final = forwarding->insert(from_addr, to_addr, cursor);

  if (to_addr_final != to_addr) {
    // Already relocated, try undo allocation
    ZHeap::heap()->undo_alloc_object_for_relocation(to_addr, size);
  }

  return to_addr_final;
}

zaddress ZRelocate::relocate_object(ZForwarding* forwarding, zaddress_unsafe from_addr) {
  ZForwardingCursor cursor;

  // Lookup forwarding
  zaddress to_addr = forwarding->find(from_addr, &cursor);
  if (!is_null(to_addr)) {
    // Already relocated
    return to_addr;
  }

  // Relocate object
  if (forwarding->retain_page(&_queue)) {
    assert(_generation->is_phase_relocate(), "Must be");
    to_addr = relocate_object_inner(forwarding, safe(from_addr), &cursor);
    forwarding->release_page();

    if (!is_null(to_addr)) {
      // Success
      return to_addr;
    }

    // Failed to relocate object. Signal and wait for a worker thread to
    // complete relocation of this page, and then forward the object.
    _queue.add_and_wait(forwarding);
  }

  // Forward object
  return forward_object(forwarding, from_addr);
}

zaddress ZRelocate::forward_object(ZForwarding* forwarding, zaddress_unsafe from_addr) {
  const zaddress to_addr = forwarding->find(from_addr);
  assert(!is_null(to_addr), "Should be forwarded: " PTR_FORMAT, untype(from_addr));
  return to_addr;
}

static ZPage* alloc_page(ZForwarding* forwarding) {
  if (ZStressRelocateInPlace) {
    // Simulate failure to allocate a new page. This will
    // cause the page being relocated to be relocated in-place.
    return nullptr;
  }

  const ZPageType type = forwarding->type();
  const size_t size = forwarding->size();
  const ZPageAge age = forwarding->to_age();
  const uint32_t preferred_partition = forwarding->partition_id();

  ZAllocationFlags flags;
  flags.set_non_blocking();
  flags.set_gc_relocation();

  return ZHeap::heap()->alloc_page(type, size, flags, age, preferred_partition);
}

static void retire_target_page(ZGeneration* generation, ZPage* page) {
  if (generation->is_young() && page->is_old()) {
    generation->increase_promoted(page->used());
  } else {
    generation->increase_compacted(page->used());
  }

  // Free target page if it is empty. We can end up with an empty target
  // page if we allocated a new target page, and then lost the race to
  // relocate the remaining objects, leaving the target page empty when
  // relocation completed.
  if (page->used() == 0) {
    ZHeap::heap()->free_page(page);
  }
}

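// Allocator for small target pages. Each worker relocates into its own private
// target pages, so object allocation and undo do not need to be atomic.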
class ZRelocateSmallAllocator {
private:
  ZGeneration* const _generation;
  volatile size_t _in_place_count;

public:
  ZRelocateSmallAllocator(ZGeneration* generation)
    : _generation(generation),
      _in_place_count(0) {}

  ZPage* alloc_and_retire_target_page(ZForwarding* forwarding, ZPage* target) {
    ZPage* const page = alloc_page(forwarding);
    if (page == nullptr) {
      AtomicAccess::inc(&_in_place_count);
    }

    if (target != nullptr) {
      // Retire the old target page
      retire_target_page(_generation, target);
    }

    return page;
  }

  void share_target_page(ZPage* page, uint32_t partition_id) {
    // Does nothing
  }

  void free_target_page(ZPage* page) {
    if (page != nullptr) {
      retire_target_page(_generation, page);
    }
  }

  zaddress alloc_object(ZPage* page, size_t size) const {
    return (page != nullptr) ? page->alloc_object(size) : zaddress::null;
  }

  void undo_alloc_object(ZPage* page, zaddress addr, size_t size) const {
    page->undo_alloc_object(addr, size);
  }

  size_t in_place_count() const {
    return _in_place_count;
  }
};

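// Allocator for medium target pages. The target pages are shared by all
// workers, so allocating a new target page is serialized by a lock and object
// allocation within a page is atomic. An ongoing in-place relocation of a
// medium page blocks the other workers until the page is shared as the new
// target.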
class ZRelocateMediumAllocator {
private:
  ZGeneration* const _generation;
  ZConditionLock _lock;
  ZRelocationTargets* _shared_targets;
  bool _in_place;
  volatile size_t _in_place_count;

public:
  ZRelocateMediumAllocator(ZGeneration* generation, ZRelocationTargets* shared_targets)
    : _generation(generation),
      _lock(),
      _shared_targets(shared_targets),
      _in_place(false),
      _in_place_count(0) {}

  ~ZRelocateMediumAllocator() {
    _shared_targets->apply_and_clear_targets([&](ZPage* page) {
      if (page != nullptr) {
        retire_target_page(_generation, page);
      }
    });
  }

  ZPage* alloc_and_retire_target_page(ZForwarding* forwarding, ZPage* target) {
    ZLocker<ZConditionLock> locker(&_lock);

    // Wait for any ongoing in-place relocation to complete
    while (_in_place) {
      _lock.wait();
    }

    // Allocate a new page only if the shared page is the same as the
    // current target page. The shared page will be different from the
    // current target page if another thread shared a page, or allocated
    // a new page.
    const ZPageAge to_age = forwarding->to_age();
    const uint32_t partition_id = forwarding->partition_id();
    if (_shared_targets->get(partition_id, to_age) == target) {
      ZPage* const to_page = alloc_page(forwarding);
      _shared_targets->set(partition_id, to_age, to_page);
      if (to_page == nullptr) {
        AtomicAccess::inc(&_in_place_count);
        _in_place = true;
      }

      // This thread is responsible for retiring the shared target page
      if (target != nullptr) {
        retire_target_page(_generation, target);
      }
    }

    return _shared_targets->get(partition_id, to_age);
  }

  void share_target_page(ZPage* page, uint32_t partition_id) {
    const ZPageAge age = page->age();

    ZLocker<ZConditionLock> locker(&_lock);
    assert(_in_place, "Invalid state");
    assert(_shared_targets->get(partition_id, age) == nullptr, "Invalid state");
    assert(page != nullptr, "Invalid page");

    _shared_targets->set(partition_id, age, page);
    _in_place = false;

    _lock.notify_all();
  }

  void free_target_page(ZPage* page) {
    // Does nothing
  }

  zaddress alloc_object(ZPage* page, size_t size) const {
    return (page != nullptr) ? page->alloc_object_atomic(size) : zaddress::null;
  }

  void undo_alloc_object(ZPage* page, zaddress addr, size_t size) const {
    page->undo_alloc_object_atomic(addr, size);
  }

  size_t in_place_count() const {
    return _in_place_count;
  }
};

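// Per-worker state used while relocating the pages of a relocation set. Keeps
// track of the current forwarding, the worker's target pages, and statistics
// for objects that turned out to have been relocated by other threads.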
template <typename Allocator>
class ZRelocateWork : public StackObj {
private:
  Allocator* const _allocator;
  ZForwarding* _forwarding;
  ZRelocationTargets* _targets;
  ZGeneration* const _generation;
  size_t _other_promoted;
  size_t _other_compacted;
  ZStringDedupContext _string_dedup_context;

  size_t object_alignment() const {
    return (size_t)1 << _forwarding->object_alignment_shift();
  }

  void increase_other_forwarded(size_t unaligned_object_size) {
    const size_t aligned_size = align_up(unaligned_object_size, object_alignment());
    if (_forwarding->is_promotion()) {
      _other_promoted += aligned_size;
    } else {
      _other_compacted += aligned_size;
    }
  }

  zaddress try_relocate_object_inner(zaddress from_addr, uint32_t partition_id) {
    ZForwardingCursor cursor;

    const size_t size = ZUtils::object_size(from_addr);
    ZPage* const to_page = _targets->get(partition_id, _forwarding->to_age());

    // Lookup forwarding
    {
      const zaddress to_addr = _forwarding->find(from_addr, &cursor);
      if (!is_null(to_addr)) {
        // Already relocated
        increase_other_forwarded(size);
        return to_addr;
      }
    }

    // Allocate object
    const zaddress allocated_addr = _allocator->alloc_object(to_page, size);
    if (is_null(allocated_addr)) {
      // Allocation failed
      return zaddress::null;
    }

    // Copy object. Use conjoint copying if we are relocating
    // in-place and the new object overlaps with the old object.
    if (_forwarding->in_place_relocation() && allocated_addr + size > from_addr) {
      ZUtils::object_copy_conjoint(from_addr, allocated_addr, size);
    } else {
      ZUtils::object_copy_disjoint(from_addr, allocated_addr, size);
    }

    // Insert forwarding
    const zaddress to_addr = _forwarding->insert(from_addr, allocated_addr, &cursor);
    if (to_addr != allocated_addr) {
      // Already relocated, undo allocation
      _allocator->undo_alloc_object(to_page, to_addr, size);
      increase_other_forwarded(size);
    }

    return to_addr;
  }

  void update_remset_old_to_old(zaddress from_addr, zaddress to_addr) const {
    // Old-to-old relocation - move existing remset bits

    // If this is called for an in-place relocated page, then this code has the
    // responsibility to clear the old remset bits. Extra care is needed because:
    //
    // 1) The to-object copy can overlap with the from-object copy
    // 2) Remset bits of old objects need to be cleared
    //
    // A watermark is used to keep track of how far the old remset bits have been removed.

    const bool in_place = _forwarding->in_place_relocation();
    ZPage* const from_page = _forwarding->page();
    const uintptr_t from_local_offset = from_page->local_offset(from_addr);

    // Note: even with in-place relocation, the to_page could be another page
    ZPage* const to_page = ZHeap::heap()->page(to_addr);

    // Uses _relaxed version to handle that in-place relocation resets _top
    assert(ZHeap::heap()->is_in_page_relaxed(from_page, from_addr), "Must be");
    assert(to_page->is_in(to_addr), "Must be");

    // Read the size from the to-object, since the from-object
    // could have been overwritten during in-place relocation.
    const size_t size = ZUtils::object_size(to_addr);

    // If a young generation collection started while the old generation was
    // relocating objects, the remembered set bits were flipped from "current"
    // to "previous".
    //
    // We need to select the correct remembered set bitmap to ensure that the
    // old remset bits are found.
    //
    // Note that if the young generation marking (remset scanning) finishes
    // before the old generation relocation has relocated this page, then the
    // young generation will visit this page's previous remembered set bits and
    // move them over to the current bitmap.
    //
    // If the young generation runs multiple cycles while the old generation is
    // relocating, then the first cycle will have consumed the old remset bits
    // and moved the associated objects to a new old page. The old relocation
    // could find either of the two bitmaps. So, either it will find the original
    // remset bits for the page, or it will find an empty bitmap for the page. It
    // doesn't matter for correctness, because the young generation marking has
    // already taken care of the bits.

    const bool active_remset_is_current = ZGeneration::old()->active_remset_is_current();

    // When in-place relocation is done and the old remset bits are located in
    // the bitmap that is going to be used for the new remset bits, then we
    // need to clear the old bits before the new bits are inserted.
    const bool iterate_current_remset = active_remset_is_current && !in_place;

    BitMap::Iterator iter = iterate_current_remset
        ? from_page->remset_iterator_limited_current(from_local_offset, size)
        : from_page->remset_iterator_limited_previous(from_local_offset, size);

    for (BitMap::idx_t field_bit : iter) {
      const uintptr_t field_local_offset = ZRememberedSet::to_offset(field_bit);

      // Add remset entry in the to-page
      const uintptr_t offset = field_local_offset - from_local_offset;
      const zaddress to_field = to_addr + offset;
      log_trace(gc, reloc)("Remember: from: " PTR_FORMAT " to: " PTR_FORMAT " current: %d marking: %d page: " PTR_FORMAT " remset: " PTR_FORMAT,
                           untype(from_page->start() + field_local_offset), untype(to_field), active_remset_is_current, ZGeneration::young()->is_phase_mark(), p2i(to_page), p2i(to_page->remset_current()));

      volatile zpointer* const p = (volatile zpointer*)to_field;

      if (ZGeneration::young()->is_phase_mark()) {
        // Young generation remembered set scanning needs to know about this
        // field. It will take responsibility to add a new remembered set entry if needed.
        _forwarding->relocated_remembered_fields_register(p);
      } else {
        to_page->remember(p);
        if (in_place) {
          assert(to_page->is_remembered(p), "p: " PTR_FORMAT, p2i(p));
        }
      }
    }
  }

  static bool add_remset_if_young(volatile zpointer* p, zaddress addr) {
    if (ZHeap::heap()->is_young(addr)) {
      ZRelocate::add_remset(p);
      return true;
    }

    return false;
  }

  static void update_remset_promoted_filter_and_remap_per_field(volatile zpointer* p) {
    const zpointer ptr = AtomicAccess::load(p);

    assert(ZPointer::is_old_load_good(ptr), "Should be at least old load good: " PTR_FORMAT, untype(ptr));

    if (ZPointer::is_store_good(ptr)) {
      // Already has a remset entry
      return;
    }

    if (ZPointer::is_load_good(ptr)) {
      if (!is_null_any(ptr)) {
        const zaddress addr = ZPointer::uncolor(ptr);
        add_remset_if_young(p, addr);
      }
      // No need to remap, the pointer is already load good
      return;
    }

    if (is_null_any(ptr)) {
      // Eagerly remap to skip adding a remset entry just to get deferred remapping
      ZBarrier::remap_young_relocated(p, ptr);
      return;
    }

    const zaddress_unsafe addr_unsafe = ZPointer::uncolor_unsafe(ptr);
    ZForwarding* const forwarding = ZGeneration::young()->forwarding(addr_unsafe);

    if (forwarding == nullptr) {
      // Object isn't being relocated
      const zaddress addr = safe(addr_unsafe);
      if (!add_remset_if_young(p, addr)) {
        // Not young - eagerly remap to skip adding a remset entry just to get deferred remapping
        ZBarrier::remap_young_relocated(p, ptr);
      }
      return;
    }

    const zaddress addr = forwarding->find(addr_unsafe);

    if (!is_null(addr)) {
      // Object has already been relocated
      if (!add_remset_if_young(p, addr)) {
        // Not young - eagerly remap to skip adding a remset entry just to get deferred remapping
        ZBarrier::remap_young_relocated(p, ptr);
      }
      return;
    }

    // Object has not been relocated yet
    // Don't want to eagerly relocate objects, so just add a remset
    ZRelocate::add_remset(p);
    return;
  }

  void update_remset_promoted(zaddress to_addr) const {
    ZIterator::basic_oop_iterate(to_oop(to_addr), update_remset_promoted_filter_and_remap_per_field);
  }

  void update_remset_for_fields(zaddress from_addr, zaddress to_addr) const {
    if (_forwarding->to_age() != ZPageAge::old) {
      // No remembered set in young pages
      return;
    }

    // Need to deal with remset when moving objects to the old generation
    if (_forwarding->from_age() == ZPageAge::old) {
      update_remset_old_to_old(from_addr, to_addr);
      return;
    }

    // Normal promotion
    update_remset_promoted(to_addr);
  }

  void maybe_string_dedup(zaddress to_addr) {
    if (_forwarding->is_promotion()) {
      // Only deduplicate promoted objects, and let short-lived strings simply die instead.
      _string_dedup_context.request(to_oop(to_addr));
    }
  }

  bool try_relocate_object(zaddress from_addr, uint32_t partition_id) {
    const zaddress to_addr = try_relocate_object_inner(from_addr, partition_id);

    if (is_null(to_addr)) {
      return false;
    }

    update_remset_for_fields(from_addr, to_addr);

    maybe_string_dedup(to_addr);

    return true;
  }

  void start_in_place_relocation_prepare_remset(ZPage* from_page) {
    if (_forwarding->from_age() != ZPageAge::old) {
      // Only old pages have remset bits
      return;
    }

    if (ZGeneration::old()->active_remset_is_current()) {
      // We want to iterate over and clear the remset bits of the from-space page,
      // and insert current bits in the to-space page. However, with in-place
      // relocation, the from-space and to-space pages are the same. Clearing
      // is destructive, and is difficult to perform before or during the iteration.
      // However, clearing of the current bits has to be done before exposing the
      // to-space objects in the forwarding table.
      //
      // To solve this tricky dependency problem, we start by stashing away the
      // current bits in the previous bits, and clearing the current bits
      // (implemented by swapping the bits). This way, the current bits are
      // cleared before copying the objects (like a normal to-space page),
      // and the previous bits are representing a copy of the current bits
      // of the from-space page, and are used for iteration.
      from_page->swap_remset_bitmaps();
    }
  }

  ZPage* start_in_place_relocation(zoffset relocated_watermark) {
    _forwarding->in_place_relocation_claim_page();
    _forwarding->in_place_relocation_start(relocated_watermark);

    ZPage* const from_page = _forwarding->page();

    const ZPageAge to_age = _forwarding->to_age();
    const bool promotion = _forwarding->is_promotion();

    // Promotions happen through a new cloned page
    ZPage* const to_page = promotion
        ? from_page->clone_for_promotion()
        : from_page->reset(to_age);

    // Reset page for in-place relocation
    to_page->reset_top_for_allocation();

    // Verify that the inactive remset is clear when resetting the page for
    // in-place relocation.
    if (from_page->age() == ZPageAge::old) {
      if (ZGeneration::old()->active_remset_is_current()) {
        to_page->verify_remset_cleared_previous();
      } else {
        to_page->verify_remset_cleared_current();
      }
    }

    // Clear remset bits for all objects that were relocated
    // before this page became an in-place relocated page.
    start_in_place_relocation_prepare_remset(from_page);

    if (promotion) {
      // Register the promotion
      ZGeneration::young()->in_place_relocate_promote(from_page, to_page);
      ZGeneration::young()->register_in_place_relocate_promoted(from_page);
    }

    return to_page;
  }

  void relocate_object(oop obj) {
    const zaddress addr = to_zaddress(obj);
    assert(ZHeap::heap()->is_object_live(addr), "Should be live");

    const ZPageAge to_age = _forwarding->to_age();
    const uint32_t partition_id = _forwarding->partition_id();

    while (!try_relocate_object(addr, partition_id)) {
      // Failed to relocate object, try to allocate a new target page,
      // or if that fails, use the page being relocated as the new target,
      // which will cause it to be relocated in-place.
      ZPage* const target_page = _targets->get(partition_id, to_age);
      ZPage* to_page = _allocator->alloc_and_retire_target_page(_forwarding, target_page);
      _targets->set(partition_id, to_age, to_page);

      // We got a new page, retry relocation
      if (to_page != nullptr) {
        continue;
      }

      // Start in-place relocation to block other threads from accessing
      // the page, or its forwarding table, until it has been released
      // (relocation completed).
      to_page = start_in_place_relocation(ZAddress::offset(addr));
      _targets->set(partition_id, to_age, to_page);
    }
  }

public:
  ZRelocateWork(Allocator* allocator, ZRelocationTargets* targets, ZGeneration* generation)
    : _allocator(allocator),
      _forwarding(nullptr),
      _targets(targets),
      _generation(generation),
      _other_promoted(0),
      _other_compacted(0) {}

  ~ZRelocateWork() {
    _targets->apply_and_clear_targets([&](ZPage* page) {
      _allocator->free_target_page(page);
    });

    // Report statistics on behalf of non-worker threads
    _generation->increase_promoted(_other_promoted);
    _generation->increase_compacted(_other_compacted);
  }

  bool active_remset_is_current() const {
    // Normal old-to-old relocation can treat the from-page remset as a
    // read-only copy, and then copy over the appropriate remset bits to the
    // cleared to-page's 'current' remset bitmap.
    //
    // In-place relocation is more complicated. Since the same page is both
    // a from-page and a to-page, we need to remove the old remset bits, and
    // add remset bits that correspond to the new locations of the relocated
    // objects.
    //
    // Depending on how long ago the page was allocated (in terms of the
    // number of young GCs and the current young GC's phase), the active
    // remembered set will be in either the 'current' or 'previous' bitmap.
    //
    // If the active bits are in the 'previous' bitmap, we know that the
    // 'current' bitmap was cleared at some earlier point in time, so we can
    // simply set new bits in the 'current' bitmap, and later, when relocation
    // has read all the old remset bits, just clear the 'previous' remset
    // bitmap.
    //
    // If, on the other hand, the active bits are in the 'current' bitmap, then
    // that bitmap is used both to read the old remset bits and as the
    // destination for the remset bits that we copy when an object is copied
    // to its new location within the page. We need to carefully remove all
    // old remset bits, without clearing out the newly set bits.
    return ZGeneration::old()->active_remset_is_current();
  }

  void clear_remset_before_in_place_reuse(ZPage* page) {
    if (_forwarding->from_age() != ZPageAge::old) {
      // No remset bits
      return;
    }

    // Clear 'previous' remset bits. For in-place relocated pages, the previous
    // remset bits are always used, even when active_remset_is_current().
    page->clear_remset_previous();
  }

  void finish_in_place_relocation() {
    // We are done with the from_space copy of the page
    _forwarding->in_place_relocation_finish();
  }

  void do_forwarding(ZForwarding* forwarding) {
    _forwarding = forwarding;

    _forwarding->page()->log_msg(" (relocate page)");

    ZVerify::before_relocation(_forwarding);

    // Relocate objects
    _forwarding->object_iterate([&](oop obj) { relocate_object(obj); });

    ZVerify::after_relocation(_forwarding);

    // Verify
    if (ZVerifyForwarding) {
      _forwarding->verify();
    }

    _generation->increase_freed(_forwarding->page()->size());

    // Deal with in-place relocation
    const bool in_place = _forwarding->in_place_relocation();
    if (in_place) {
      finish_in_place_relocation();
    }

    // Old from-space pages need to deal with remset bits
    if (_forwarding->from_age() == ZPageAge::old) {
      _forwarding->relocated_remembered_fields_after_relocate();
    }

    // Release relocated page
    _forwarding->release_page();

    if (in_place) {
      // Wait for all other threads to call release_page
      ZPage* const page = _forwarding->detach_page();

      // Ensure that previous remset bits are cleared
      clear_remset_before_in_place_reuse(page);

      page->log_msg(" (relocate page done in-place)");

      // Different pages when promoting
      const uint32_t target_partition = _forwarding->partition_id();
      ZPage* const target_page = _targets->get(target_partition, _forwarding->to_age());
      _allocator->share_target_page(target_page, target_partition);

    } else {
      // Wait for all other threads to call release_page
      ZPage* const page = _forwarding->detach_page();

      page->log_msg(" (relocate page done normal)");

      // Free page
      ZHeap::heap()->free_page(page);
    }
  }
};

class ZRelocateStoreBufferInstallBasePointersThreadClosure : public ThreadClosure {
public:
  virtual void do_thread(Thread* thread) {
    JavaThread* const jt = JavaThread::cast(thread);
    ZStoreBarrierBuffer* buffer = ZThreadLocalData::store_barrier_buffer(jt);
    buffer->install_base_pointers();
  }
};

// Installs the object base pointers (object starts) for the fields written
// in the store buffer. The code that searches for the object start uses the
// liveness information stored in the pages. That information is lost when the
// pages have been relocated and then destroyed.
class ZRelocateStoreBufferInstallBasePointersTask : public ZTask {
private:
  ZJavaThreadsIterator _threads_iter;

public:
  ZRelocateStoreBufferInstallBasePointersTask(ZGeneration* generation)
    : ZTask("ZRelocateStoreBufferInstallBasePointersTask"),
      _threads_iter(generation->id_optional()) {}

  virtual void work() {
    ZRelocateStoreBufferInstallBasePointersThreadClosure fix_store_buffer_cl;
    _threads_iter.apply(&fix_store_buffer_cl);
  }
};

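// The main relocation task. Each worker alternates between servicing the
// relocate queue and claiming forwardings from the per-NUMA iterators,
// preferring forwardings whose partition matches the worker's own NUMA node
// before stealing from other nodes.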
class ZRelocateTask : public ZRestartableTask {
private:
  ZGeneration* const _generation;
  ZRelocateQueue* const _queue;
  ZPerNUMA<ZRelocationSetParallelIterator>* _iters;
  ZPerWorker<ZRelocationTargets>* _small_targets;
  ZPerWorker<ZRelocationTargets>* _medium_targets;
  ZRelocateSmallAllocator _small_allocator;
  ZRelocateMediumAllocator _medium_allocator;
  const size_t _total_forwardings;
  volatile size_t _numa_local_forwardings;

public:
  ZRelocateTask(ZRelocationSet* relocation_set,
                ZRelocateQueue* queue,
                ZPerNUMA<ZRelocationSetParallelIterator>* iters,
                ZPerWorker<ZRelocationTargets>* small_targets,
                ZPerWorker<ZRelocationTargets>* medium_targets,
                ZRelocationTargets* shared_medium_targets)
    : ZRestartableTask("ZRelocateTask"),
      _generation(relocation_set->generation()),
      _queue(queue),
      _iters(iters),
      _small_targets(small_targets),
      _medium_targets(medium_targets),
      _small_allocator(_generation),
      _medium_allocator(_generation, shared_medium_targets),
      _total_forwardings(relocation_set->nforwardings()),
      _numa_local_forwardings(0) {

    for (uint32_t i = 0; i < ZNUMA::count(); i++) {
      ZRelocationSetParallelIterator* const iter = _iters->addr(i);

      // Destruct the iterator from the previous GC-cycle, which is a temporary
      // iterator if this is the first GC-cycle.
      iter->~ZRelocationSetParallelIterator();

      // In-place construct the iterator with the current relocation set
      ::new (iter) ZRelocationSetParallelIterator(relocation_set);
    }
  }

  ~ZRelocateTask() {
    _generation->stat_relocation()->at_relocate_end(_small_allocator.in_place_count(), _medium_allocator.in_place_count());

    // Signal that we're not using the queue anymore. Used mostly for asserts.
    _queue->deactivate();

    if (ZNUMA::is_enabled()) {
      log_debug(gc, reloc, numa)("Forwardings relocated NUMA-locally: %zu / %zu (%.0f%%)",
                                 _numa_local_forwardings, _total_forwardings, percent_of(_numa_local_forwardings, _total_forwardings));
    }
  }

  virtual void work() {
    ZRelocateWork<ZRelocateSmallAllocator> small(&_small_allocator, _small_targets->addr(), _generation);
    ZRelocateWork<ZRelocateMediumAllocator> medium(&_medium_allocator, _medium_targets->addr(), _generation);
    const uint32_t num_nodes = ZNUMA::count();
    uint32_t numa_local_forwardings_worker = 0;

    const auto do_forwarding = [&](ZForwarding* forwarding) {
      ZPage* const page = forwarding->page();
      if (page->is_small()) {
        small.do_forwarding(forwarding);
      } else {
        medium.do_forwarding(forwarding);
      }

      // Absolute last thing done while relocating a page.
      //
      // We don't use the SuspendibleThreadSet when relocating pages.
      // Instead the ZRelocateQueue is used as a pseudo STS joiner/leaver.
      //
      // After the mark_done call, a safepoint could be reached and a new GC
      // phase could be entered.
      forwarding->mark_done();
    };

    const auto claim_and_do_forwarding = [&](ZForwarding* forwarding) {
      if (forwarding->claim()) {
        do_forwarding(forwarding);
      }
    };

    const auto check_numa_local = [&](ZForwarding* forwarding, uint32_t numa_id) {
      return forwarding->partition_id() == numa_id;
    };

    const auto do_forwarding_one_from_iter = [&]() {
      ZForwarding* forwarding;
      const uint32_t start_node = ZNUMA::id();
      uint32_t current_node = start_node;

      for (uint32_t i = 0; i < num_nodes; i++) {
        if (_iters->get(current_node).next_if(&forwarding, check_numa_local, current_node)) {
          claim_and_do_forwarding(forwarding);

          if (current_node == start_node) {
            // Track if this forwarding was relocated on the local NUMA node
            numa_local_forwardings_worker++;
          }

          return true;
        }

        // Check next node.
        current_node = (current_node + 1) % num_nodes;
      }

      return false;
    };

    for (;;) {
      // As long as there are requests in the relocate queue, there are threads
      // waiting in a VM state that does not allow them to be blocked. The
      // worker threads need to finish relocating these pages and allow the
      // other threads to continue and proceed to a blocking state. After that,
      // the worker threads are allowed to safepoint synchronize.
      for (ZForwarding* forwarding; (forwarding = _queue->synchronize_poll()) != nullptr;) {
        do_forwarding(forwarding);
      }

      if (!do_forwarding_one_from_iter()) {
        // No more work
        break;
      }

      if (_generation->should_worker_resize()) {
        break;
      }
    }

    if (ZNUMA::is_enabled()) {
      AtomicAccess::add(&_numa_local_forwardings, numa_local_forwardings_worker, memory_order_relaxed);
    }

    _queue->leave();
  }

  virtual void resize_workers(uint nworkers) {
    _queue->resize_workers(nworkers);
  }
};

static void remap_and_maybe_add_remset(volatile zpointer* p) {
  const zpointer ptr = AtomicAccess::load(p);

  if (ZPointer::is_store_good(ptr)) {
    // Already has a remset entry
    return;
  }

  // Remset entries are used for two reasons:
  // 1) Young marking old-to-young pointer roots
  // 2) Deferred remapping of stale old-to-young pointers
  //
  // This load barrier will up-front perform the remapping of (2),
  // and the code below only has to make sure we register up-to-date
  // old-to-young pointers for (1).
  const zaddress addr = ZBarrier::load_barrier_on_oop_field_preloaded(p, ptr);

  if (is_null(addr)) {
    // No need for remset entries for null pointers
    return;
  }

  if (ZHeap::heap()->is_old(addr)) {
    // No need for remset entries for pointers to old gen
    return;
  }

  ZRelocate::add_remset(p);
}

class ZRelocateAddRemsetForFlipPromoted : public ZRestartableTask {
private:
  ZStatTimerYoung _timer;
  ZArrayParallelIterator<ZPage*> _iter;

public:
  ZRelocateAddRemsetForFlipPromoted(ZArray<ZPage*>* pages)
    : ZRestartableTask("ZRelocateAddRemsetForFlipPromoted"),
      _timer(ZSubPhaseConcurrentRelocateRememberedSetFlipPromotedYoung),
      _iter(pages) {}

  virtual void work() {
    SuspendibleThreadSetJoiner sts_joiner;
    ZStringDedupContext string_dedup_context;

    for (ZPage* page; _iter.next(&page);) {
      page->object_iterate([&](oop obj) {
        // Remap oops and add remset if needed
        ZIterator::basic_oop_iterate_safe(obj, remap_and_maybe_add_remset);

        // String dedup
        string_dedup_context.request(obj);
      });

      SuspendibleThreadSet::yield();
      if (ZGeneration::young()->should_worker_resize()) {
        return;
      }
    }
  }
};

void ZRelocate::relocate(ZRelocationSet* relocation_set) {
  {
    // Install the store buffer's base pointers before the
    // relocate task destroys the liveness information in
    // the relocated pages.
    ZRelocateStoreBufferInstallBasePointersTask buffer_task(_generation);
    workers()->run(&buffer_task);
  }

  {
    ZRelocateTask relocate_task(relocation_set, &_queue, &_iters, &_small_targets, &_medium_targets, &_shared_medium_targets);
    workers()->run(&relocate_task);
  }

  if (relocation_set->generation()->is_young()) {
    ZRelocateAddRemsetForFlipPromoted task(relocation_set->flip_promoted_pages());
    workers()->run(&task);
  }
}

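// An object's age is bumped by one for each young collection it survives,
// until it reaches the tenuring threshold and is promoted to the old
// generation. Old objects stay old.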
ZPageAge ZRelocate::compute_to_age(ZPageAge from_age) {
  if (from_age == ZPageAge::old) {
    return ZPageAge::old;
  }

  const uint age = untype(from_age);
  if (age >= ZGeneration::young()->tenuring_threshold()) {
    return ZPageAge::old;
  }

  return to_zpageage(age + 1);
}

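// Ages young pages in place without relocating their objects. Surviving pages
// are reset to the next age, while pages reaching the tenuring threshold are
// flip promoted to the old generation through a cloned page; the objects
// themselves are not copied.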
class ZFlipAgePagesTask : public ZTask {
private:
  ZArrayParallelIterator<ZPage*> _iter;

public:
  ZFlipAgePagesTask(const ZArray<ZPage*>* pages)
    : ZTask("ZFlipAgePagesTask"),
      _iter(pages) {}

  virtual void work() {
    SuspendibleThreadSetJoiner sts_joiner;
    ZArray<ZPage*> promoted_pages;

    for (ZPage* prev_page; _iter.next(&prev_page);) {
      const ZPageAge from_age = prev_page->age();
      const ZPageAge to_age = ZRelocate::compute_to_age(from_age);
      assert(from_age != ZPageAge::old, "invalid age for a young collection");

      // Figure out if this is proper promotion
      const bool promotion = to_age == ZPageAge::old;

      // Logging
      prev_page->log_msg(promotion ? " (flip promoted)" : " (flip survived)");

      // Setup to-space page
      ZPage* const new_page = promotion
          ? prev_page->clone_for_promotion()
          : prev_page->reset(to_age);

      // Reset page for flip aging
      new_page->reset_livemap();

      if (promotion) {
        ZGeneration::young()->flip_promote(prev_page, new_page);
        // Defer promoted page registration
        promoted_pages.push(prev_page);
      }

      SuspendibleThreadSet::yield();
    }

    ZGeneration::young()->register_flip_promoted(promoted_pages);
  }
};

class ZPromoteBarrierTask : public ZTask {
private:
  ZArrayParallelIterator<ZPage*> _iter;

public:
  ZPromoteBarrierTask(const ZArray<ZPage*>* pages)
    : ZTask("ZPromoteBarrierTask"),
      _iter(pages) {}

  virtual void work() {
    SuspendibleThreadSetJoiner sts_joiner;

    for (ZPage* page; _iter.next(&page);) {
      // When promoting an object (and before relocate start), we must ensure that all
      // contained zpointers are store good. The marking code ensures that for non-null
      // pointers, but null pointers are ignored. This code ensures that even null pointers
      // are made store good, for the promoted objects.
      page->object_iterate([&](oop obj) {
        ZIterator::basic_oop_iterate_safe(obj, ZBarrier::promote_barrier_on_young_oop_field);
      });

      SuspendibleThreadSet::yield();
    }
  }
};

void ZRelocate::flip_age_pages(const ZArray<ZPage*>* pages) {
  ZFlipAgePagesTask flip_age_task(pages);
  workers()->run(&flip_age_task);
}

void ZRelocate::barrier_flip_promoted_pages(const ZArray<ZPage*>* pages) {
  ZPromoteBarrierTask promote_barrier_task(pages);
  workers()->run(&promote_barrier_task);
}

void ZRelocate::synchronize() {
  _queue.synchronize();
}

void ZRelocate::desynchronize() {
  _queue.desynchronize();
}

ZRelocateQueue* ZRelocate::queue() {
  return &_queue;
}

bool ZRelocate::is_queue_active() const {
  return _queue.is_active();
}