1 /*
2 * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 */
23
24 #include "gc/shared/gc_globals.hpp"
25 #include "gc/shared/suspendibleThreadSet.hpp"
26 #include "gc/z/zAbort.inline.hpp"
27 #include "gc/z/zAddress.inline.hpp"
28 #include "gc/z/zBarrier.inline.hpp"
29 #include "gc/z/zCollectedHeap.hpp"
30 #include "gc/z/zForwarding.inline.hpp"
31 #include "gc/z/zGeneration.inline.hpp"
32 #include "gc/z/zHeap.inline.hpp"
33 #include "gc/z/zIndexDistributor.inline.hpp"
34 #include "gc/z/zIterator.inline.hpp"
35 #include "gc/z/zNUMA.inline.hpp"
36 #include "gc/z/zObjectAllocator.hpp"
37 #include "gc/z/zPage.inline.hpp"
38 #include "gc/z/zPageAge.inline.hpp"
39 #include "gc/z/zRelocate.hpp"
40 #include "gc/z/zRelocationSet.inline.hpp"
41 #include "gc/z/zRootsIterator.hpp"
42 #include "gc/z/zStackWatermark.hpp"
43 #include "gc/z/zStat.hpp"
44 #include "gc/z/zStringDedup.inline.hpp"
45 #include "gc/z/zTask.hpp"
46 #include "gc/z/zUncoloredRoot.inline.hpp"
47 #include "gc/z/zValue.inline.hpp"
48 #include "gc/z/zVerify.hpp"
49 #include "gc/z/zWorkers.hpp"
50 #include "prims/jvmtiTagMap.hpp"
51 #include "runtime/atomicAccess.hpp"
52 #include "utilities/debug.hpp"
53
54 static const ZStatCriticalPhase ZCriticalPhaseRelocationStall("Relocation Stall");
55 static const ZStatSubPhase ZSubPhaseConcurrentRelocateRememberedSetFlipPromotedYoung("Concurrent Relocate Remset FP", ZGenerationId::young);
56
57 ZRelocateQueue::ZRelocateQueue()
58 : _lock(),
59 _queue(),
60 _nworkers(0),
61 _nsynchronized(0),
62 _synchronize(false),
63 _is_active(false),
64 _needs_attention(0) {}
65
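// _needs_attention counts the conditions that require worker attention:
// a non-empty queue and/or a pending synchronize request. Keeping both in
// a single atomic lets synchronize_poll() stay lock-free on the fast path.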
66 bool ZRelocateQueue::needs_attention() const {
67 return AtomicAccess::load(&_needs_attention) != 0;
68 }
69
70 void ZRelocateQueue::inc_needs_attention() {
71 const int needs_attention = AtomicAccess::add(&_needs_attention, 1);
72 assert(needs_attention == 1 || needs_attention == 2, "Invalid state");
73 }
74
75 void ZRelocateQueue::dec_needs_attention() {
76 const int needs_attention = AtomicAccess::sub(&_needs_attention, 1);
77 assert(needs_attention == 0 || needs_attention == 1, "Invalid state");
78 }
79
80 void ZRelocateQueue::activate(uint nworkers) {
81 _is_active = true;
82 join(nworkers);
83 }
84
85 void ZRelocateQueue::deactivate() {
86 AtomicAccess::store(&_is_active, false);
87 clear();
88 }
89
90 bool ZRelocateQueue::is_active() const {
91 return AtomicAccess::load(&_is_active);
92 }
93
94 void ZRelocateQueue::join(uint nworkers) {
95 assert(nworkers != 0, "Must request at least one worker");
96 assert(_nworkers == 0, "Invalid state");
97 assert(_nsynchronized == 0, "Invalid state");
98
99 log_debug(gc, reloc)("Joining workers: %u", nworkers);
100
101 _nworkers = nworkers;
102 }
103
104 void ZRelocateQueue::resize_workers(uint nworkers) {
105 assert(nworkers != 0, "Must request at least one worker");
106 assert(_nworkers == 0, "Invalid state");
107 assert(_nsynchronized == 0, "Invalid state");
108
109 log_debug(gc, reloc)("Resize workers: %u", nworkers);
110
111 ZLocker<ZConditionLock> locker(&_lock);
112 _nworkers = nworkers;
113 }
114
115 void ZRelocateQueue::leave() {
116 ZLocker<ZConditionLock> locker(&_lock);
117 _nworkers--;
118
119 assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);
120
121 log_debug(gc, reloc)("Leaving workers: left: %u _synchronize: %d _nsynchronized: %u", _nworkers, _synchronize, _nsynchronized);
122
123 // Prune done forwardings
124 const bool forwardings_done = prune();
125
126 // Check if all workers synchronized
127 const bool last_synchronized = _synchronize && _nworkers == _nsynchronized;
128
129 if (forwardings_done || last_synchronized) {
130 _lock.notify_all();
131 }
132 }
133
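// Called by a thread that failed to relocate an object itself. Adds the
// forwarding to the queue and blocks until a worker thread has completed
// the relocation of the corresponding page.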
134 void ZRelocateQueue::add_and_wait(ZForwarding* forwarding) {
135 ZStatTimer timer(ZCriticalPhaseRelocationStall);
136 ZLocker<ZConditionLock> locker(&_lock);
137
138 if (forwarding->is_done()) {
139 return;
140 }
141
142 _queue.append(forwarding);
143 if (_queue.length() == 1) {
144 // Queue became non-empty
145 inc_needs_attention();
146 _lock.notify_all();
147 }
148
149 while (!forwarding->is_done()) {
150 _lock.wait();
151 }
152 }
153
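// Removes completed forwardings from the queue and returns true if any
// were removed. Drops the needs-attention condition when the queue
// becomes empty.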
154 bool ZRelocateQueue::prune() {
155 if (_queue.is_empty()) {
156 return false;
157 }
158
159 bool done = false;
160
161 for (int i = 0; i < _queue.length();) {
162 const ZForwarding* const forwarding = _queue.at(i);
163 if (forwarding->is_done()) {
164 done = true;
165
166 _queue.delete_at(i);
167 } else {
168 i++;
169 }
170 }
171
172 if (_queue.is_empty()) {
173 dec_needs_attention();
174 }
175
176 return done;
177 }
178
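// Prunes completed forwardings, waking up any waiters, and returns the
// first unclaimed forwarding in the queue, or nullptr if there is none.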
179 ZForwarding* ZRelocateQueue::prune_and_claim() {
180 if (prune()) {
181 _lock.notify_all();
182 }
183
184 for (int i = 0; i < _queue.length(); i++) {
185 ZForwarding* const forwarding = _queue.at(i);
186 if (forwarding->claim()) {
187 return forwarding;
188 }
189 }
190
191 return nullptr;
192 }
193
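// Scoped helper that counts the calling worker as synchronized while it
// waits in synchronize_poll().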
194 class ZRelocateQueueSynchronizeThread {
195 private:
196 ZRelocateQueue* const _queue;
197
198 public:
199 ZRelocateQueueSynchronizeThread(ZRelocateQueue* queue)
200 : _queue(queue) {
201 _queue->synchronize_thread();
202 }
203
204 ~ZRelocateQueueSynchronizeThread() {
205 _queue->desynchronize_thread();
206 }
207 };
208
209 void ZRelocateQueue::synchronize_thread() {
210 _nsynchronized++;
211
212 log_debug(gc, reloc)("Synchronize worker _nsynchronized %u", _nsynchronized);
213
214 assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);
215 if (_nsynchronized == _nworkers) {
216 // All workers synchronized
217 _lock.notify_all();
218 }
219 }
220
221 void ZRelocateQueue::desynchronize_thread() {
222 _nsynchronized--;
223
224 log_debug(gc, reloc)("Desynchronize worker _nsynchronized %u", _nsynchronized);
225
226 assert(_nsynchronized < _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);
227 }
228
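// Polled by relocation workers. Returns a queued forwarding to relocate
// next, or blocks while a synchronize request is pending. Returns nullptr
// when there is nothing to do.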
229 ZForwarding* ZRelocateQueue::synchronize_poll() {
230 // Fast path avoids locking
231 if (!needs_attention()) {
232 return nullptr;
233 }
234
235 // Slow path to get the next forwarding and/or synchronize
236 ZLocker<ZConditionLock> locker(&_lock);
237
238 {
239 ZForwarding* const forwarding = prune_and_claim();
240 if (forwarding != nullptr) {
241 // Don't become synchronized while there are elements in the queue
242 return forwarding;
243 }
244 }
245
246 if (!_synchronize) {
247 return nullptr;
248 }
249
250 ZRelocateQueueSynchronizeThread rqst(this);
251
252 do {
253 _lock.wait();
254
255 ZForwarding* const forwarding = prune_and_claim();
256 if (forwarding != nullptr) {
257 return forwarding;
258 }
259 } while (_synchronize);
260
261 return nullptr;
262 }
263
264 void ZRelocateQueue::clear() {
265 assert(_nworkers == 0, "Invalid state");
266
267 if (_queue.is_empty()) {
268 return;
269 }
270
271 ZArrayIterator<ZForwarding*> iter(&_queue);
272 for (ZForwarding* forwarding; iter.next(&forwarding);) {
273 assert(forwarding->is_done(), "All should be done");
274 }
275
276 assert(false, "Clear was not empty");
277
278 _queue.clear();
279 dec_needs_attention();
280 }
281
282 void ZRelocateQueue::synchronize() {
283 ZLocker<ZConditionLock> locker(&_lock);
284 _synchronize = true;
285
286 inc_needs_attention();
287
288 log_debug(gc, reloc)("Synchronize all workers 1 _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);
289
290 while (_nworkers != _nsynchronized) {
291 _lock.wait();
292 log_debug(gc, reloc)("Synchronize all workers 2 _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);
293 }
294 }
295
296 void ZRelocateQueue::desynchronize() {
297 ZLocker<ZConditionLock> locker(&_lock);
298 _synchronize = false;
299
300 log_debug(gc, reloc)("Desynchronize all workers _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);
301
302 assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);
303
304 dec_needs_attention();
305
306 _lock.notify_all();
307 }
308
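// Holds the current relocation target pages, one per NUMA partition and
// relocation age. The age index is offset by one because eden is never a
// relocation target age.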
309 ZRelocationTargets::ZRelocationTargets()
310 : _targets() {}
311
312 ZPage* ZRelocationTargets::get(uint32_t partition_id, ZPageAge age) {
313 return _targets.get(partition_id)[untype(age) - 1];
314 }
315
316 void ZRelocationTargets::set(uint32_t partition_id, ZPageAge age, ZPage* page) {
317 _targets.get(partition_id)[untype(age) - 1] = page;
318 }
319
320 template <typename Function>
321 void ZRelocationTargets::apply_and_clear_targets(Function function) {
322 ZPerNUMAIterator<TargetArray> iter(&_targets);
323 for (TargetArray* targets; iter.next(&targets);) {
324 for (size_t i = 0; i < ZNumRelocationAges; i++) {
325 // Apply function
326 function((*targets)[i]);
327
328 // Clear target
329 (*targets)[i] = nullptr;
330 }
331 }
332 }
333
334 ZRelocate::ZRelocate(ZGeneration* generation)
335 : _generation(generation),
336 _queue(),
337 _iters(),
338 _small_targets(),
339 _medium_targets(),
340 _shared_medium_targets() {}
341
342 ZWorkers* ZRelocate::workers() const {
343 return _generation->workers();
344 }
345
346 void ZRelocate::start() {
347 _queue.activate(workers()->active_workers());
348 }
349
350 void ZRelocate::add_remset(volatile zpointer* p) {
351 ZGeneration::young()->remember(p);
352 }
353
354 static zaddress relocate_object_inner(ZForwarding* forwarding, zaddress from_addr, ZForwardingCursor* cursor) {
355 assert(ZHeap::heap()->is_object_live(from_addr), "Should be live");
356
357 // Allocate object
358 const size_t old_size = ZUtils::object_size(from_addr);
359 const size_t size = ZUtils::copy_size(from_addr, old_size);
360 const ZPageAge to_age = forwarding->to_age();
361
362 const zaddress to_addr = ZHeap::heap()->alloc_object_for_relocation(size, to_age);
363
364 if (is_null(to_addr)) {
365 // Allocation failed
366 return zaddress::null;
367 }
368 assert(to_addr != from_addr, "addresses must be different");
369
370 // Copy object
371 ZUtils::object_copy_disjoint(from_addr, to_addr, old_size);
372 ZUtils::initialize_hash_if_necessary(to_addr, from_addr);
373
374 // Insert forwarding
375 const zaddress to_addr_final = forwarding->insert(from_addr, to_addr, cursor);
376
377 if (to_addr_final != to_addr) {
378 // Already relocated, try undo allocation
379 ZHeap::heap()->undo_alloc_object_for_relocation(to_addr, size);
380 }
381
382 return to_addr_final;
383 }
384
385 zaddress ZRelocate::relocate_object(ZForwarding* forwarding, zaddress_unsafe from_addr) {
386 ZForwardingCursor cursor;
387
388 // Lookup forwarding
389 zaddress to_addr = forwarding->find(from_addr, &cursor);
390 if (!is_null(to_addr)) {
391 // Already relocated
392 return to_addr;
393 }
394
395 // Relocate object
396 if (forwarding->retain_page(&_queue)) {
397 assert(_generation->is_phase_relocate(), "Must be");
398 to_addr = relocate_object_inner(forwarding, safe(from_addr), &cursor);
399 forwarding->release_page();
400
401 if (!is_null(to_addr)) {
402 // Success
403 return to_addr;
404 }
405
406 // Failed to relocate object. Signal and wait for a worker thread to
407 // complete relocation of this page, and then forward the object.
408 _queue.add_and_wait(forwarding);
409 }
410
411 // Forward object
412 return forward_object(forwarding, from_addr);
413 }
414
415 zaddress ZRelocate::forward_object(ZForwarding* forwarding, zaddress_unsafe from_addr) {
416 const zaddress to_addr = forwarding->find(from_addr);
417 assert(!is_null(to_addr), "Should be forwarded: " PTR_FORMAT, untype(from_addr));
418 return to_addr;
419 }
420
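// Attempts to allocate a new target page matching the forwarding's type,
// size, to-age and preferred NUMA partition. The allocation is non-blocking,
// so nullptr is returned if memory is not immediately available.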
421 static ZPage* alloc_page(ZForwarding* forwarding) {
422 if (ZStressRelocateInPlace) {
423 // Simulate failure to allocate a new page. This will
424 // cause the page being relocated to be relocated in-place.
425 return nullptr;
426 }
427
428 const ZPageType type = forwarding->type();
429 const size_t size = forwarding->size();
430 const ZPageAge age = forwarding->to_age();
431 const uint32_t preferred_partition = forwarding->partition_id();
432
433 ZAllocationFlags flags;
434 flags.set_non_blocking();
435 flags.set_gc_relocation();
436
437 return ZHeap::heap()->alloc_page(type, size, flags, age, preferred_partition);
438 }
439
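// Accounts the used space of a retired target page as promoted or
// compacted, and frees the page if it ends up empty.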
440 static void retire_target_page(ZGeneration* generation, ZPage* page) {
441 if (generation->is_young() && page->is_old()) {
442 generation->increase_promoted(page->used());
443 } else {
444 generation->increase_compacted(page->used());
445 }
446
447 // Free target page if it is empty. We can end up with an empty target
448 // page if we allocated a new target page, and then lost the race to
449 // relocate the remaining objects, leaving the target page empty when
450 // relocation completed.
451 if (page->used() == 0) {
452 ZHeap::heap()->free_page(page);
453 }
454 }
455
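// Allocator for small target pages. Target pages are private to each
// worker, so in-page allocations don't need to be atomic.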
456 class ZRelocateSmallAllocator {
457 private:
458 ZGeneration* const _generation;
459 volatile size_t _in_place_count;
460
461 public:
462 ZRelocateSmallAllocator(ZGeneration* generation)
463 : _generation(generation),
464 _in_place_count(0) {}
465
466 ZPage* alloc_and_retire_target_page(ZForwarding* forwarding, ZPage* target) {
467 ZPage* const page = alloc_page(forwarding);
468 if (page == nullptr) {
469 AtomicAccess::inc(&_in_place_count);
470 }
471
472 if (target != nullptr) {
473 // Retire the old target page
474 retire_target_page(_generation, target);
475 }
476
477 return page;
478 }
479
480 void share_target_page(ZPage* page, uint32_t partition_id) {
481 // Does nothing
482 }
483
484 void free_target_page(ZPage* page) {
485 if (page != nullptr) {
486 retire_target_page(_generation, page);
487 }
488 }
489
490 zaddress alloc_object(ZPage* page, size_t size) const {
491 return (page != nullptr) ? page->alloc_object(size) : zaddress::null;
492 }
493
494 void undo_alloc_object(ZPage* page, zaddress addr, size_t size) const {
495 page->undo_alloc_object(addr, size);
496 }
497
498 size_t in_place_count() const {
499 return _in_place_count;
500 }
501 };
502
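// Allocator for medium target pages. Target pages are shared by all
// workers (per NUMA partition and age), so in-page allocations are atomic
// and target page replacement is coordinated under a lock.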
503 class ZRelocateMediumAllocator {
504 private:
505 ZGeneration* const _generation;
506 ZConditionLock _lock;
507 ZRelocationTargets* _shared_targets;
508 bool _in_place;
509 volatile size_t _in_place_count;
510
511 public:
512 ZRelocateMediumAllocator(ZGeneration* generation, ZRelocationTargets* shared_targets)
513 : _generation(generation),
514 _lock(),
515 _shared_targets(shared_targets),
516 _in_place(false),
517 _in_place_count(0) {}
518
519 ~ZRelocateMediumAllocator() {
520 _shared_targets->apply_and_clear_targets([&](ZPage* page) {
521 if (page != nullptr) {
522 retire_target_page(_generation, page);
523 }
524 });
525 }
526
527 ZPage* alloc_and_retire_target_page(ZForwarding* forwarding, ZPage* target) {
528 ZLocker<ZConditionLock> locker(&_lock);
529
530 // Wait for any ongoing in-place relocation to complete
531 while (_in_place) {
532 _lock.wait();
533 }
534
535 // Allocate a new page only if the shared page is the same as the
536 // current target page. The shared page will be different from the
537 // current target page if another thread shared a page, or allocated
538 // a new page.
539 const ZPageAge to_age = forwarding->to_age();
540 const uint32_t partition_id = forwarding->partition_id();
541 if (_shared_targets->get(partition_id, to_age) == target) {
542 ZPage* const to_page = alloc_page(forwarding);
543 _shared_targets->set(partition_id, to_age, to_page);
544 if (to_page == nullptr) {
545 AtomicAccess::inc(&_in_place_count);
546 _in_place = true;
547 }
548
549 // This thread is responsible for retiring the shared target page
550 if (target != nullptr) {
551 retire_target_page(_generation, target);
552 }
553 }
554
555 return _shared_targets->get(partition_id, to_age);
556 }
557
558 void share_target_page(ZPage* page, uint32_t partition_id) {
559 const ZPageAge age = page->age();
560
561 ZLocker<ZConditionLock> locker(&_lock);
562 assert(_in_place, "Invalid state");
563 assert(_shared_targets->get(partition_id, age) == nullptr, "Invalid state");
564 assert(page != nullptr, "Invalid page");
565
566 _shared_targets->set(partition_id, age, page);
567 _in_place = false;
568
569 _lock.notify_all();
570 }
571
572 void free_target_page(ZPage* page) {
573 // Does nothing
574 }
575
576 zaddress alloc_object(ZPage* page, size_t size) const {
577 return (page != nullptr) ? page->alloc_object_atomic(size) : zaddress::null;
578 }
579
580 void undo_alloc_object(ZPage* page, zaddress addr, size_t size) const {
581 page->undo_alloc_object_atomic(addr, size);
582 }
583
584 size_t in_place_count() const {
585 return _in_place_count;
586 }
587 };
588
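// Per-worker relocation state. Relocates all live objects of a claimed
// page to this worker's target pages, and takes care of remembered set
// maintenance and in-place relocation when target page allocation fails.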
589 template <typename Allocator>
590 class ZRelocateWork : public StackObj {
591 private:
592 Allocator* const _allocator;
593 ZForwarding* _forwarding;
594 ZRelocationTargets* _targets;
595 ZGeneration* const _generation;
596 size_t _other_promoted;
597 size_t _other_compacted;
598 ZStringDedupContext _string_dedup_context;
599
600 size_t object_alignment() const {
601 return (size_t)1 << _forwarding->object_alignment_shift();
602 }
603
604 void increase_other_forwarded(size_t unaligned_object_size) {
605 const size_t aligned_size = align_up(unaligned_object_size, object_alignment());
606 if (_forwarding->is_promotion()) {
607 _other_promoted += aligned_size;
608 } else {
609 _other_compacted += aligned_size;
610 }
611 }
612
613 zaddress try_relocate_object_inner(zaddress from_addr, uint32_t partition_id, size_t old_size) {
614 ZForwardingCursor cursor;
615
616 ZPage* const to_page = _targets->get(partition_id, _forwarding->to_age());
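// Select the copy size. If the next allocation in the target page would land
// exactly at the from-object (in-place relocation where the object does not
// move), the old size is kept; otherwise the potentially larger copy size is
// used (see ZUtils::copy_size()).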
617 zoffset_end from_offset = to_zoffset_end(ZAddress::offset(from_addr));
618 zoffset_end top = to_page != nullptr ? to_page->top() : to_zoffset_end(0);
619 const size_t new_size = ZUtils::copy_size(from_addr, old_size);
620 const size_t size = top == from_offset ? old_size : new_size;
621
622 // Lookup forwarding
623 {
624 const zaddress to_addr = _forwarding->find(from_addr, &cursor);
625 if (!is_null(to_addr)) {
626 // Already relocated
627 increase_other_forwarded(size);
628 return to_addr;
629 }
630 }
631
632 // Allocate object
633 const zaddress allocated_addr = _allocator->alloc_object(to_page, size);
634 if (is_null(allocated_addr)) {
635 // Allocation failed
636 return zaddress::null;
637 }
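// The copy size chosen above assumed a specific outcome for whether the
// object moves. If the actual allocation contradicts that assumption, undo
// the allocation and fail, letting the caller retry.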
638 if (old_size != new_size && ((top == from_offset) != (allocated_addr == from_addr))) {
639 _allocator->undo_alloc_object(to_page, allocated_addr, size);
640 return zaddress::null;
641 }
642
643 // Copy object. Use conjoint copying if we are relocating
644 // in-place and the new object overlaps with the old object.
645 if (_forwarding->in_place_relocation() && allocated_addr + old_size > from_addr) {
646 ZUtils::object_copy_conjoint(from_addr, allocated_addr, old_size);
647 } else {
648 ZUtils::object_copy_disjoint(from_addr, allocated_addr, old_size);
649 }
650 if (from_addr != allocated_addr) {
651 ZUtils::initialize_hash_if_necessary(allocated_addr, from_addr);
652 }
653
654 // Insert forwarding
655 const zaddress to_addr = _forwarding->insert(from_addr, allocated_addr, &cursor);
656 if (to_addr != allocated_addr) {
657 // Already relocated, undo allocation
658 _allocator->undo_alloc_object(to_page, allocated_addr, size);
659 increase_other_forwarded(size);
660 }
661
662 return to_addr;
663 }
664
665 void update_remset_old_to_old(zaddress from_addr, zaddress to_addr, size_t size) const {
666 // Old-to-old relocation - move existing remset bits
667
668 // If this is called for an in-place relocated page, then this code has the
669 // responsibility to clear the old remset bits. Extra care is needed because:
670 //
671 // 1) The to-object copy can overlap with the from-object copy
672 // 2) Remset bits of old objects need to be cleared
673 //
674 // A watermark is used to keep track of how far the old remset bits have been removed.
675
676 const bool in_place = _forwarding->in_place_relocation();
677 ZPage* const from_page = _forwarding->page();
678 const uintptr_t from_local_offset = from_page->local_offset(from_addr);
679
680 // Note: even with in-place relocation, the to_page could be another page
681 ZPage* const to_page = ZHeap::heap()->page(to_addr);
682
683 // Uses _relaxed version to handle that in-place relocation resets _top
684 assert(ZHeap::heap()->is_in_page_relaxed(from_page, from_addr), "Must be");
685 assert(to_page->is_in(to_addr), "Must be");
686
687 assert(size <= ZUtils::object_size(to_addr), "old size must be <= new size");
688 assert(size > 0, "size must be set");
689
690 // If a young generation collection started while the old generation
691 // relocated objects, the remembered set bits were flipped from "current"
692 // to "previous".
693 //
694 // We need to select the correct remembered set bitmap to ensure that the
695 // old remset bits are found.
696 //
697 // Note that if the young generation marking (remset scanning) finishes
698 // before the old generation relocation has relocated this page, then the
699 // young generation will visit this page's previous remembered set bits and
700 // move them over to the current bitmap.
701 //
702 // If the young generation runs multiple cycles while the old generation is
703 // relocating, then the first cycle will have consumed the old remset
704 // bits and moved the associated objects to a new old page. The old relocation
705 // could find either of the two bitmaps. So, either it will find the original
706 // remset bits for the page, or it will find an empty bitmap for the page. It
707 // doesn't matter for correctness, because the young generation marking has
708 // already taken care of the bits.
709
710 const bool active_remset_is_current = ZGeneration::old()->active_remset_is_current();
711
712 // When in-place relocation is done and the old remset bits are located in
713 // the bitmap that is going to be used for the new remset bits, then we
714 // need to clear the old bits before the new bits are inserted.
715 const bool iterate_current_remset = active_remset_is_current && !in_place;
716
717 BitMap::Iterator iter = iterate_current_remset
718 ? from_page->remset_iterator_limited_current(from_local_offset, size)
719 : from_page->remset_iterator_limited_previous(from_local_offset, size);
720
721 for (BitMap::idx_t field_bit : iter) {
722 const uintptr_t field_local_offset = ZRememberedSet::to_offset(field_bit);
723
724 // Add remset entry in the to-page
725 const uintptr_t offset = field_local_offset - from_local_offset;
726 const zaddress to_field = to_addr + offset;
727 log_trace(gc, reloc)("Remember: from: " PTR_FORMAT " to: " PTR_FORMAT " current: %d marking: %d page: " PTR_FORMAT " remset: " PTR_FORMAT,
728 untype(from_page->start() + field_local_offset), untype(to_field), active_remset_is_current, ZGeneration::young()->is_phase_mark(), p2i(to_page), p2i(to_page->remset_current()));
729
730 volatile zpointer* const p = (volatile zpointer*)to_field;
731
732 if (ZGeneration::young()->is_phase_mark()) {
733 // Young generation remembered set scanning needs to know about this
734 // field. It will take responsibility for adding a new remembered set entry if needed.
735 _forwarding->relocated_remembered_fields_register(p);
736 } else {
737 to_page->remember(p);
738 if (in_place) {
739 assert(to_page->is_remembered(p), "p: " PTR_FORMAT, p2i(p));
740 }
741 }
742 }
743 }
744
745 static bool add_remset_if_young(volatile zpointer* p, zaddress addr) {
746 if (ZHeap::heap()->is_young(addr)) {
747 ZRelocate::add_remset(p);
748 return true;
749 }
750
751 return false;
752 }
753
754 static void update_remset_promoted_filter_and_remap_per_field(volatile zpointer* p) {
755 const zpointer ptr = AtomicAccess::load(p);
756
757 assert(ZPointer::is_old_load_good(ptr), "Should be at least old load good: " PTR_FORMAT, untype(ptr));
758
759 if (ZPointer::is_store_good(ptr)) {
760 // Already has a remset entry
761 return;
762 }
763
764 if (ZPointer::is_load_good(ptr)) {
765 if (!is_null_any(ptr)) {
766 const zaddress addr = ZPointer::uncolor(ptr);
767 add_remset_if_young(p, addr);
768 }
769 // No need to remap; it is already load good
770 return;
771 }
772
773 if (is_null_any(ptr)) {
774 // Eagerly remap to skip adding a remset entry just to get deferred remapping
775 ZBarrier::remap_young_relocated(p, ptr);
776 return;
777 }
778
779 const zaddress_unsafe addr_unsafe = ZPointer::uncolor_unsafe(ptr);
780 ZForwarding* const forwarding = ZGeneration::young()->forwarding(addr_unsafe);
781
782 if (forwarding == nullptr) {
783 // Object isn't being relocated
784 const zaddress addr = safe(addr_unsafe);
785 if (!add_remset_if_young(p, addr)) {
786 // Not young - eagerly remap to skip adding a remset entry just to get deferred remapping
787 ZBarrier::remap_young_relocated(p, ptr);
788 }
789 return;
790 }
791
792 const zaddress addr = forwarding->find(addr_unsafe);
793
794 if (!is_null(addr)) {
795 // Object has already been relocated
796 if (!add_remset_if_young(p, addr)) {
797 // Not young - eagerly remap to skip adding a remset entry just to get deferred remapping
798 ZBarrier::remap_young_relocated(p, ptr);
799 }
800 return;
801 }
802
803 // Object has not been relocated yet
804 // Don't want to eagerly relocate objects, so just add a remset entry
805 ZRelocate::add_remset(p);
806 return;
807 }
808
809 void update_remset_promoted(zaddress to_addr) const {
810 ZIterator::basic_oop_iterate(to_oop(to_addr), update_remset_promoted_filter_and_remap_per_field);
811 }
812
813 void update_remset_for_fields(zaddress from_addr, zaddress to_addr, size_t size) const {
814 if (_forwarding->to_age() != ZPageAge::old) {
815 // No remembered set in young pages
816 return;
817 }
818
819 // Need to deal with remset when moving objects to the old generation
820 if (_forwarding->from_age() == ZPageAge::old) {
821 update_remset_old_to_old(from_addr, to_addr, size);
822 return;
823 }
824
825 // Normal promotion
826 update_remset_promoted(to_addr);
827 }
828
829 void maybe_string_dedup(zaddress to_addr) {
830 if (_forwarding->is_promotion()) {
831 // Only deduplicate promoted objects, and let short-lived strings simply die instead.
832 _string_dedup_context.request(to_oop(to_addr));
833 }
834 }
835
836 bool try_relocate_object(zaddress from_addr, uint32_t partition_id) {
837 size_t size = ZUtils::object_size(from_addr);
838 const zaddress to_addr = try_relocate_object_inner(from_addr, partition_id, size);
839
840 if (is_null(to_addr)) {
841 return false;
842 }
843
844 update_remset_for_fields(from_addr, to_addr, size);
845
846 maybe_string_dedup(to_addr);
847
848 return true;
849 }
850
851 void start_in_place_relocation_prepare_remset(ZPage* from_page) {
852 if (_forwarding->from_age() != ZPageAge::old) {
853 // Only old pages use remset bits
854 return;
855 }
856
857 if (ZGeneration::old()->active_remset_is_current()) {
858 // We want to iterate over and clear the remset bits of the from-space page,
859 // and insert current bits in the to-space page. However, with in-place
860 // relocation, the from-space and to-space pages are the same. Clearing
861 // is destructive, and is difficult to perform before or during the iteration.
862 // However, clearing of the current bits has to be done before exposing the
863 // to-space objects in the forwarding table.
864 //
865 // To solve this tricky dependency problem, we start by stashing away the
866 // current bits in the previous bits, and clearing the current bits
867 // (implemented by swapping the bits). This way, the current bits are
868 // cleared before copying the objects (like a normal to-space page),
869 // and the previous bits are representing a copy of the current bits
870 // of the from-space page, and are used for iteration.
871 from_page->swap_remset_bitmaps();
872 }
873 }
874
875 ZPage* start_in_place_relocation(zoffset relocated_watermark) {
876 _forwarding->in_place_relocation_claim_page();
877 _forwarding->in_place_relocation_start(relocated_watermark);
878
879 ZPage* const from_page = _forwarding->page();
880
881 const ZPageAge to_age = _forwarding->to_age();
882 const bool promotion = _forwarding->is_promotion();
883
884 // Promotions happen through a new cloned page
885 ZPage* const to_page = promotion
886 ? from_page->clone_for_promotion()
887 : from_page->reset(to_age);
888
889 // Reset page for in-place relocation
890 to_page->reset_top_for_allocation();
891
892 // Verify that the inactive remset is clear when resetting the page for
893 // in-place relocation.
894 if (from_page->age() == ZPageAge::old) {
895 if (ZGeneration::old()->active_remset_is_current()) {
896 to_page->verify_remset_cleared_previous();
897 } else {
898 to_page->verify_remset_cleared_current();
899 }
900 }
901
902 // Clear remset bits for all objects that were relocated
903 // before this page became an in-place relocated page.
904 start_in_place_relocation_prepare_remset(from_page);
905
906 if (promotion) {
907 // Register the promotion
908 ZGeneration::young()->in_place_relocate_promote(from_page, to_page);
909 ZGeneration::young()->register_in_place_relocate_promoted(from_page);
910 }
911
912 return to_page;
913 }
914
915 void relocate_object(oop obj) {
916 const zaddress addr = to_zaddress(obj);
917 assert(ZHeap::heap()->is_object_live(addr), "Should be live");
918
919 const ZPageAge to_age = _forwarding->to_age();
920 const uint32_t partition_id = _forwarding->partition_id();
921
922 while (!try_relocate_object(addr, partition_id)) {
923 // Failed to relocate object, try to allocate a new target page,
924 // or if that fails, use the page being relocated as the new target,
925 // which will cause it to be relocated in-place.
926 ZPage* const target_page = _targets->get(partition_id, to_age);
927 ZPage* to_page = _allocator->alloc_and_retire_target_page(_forwarding, target_page);
928 _targets->set(partition_id, to_age, to_page);
929
930 // We got a new page, retry relocation
931 if (to_page != nullptr) {
932 continue;
933 }
934
935 // Start in-place relocation to block other threads from accessing
936 // the page, or its forwarding table, until it has been released
937 // (relocation completed).
938 to_page = start_in_place_relocation(ZAddress::offset(addr));
939 _targets->set(partition_id, to_age, to_page);
940 }
941 }
942
943 public:
944 ZRelocateWork(Allocator* allocator, ZRelocationTargets* targets, ZGeneration* generation)
945 : _allocator(allocator),
946 _forwarding(nullptr),
947 _targets(targets),
948 _generation(generation),
949 _other_promoted(0),
950 _other_compacted(0) {}
951
952 ~ZRelocateWork() {
953 _targets->apply_and_clear_targets([&](ZPage* page) {
954 _allocator->free_target_page(page);
955 });
956
957 // Report statistics on behalf of non-worker threads
958 _generation->increase_promoted(_other_promoted);
959 _generation->increase_compacted(_other_compacted);
960 }
961
962 bool active_remset_is_current() const {
963 // Normal old-to-old relocation can treat the from-page remset as a
964 // read-only copy, and then copy over the appropriate remset bits to the
965 // cleared to-page's 'current' remset bitmap.
966 //
967 // In-place relocation is more complicated. Since the same page is both
968 // a from-page and a to-page, we need to remove the old remset bits, and
969 // add remset bits that correspond to the new locations of the relocated
970 // objects.
971 //
972 // Depending on how long ago the page was allocated (in terms of the number
973 // of young GCs and the current young GC's phase), the active remembered
974 // set will be in either the 'current' or 'previous' bitmap.
975 //
976 // If the active bits are in the 'previous' bitmap, we know that the
977 // 'current' bitmap was cleared at some earlier point in time, and we can
978 // simply set new bits in the 'current' bitmap, and later when relocation has
979 // read all the old remset bits, we could just clear the 'previous' remset
980 // bitmap.
981 //
982 // If, on the other hand, the active bits are in the 'current' bitmap, then
983 // that bitmap is both the source of the old remset bits and the
984 // destination for the remset bits that we copy when an object is copied
985 // to its new location within the page. We need to *carefully* remove all
986 // old remset bits, without clearing out the newly set bits.
987 return ZGeneration::old()->active_remset_is_current();
988 }
989
990 void clear_remset_before_in_place_reuse(ZPage* page) {
991 if (_forwarding->from_age() != ZPageAge::old) {
992 // No remset bits
993 return;
994 }
995
996 // Clear 'previous' remset bits. For in-place relocated pages, the previous
997 // remset bits are always used, even when active_remset_is_current().
998 page->clear_remset_previous();
999 }
1000
1001 void finish_in_place_relocation() {
1002 // We are done with the from_space copy of the page
1003 _forwarding->in_place_relocation_finish();
1004 }
1005
1006 void do_forwarding(ZForwarding* forwarding) {
1007 _forwarding = forwarding;
1008
1009 _forwarding->page()->log_msg(" (relocate page)");
1010
1011 ZVerify::before_relocation(_forwarding);
1012
1013 // Relocate objects
1014 _forwarding->object_iterate([&](oop obj) { relocate_object(obj); });
1015
1016 ZVerify::after_relocation(_forwarding);
1017
1018 // Verify
1019 if (ZVerifyForwarding) {
1020 _forwarding->verify();
1021 }
1022
1023 _generation->increase_freed(_forwarding->page()->size());
1024
1025 // Deal with in-place relocation
1026 const bool in_place = _forwarding->in_place_relocation();
1027 if (in_place) {
1028 finish_in_place_relocation();
1029 }
1030
1031 // Old from-space pages need to deal with remset bits
1032 if (_forwarding->from_age() == ZPageAge::old) {
1033 _forwarding->relocated_remembered_fields_after_relocate();
1034 }
1035
1036 // Release relocated page
1037 _forwarding->release_page();
1038
1039 if (in_place) {
1040 // Wait for all other threads to call release_page
1041 ZPage* const page = _forwarding->detach_page();
1042
1043 // Ensure that previous remset bits are cleared
1044 clear_remset_before_in_place_reuse(page);
1045
1046 page->log_msg(" (relocate page done in-place)");
1047
1048 // Different pages when promoting
1049 const uint32_t target_partition = _forwarding->partition_id();
1050 ZPage* const target_page = _targets->get(target_partition, _forwarding->to_age());
1051 _allocator->share_target_page(target_page, target_partition);
1052
1053 } else {
1054 // Wait for all other threads to call release_page
1055 ZPage* const page = _forwarding->detach_page();
1056
1057 page->log_msg(" (relocate page done normal)");
1058
1059 // Free page
1060 ZHeap::heap()->free_page(page);
1061 }
1062 }
1063 };
1064
1065 class ZRelocateStoreBufferInstallBasePointersThreadClosure : public ThreadClosure {
1066 public:
1067 virtual void do_thread(Thread* thread) {
1068 JavaThread* const jt = JavaThread::cast(thread);
1069 ZStoreBarrierBuffer* buffer = ZThreadLocalData::store_barrier_buffer(jt);
1070 buffer->install_base_pointers();
1071 }
1072 };
1073
1074 // Installs the object base pointers (object starts) for the fields written
1075 // in the store buffer. The code that searches for the object start uses the
1076 // liveness information stored in the pages. That information is lost when the
1077 // pages have been relocated and then destroyed.
1078 class ZRelocateStoreBufferInstallBasePointersTask : public ZTask {
1079 private:
1080 ZJavaThreadsIterator _threads_iter;
1081
1082 public:
1083 ZRelocateStoreBufferInstallBasePointersTask(ZGeneration* generation)
1084 : ZTask("ZRelocateStoreBufferInstallBasePointersTask"),
1085 _threads_iter(generation->id_optional()) {}
1086
1087 virtual void work() {
1088 ZRelocateStoreBufferInstallBasePointersThreadClosure fix_store_buffer_cl;
1089 _threads_iter.apply(&fix_store_buffer_cl);
1090 }
1091 };
1092
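// The main relocation task. Workers first drain the relocate queue, which
// holds pages that stalled threads are waiting on, and then claim
// forwardings from the per-NUMA iterators, preferring their own NUMA node.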
1093 class ZRelocateTask : public ZRestartableTask {
1094 private:
1095 ZGeneration* const _generation;
1096 ZRelocateQueue* const _queue;
1097 ZPerNUMA<ZRelocationSetParallelIterator>* _iters;
1098 ZPerWorker<ZRelocationTargets>* _small_targets;
1099 ZPerWorker<ZRelocationTargets>* _medium_targets;
1100 ZRelocateSmallAllocator _small_allocator;
1101 ZRelocateMediumAllocator _medium_allocator;
1102 const size_t _total_forwardings;
1103 volatile size_t _numa_local_forwardings;
1104
1105 public:
1106 ZRelocateTask(ZRelocationSet* relocation_set,
1107 ZRelocateQueue* queue,
1108 ZPerNUMA<ZRelocationSetParallelIterator>* iters,
1109 ZPerWorker<ZRelocationTargets>* small_targets,
1110 ZPerWorker<ZRelocationTargets>* medium_targets,
1111 ZRelocationTargets* shared_medium_targets)
1112 : ZRestartableTask("ZRelocateTask"),
1113 _generation(relocation_set->generation()),
1114 _queue(queue),
1115 _iters(iters),
1116 _small_targets(small_targets),
1117 _medium_targets(medium_targets),
1118 _small_allocator(_generation),
1119 _medium_allocator(_generation, shared_medium_targets),
1120 _total_forwardings(relocation_set->nforwardings()),
1121 _numa_local_forwardings(0) {
1122
1123 for (uint32_t i = 0; i < ZNUMA::count(); i++) {
1124 ZRelocationSetParallelIterator* const iter = _iters->addr(i);
1125
1126 // Destroy the iterator from the previous GC cycle, which is a temporary
1127 // iterator if this is the first GC cycle.
1128 iter->~ZRelocationSetParallelIterator();
1129
1130 // In-place construct the iterator with the current relocation set
1131 ::new (iter) ZRelocationSetParallelIterator(relocation_set);
1132 }
1133 }
1134
1135 ~ZRelocateTask() {
1136 _generation->stat_relocation()->at_relocate_end(_small_allocator.in_place_count(), _medium_allocator.in_place_count());
1137
1138 // Signal that we're not using the queue anymore. Used mostly for asserts.
1139 _queue->deactivate();
1140
1141 if (ZNUMA::is_enabled()) {
1142 log_debug(gc, reloc, numa)("Forwardings relocated NUMA-locally: %zu / %zu (%.0f%%)",
1143 _numa_local_forwardings, _total_forwardings, percent_of(_numa_local_forwardings, _total_forwardings));
1144 }
1145 }
1146
1147 virtual void work() {
1148 ZRelocateWork<ZRelocateSmallAllocator> small(&_small_allocator, _small_targets->addr(), _generation);
1149 ZRelocateWork<ZRelocateMediumAllocator> medium(&_medium_allocator, _medium_targets->addr(), _generation);
1150 const uint32_t num_nodes = ZNUMA::count();
1151 uint32_t numa_local_forwardings_worker = 0;
1152
1153 const auto do_forwarding = [&](ZForwarding* forwarding) {
1154 ZPage* const page = forwarding->page();
1155 if (page->is_small()) {
1156 small.do_forwarding(forwarding);
1157 } else {
1158 medium.do_forwarding(forwarding);
1159 }
1160
1161 // Absolute last thing done while relocating a page.
1162 //
1163 // We don't use the SuspendibleThreadSet when relocating pages.
1164 // Instead the ZRelocateQueue is used as a pseudo STS joiner/leaver.
1165 //
1166 // After the mark_done call, a safepoint could complete and a
1167 // new GC phase could be entered.
1168 forwarding->mark_done();
1169 };
1170
1171 const auto claim_and_do_forwarding = [&](ZForwarding* forwarding) {
1172 if (forwarding->claim()) {
1173 do_forwarding(forwarding);
1174 }
1175 };
1176
1177 const auto check_numa_local = [&](ZForwarding* forwarding, uint32_t numa_id) {
1178 return forwarding->partition_id() == numa_id;
1179 };
1180
1181 const auto do_forwarding_one_from_iter = [&]() {
1182 ZForwarding* forwarding;
1183 const uint32_t start_node = ZNUMA::id();
1184 uint32_t current_node = start_node;
1185
1186 for (uint32_t i = 0; i < num_nodes; i++) {
1187 if (_iters->get(current_node).next_if(&forwarding, check_numa_local, current_node)) {
1188 claim_and_do_forwarding(forwarding);
1189
1190 if (current_node == start_node) {
1191 // Track if this forwarding was relocated on the local NUMA node
1192 numa_local_forwardings_worker++;
1193 }
1194
1195 return true;
1196 }
1197
1198 // Check next node.
1199 current_node = (current_node + 1) % num_nodes;
1200 }
1201
1202 return false;
1203 };
1204
1205 for (;;) {
1206 // As long as there are requests in the relocate queue, there are threads
1207 // waiting in a VM state that does not allow them to be blocked. The
1208 // worker thread needs to finish relocating these pages, and allow the
1209 // other threads to continue and proceed to a blocking state. After that,
1210 // the worker threads are allowed to safepoint synchronize.
1211 for (ZForwarding* forwarding; (forwarding = _queue->synchronize_poll()) != nullptr;) {
1212 do_forwarding(forwarding);
1213 }
1214
1215 if (!do_forwarding_one_from_iter()) {
1216 // No more work
1217 break;
1218 }
1219
1220 if (_generation->should_worker_resize()) {
1221 break;
1222 }
1223 }
1224
1225 if (ZNUMA::is_enabled()) {
1226 AtomicAccess::add(&_numa_local_forwardings, numa_local_forwardings_worker, memory_order_relaxed);
1227 }
1228
1229 _queue->leave();
1230 }
1231
1232 virtual void resize_workers(uint nworkers) {
1233 _queue->resize_workers(nworkers);
1234 }
1235 };
1236
1237 static void remap_and_maybe_add_remset(volatile zpointer* p) {
1238 const zpointer ptr = AtomicAccess::load(p);
1239
1240 if (ZPointer::is_store_good(ptr)) {
1241 // Already has a remset entry
1242 return;
1243 }
1244
1245 // Remset entries are used for two reasons:
1246 // 1) Young marking of old-to-young pointer roots
1247 // 2) Deferred remapping of stale old-to-young pointers
1248 //
1249 // This load barrier will up-front perform the remapping of (2),
1250 // and the code below only has to make sure we register up-to-date
1251 // old-to-young pointers for (1).
1252 const zaddress addr = ZBarrier::load_barrier_on_oop_field_preloaded(p, ptr);
1253
1254 if (is_null(addr)) {
1255 // No need for remset entries for null pointers
1256 return;
1257 }
1258
1259 if (ZHeap::heap()->is_old(addr)) {
1260 // No need for remset entries for pointers to old gen
1261 return;
1262 }
1263
1264 ZRelocate::add_remset(p);
1265 }
1266
1267 class ZRelocateAddRemsetForFlipPromoted : public ZRestartableTask {
1268 private:
1269 ZStatTimerYoung _timer;
1270 ZArrayParallelIterator<ZPage*> _iter;
1271
1272 public:
1273 ZRelocateAddRemsetForFlipPromoted(ZArray<ZPage*>* pages)
1274 : ZRestartableTask("ZRelocateAddRemsetForFlipPromoted"),
1275 _timer(ZSubPhaseConcurrentRelocateRememberedSetFlipPromotedYoung),
1276 _iter(pages) {}
1277
1278 virtual void work() {
1279 SuspendibleThreadSetJoiner sts_joiner;
1280 ZStringDedupContext string_dedup_context;
1281
1282 for (ZPage* page; _iter.next(&page);) {
1283 page->object_iterate([&](oop obj) {
1284 // Remap oops and add remset if needed
1285 ZIterator::basic_oop_iterate_safe(obj, remap_and_maybe_add_remset);
1286
1287 // String dedup
1288 string_dedup_context.request(obj);
1289 });
1290
1291 SuspendibleThreadSet::yield();
1292 if (ZGeneration::young()->should_worker_resize()) {
1293 return;
1294 }
1295 }
1296 }
1297 };
1298
1299 void ZRelocate::relocate(ZRelocationSet* relocation_set) {
1300 {
1301 // Install the store buffer's base pointers before the
1302 // relocate task destroys the liveness information in
1303 // the relocated pages.
1304 ZRelocateStoreBufferInstallBasePointersTask buffer_task(_generation);
1305 workers()->run(&buffer_task);
1306 }
1307
1308 {
1309 ZRelocateTask relocate_task(relocation_set, &_queue, &_iters, &_small_targets, &_medium_targets, &_shared_medium_targets);
1310 workers()->run(&relocate_task);
1311 }
1312
1313 if (relocation_set->generation()->is_young()) {
1314 ZRelocateAddRemsetForFlipPromoted task(relocation_set->flip_promoted_pages());
1315 workers()->run(&task);
1316 }
1317 }
1318
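// Computes the age an object survives into: old stays old, otherwise the
// age is increased by one, promoting to old once the tenuring threshold
// is reached.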
1319 ZPageAge ZRelocate::compute_to_age(ZPageAge from_age) {
1320 if (from_age == ZPageAge::old) {
1321 return ZPageAge::old;
1322 }
1323
1324 const uint age = untype(from_age);
1325 if (age >= ZGeneration::young()->tenuring_threshold()) {
1326 return ZPageAge::old;
1327 }
1328
1329 return to_zpageage(age + 1);
1330 }
1331
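// Ages pages in place ("flip aging") without relocating their objects.
// Pages that reach the tenuring threshold are cloned as promoted old pages
// and registered for later processing; other pages are reset to their next
// young age.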
1332 class ZFlipAgePagesTask : public ZTask {
1333 private:
1334 ZArrayParallelIterator<ZPage*> _iter;
1335
1336 public:
1337 ZFlipAgePagesTask(const ZArray<ZPage*>* pages)
1338 : ZTask("ZFlipAgePagesTask"),
1339 _iter(pages) {}
1340
1341 virtual void work() {
1342 SuspendibleThreadSetJoiner sts_joiner;
1343 ZArray<ZPage*> promoted_pages;
1344
1345 for (ZPage* prev_page; _iter.next(&prev_page);) {
1346 const ZPageAge from_age = prev_page->age();
1347 const ZPageAge to_age = ZRelocate::compute_to_age(from_age);
1348 assert(from_age != ZPageAge::old, "invalid age for a young collection");
1349
1350 // Figure out if this is proper promotion
1351 const bool promotion = to_age == ZPageAge::old;
1352
1353 // Logging
1354 prev_page->log_msg(promotion ? " (flip promoted)" : " (flip survived)");
1355
1356 // Setup to-space page
1357 ZPage* const new_page = promotion
1358 ? prev_page->clone_for_promotion()
1359 : prev_page->reset(to_age);
1360
1361 // Reset page for flip aging
1362 new_page->reset_livemap();
1363
1364 if (promotion) {
1365 ZGeneration::young()->flip_promote(prev_page, new_page);
1366 // Defer promoted page registration
1367 promoted_pages.push(prev_page);
1368 }
1369
1370 SuspendibleThreadSet::yield();
1371 }
1372
1373 ZGeneration::young()->register_flip_promoted(promoted_pages);
1374 }
1375 };
1376
1377 class ZPromoteBarrierTask : public ZTask {
1378 private:
1379 ZArrayParallelIterator<ZPage*> _iter;
1380
1381 public:
1382 ZPromoteBarrierTask(const ZArray<ZPage*>* pages)
1383 : ZTask("ZPromoteBarrierTask"),
1384 _iter(pages) {}
1385
1386 virtual void work() {
1387 SuspendibleThreadSetJoiner sts_joiner;
1388
1389 for (ZPage* page; _iter.next(&page);) {
1390 // When promoting an object (and before relocate start), we must ensure that all
1391 // contained zpointers are store good. The marking code ensures that for non-null
1392 // pointers, but null pointers are ignored. This code ensures that even null pointers
1393 // are made store good for the promoted objects.
1394 page->object_iterate([&](oop obj) {
1395 ZIterator::basic_oop_iterate_safe(obj, ZBarrier::promote_barrier_on_young_oop_field);
1396 });
1397
1398 SuspendibleThreadSet::yield();
1399 }
1400 }
1401 };
1402
1403 void ZRelocate::flip_age_pages(const ZArray<ZPage*>* pages) {
1404 ZFlipAgePagesTask flip_age_task(pages);
1405 workers()->run(&flip_age_task);
1406 }
1407
1408 void ZRelocate::barrier_flip_promoted_pages(const ZArray<ZPage*>* pages) {
1409 ZPromoteBarrierTask promote_barrier_task(pages);
1410 workers()->run(&promote_barrier_task);
1411 }
1412
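// Requests all relocating workers to park in synchronize_poll() and waits
// until they have done so. desynchronize() releases them again.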
1413 void ZRelocate::synchronize() {
1414 _queue.synchronize();
1415 }
1416
1417 void ZRelocate::desynchronize() {
1418 _queue.desynchronize();
1419 }
1420
1421 ZRelocateQueue* ZRelocate::queue() {
1422 return &_queue;
1423 }
1424
1425 bool ZRelocate::is_queue_active() const {
1426 return _queue.is_active();
1427 }