/*
 * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "gc/shared/gc_globals.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zAbort.inline.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zAllocator.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zForwarding.inline.hpp"
#include "gc/z/zGeneration.inline.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zIndexDistributor.inline.hpp"
#include "gc/z/zIterator.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageAge.hpp"
#include "gc/z/zRelocate.hpp"
#include "gc/z/zRelocationSet.inline.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStackWatermark.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zStringDedup.inline.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zUncoloredRoot.inline.hpp"
#include "gc/z/zVerify.hpp"
#include "gc/z/zWorkers.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/atomic.hpp"
#include "utilities/debug.hpp"

static const ZStatCriticalPhase ZCriticalPhaseRelocationStall("Relocation Stall");
static const ZStatSubPhase ZSubPhaseConcurrentRelocateRememberedSetFlipPromotedYoung("Concurrent Relocate Remset FP", ZGenerationId::young);

ZRelocateQueue::ZRelocateQueue()
  : _lock(),
    _queue(),
    _nworkers(0),
    _nsynchronized(0),
    _synchronize(false),
    _is_active(false),
    _needs_attention(0) {}

bool ZRelocateQueue::needs_attention() const {
  return Atomic::load(&_needs_attention) != 0;
}

void ZRelocateQueue::inc_needs_attention() {
  const int needs_attention = Atomic::add(&_needs_attention, 1);
  assert(needs_attention == 1 || needs_attention == 2, "Invalid state");
}

void ZRelocateQueue::dec_needs_attention() {
  const int needs_attention = Atomic::sub(&_needs_attention, 1);
  assert(needs_attention == 0 || needs_attention == 1, "Invalid state");
}
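
// Note: _needs_attention is a counter rather than a boolean flag. A
// non-empty queue contributes one increment and a pending synchronize
// request contributes another, so the only legal values are 0, 1 and 2
// (which is what the asserts above verify). Workers read it with a single
// atomic load in synchronize_poll() and only take the lock-protected slow
// path when it is non-zero.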

void ZRelocateQueue::activate(uint nworkers) {
  _is_active = true;
  join(nworkers);
}

void ZRelocateQueue::deactivate() {
  Atomic::store(&_is_active, false);
  clear();
}

bool ZRelocateQueue::is_active() const {
  return Atomic::load(&_is_active);
}

void ZRelocateQueue::join(uint nworkers) {
  assert(nworkers != 0, "Must request at least one worker");
  assert(_nworkers == 0, "Invalid state");
  assert(_nsynchronized == 0, "Invalid state");

  log_debug(gc, reloc)("Joining workers: %u", nworkers);

  _nworkers = nworkers;
}

void ZRelocateQueue::resize_workers(uint nworkers) {
  assert(nworkers != 0, "Must request at least one worker");
  assert(_nworkers == 0, "Invalid state");
  assert(_nsynchronized == 0, "Invalid state");

  log_debug(gc, reloc)("Resize workers: %u", nworkers);

  ZLocker<ZConditionLock> locker(&_lock);
  _nworkers = nworkers;
}

void ZRelocateQueue::leave() {
  ZLocker<ZConditionLock> locker(&_lock);
  _nworkers--;

  assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);

  log_debug(gc, reloc)("Leaving workers: left: %u _synchronize: %d _nsynchronized: %u", _nworkers, _synchronize, _nsynchronized);

  // Prune done forwardings
  const bool forwardings_done = prune();

  // Check if all workers synchronized
  const bool last_synchronized = _synchronize && _nworkers == _nsynchronized;

  if (forwardings_done || last_synchronized) {
    _lock.notify_all();
  }
}

void ZRelocateQueue::add_and_wait(ZForwarding* forwarding) {
  ZStatTimer timer(ZCriticalPhaseRelocationStall);
  ZLocker<ZConditionLock> locker(&_lock);

  if (forwarding->is_done()) {
    return;
  }

  _queue.append(forwarding);
  if (_queue.length() == 1) {
    // Queue became non-empty
    inc_needs_attention();
    _lock.notify_all();
  }

  while (!forwarding->is_done()) {
    _lock.wait();
  }
}
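
// add_and_wait() is the relocation stall: a thread that failed to relocate
// an object itself publishes the forwarding on the queue and blocks until a
// worker has finished the page. An illustrative caller-side sketch (this is
// what ZRelocate::relocate_object() below effectively does, not additional
// API):
//
//   // Copying failed, e.g. because in-page allocation failed:
//   _queue.add_and_wait(forwarding);           // blocks until is_done()
//   return forward_object(forwarding, addr);   // lookup now succeeds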

bool ZRelocateQueue::prune() {
  if (_queue.is_empty()) {
    return false;
  }

  bool done = false;

  for (int i = 0; i < _queue.length();) {
    const ZForwarding* const forwarding = _queue.at(i);
    if (forwarding->is_done()) {
      done = true;

      _queue.delete_at(i);
    } else {
      i++;
    }
  }

  if (_queue.is_empty()) {
    dec_needs_attention();
  }

  return done;
}

ZForwarding* ZRelocateQueue::prune_and_claim() {
  if (prune()) {
    _lock.notify_all();
  }

  for (int i = 0; i < _queue.length(); i++) {
    ZForwarding* const forwarding = _queue.at(i);
    if (forwarding->claim()) {
      return forwarding;
    }
  }

  return nullptr;
}

class ZRelocateQueueSynchronizeThread {
private:
  ZRelocateQueue* const _queue;

public:
  ZRelocateQueueSynchronizeThread(ZRelocateQueue* queue)
    : _queue(queue) {
    _queue->synchronize_thread();
  }

  ~ZRelocateQueueSynchronizeThread() {
    _queue->desynchronize_thread();
  }
};

void ZRelocateQueue::synchronize_thread() {
  _nsynchronized++;

  log_debug(gc, reloc)("Synchronize worker _nsynchronized %u", _nsynchronized);

  assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);
  if (_nsynchronized == _nworkers) {
    // All workers synchronized
    _lock.notify_all();
  }
}

void ZRelocateQueue::desynchronize_thread() {
  _nsynchronized--;

  log_debug(gc, reloc)("Desynchronize worker _nsynchronized %u", _nsynchronized);

  assert(_nsynchronized < _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);
}

ZForwarding* ZRelocateQueue::synchronize_poll() {
  // Fast path avoids locking
  if (!needs_attention()) {
    return nullptr;
  }

  // Slow path to get the next forwarding and/or synchronize
  ZLocker<ZConditionLock> locker(&_lock);

  {
    ZForwarding* const forwarding = prune_and_claim();
    if (forwarding != nullptr) {
      // Don't become synchronized while there are elements in the queue
      return forwarding;
    }
  }

  if (!_synchronize) {
    return nullptr;
  }

  ZRelocateQueueSynchronizeThread rqst(this);

  do {
    _lock.wait();

    ZForwarding* const forwarding = prune_and_claim();
    if (forwarding != nullptr) {
      return forwarding;
    }
  } while (_synchronize);

  return nullptr;
}
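
// synchronize_poll() is the worker-side half of the handshake. The lock-free
// load of _needs_attention keeps the common case (nothing queued, no
// synchronize request) cheap. The slow path first drains queued forwardings,
// and only then parks the worker as synchronized, scoped by the RAII helper
// above, until desynchronize() is called or new queue work arrives.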

void ZRelocateQueue::clear() {
  assert(_nworkers == 0, "Invalid state");

  if (_queue.is_empty()) {
    return;
  }

  ZArrayIterator<ZForwarding*> iter(&_queue);
  for (ZForwarding* forwarding; iter.next(&forwarding);) {
    assert(forwarding->is_done(), "All should be done");
  }

  assert(false, "Clear was not empty");

  _queue.clear();
  dec_needs_attention();
}

void ZRelocateQueue::synchronize() {
  ZLocker<ZConditionLock> locker(&_lock);
  _synchronize = true;

  inc_needs_attention();

  log_debug(gc, reloc)("Synchronize all workers 1 _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);

  while (_nworkers != _nsynchronized) {
    _lock.wait();
    log_debug(gc, reloc)("Synchronize all workers 2 _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);
  }
}

void ZRelocateQueue::desynchronize() {
  ZLocker<ZConditionLock> locker(&_lock);
  _synchronize = false;

  log_debug(gc, reloc)("Desynchronize all workers _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);

  assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);

  dec_needs_attention();

  _lock.notify_all();
}
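
// synchronize() and desynchronize() bracket operations that must not run
// concurrently with relocating workers. A minimal usage sketch (illustrative
// only; the real call sites are ZRelocate::synchronize() and
// ZRelocate::desynchronize() further down):
//
//   queue->synchronize();    // returns once every worker is parked
//   // ... do work that must not overlap with relocation ...
//   queue->desynchronize();  // let the workers resume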

ZRelocate::ZRelocate(ZGeneration* generation)
  : _generation(generation),
    _queue() {}

ZWorkers* ZRelocate::workers() const {
  return _generation->workers();
}

void ZRelocate::start() {
  _queue.activate(workers()->active_workers());
}

void ZRelocate::add_remset(volatile zpointer* p) {
  ZGeneration::young()->remember(p);
}

static zaddress relocate_object_inner(ZForwarding* forwarding, zaddress from_addr, ZForwardingCursor* cursor) {
  assert(ZHeap::heap()->is_object_live(from_addr), "Should be live");

  // Allocate object
  const size_t size = ZUtils::object_size(from_addr);

  ZAllocatorForRelocation* allocator = ZAllocator::relocation(forwarding->to_age());

  const zaddress to_addr = allocator->alloc_object(size);

  if (is_null(to_addr)) {
    // Allocation failed
    return zaddress::null;
  }

  // Copy object
  ZUtils::object_copy_disjoint(from_addr, to_addr, size);

  // Insert forwarding
  const zaddress to_addr_final = forwarding->insert(from_addr, to_addr, cursor);

  if (to_addr_final != to_addr) {
    // Already relocated, try undo allocation
    allocator->undo_alloc_object(to_addr, size);
  }

  return to_addr_final;
}
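
// Note that two threads can race to relocate the same object: both may copy
// it, but only the winner of the forwarding-table insert determines the
// canonical to-address. The loser undoes its own allocation above, which
// with bump-pointer allocation typically only reclaims the memory if the
// copy was the most recent allocation, hence "try undo".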

zaddress ZRelocate::relocate_object(ZForwarding* forwarding, zaddress_unsafe from_addr) {
  ZForwardingCursor cursor;

  // Lookup forwarding
  zaddress to_addr = forwarding->find(from_addr, &cursor);
  if (!is_null(to_addr)) {
    // Already relocated
    return to_addr;
  }

  // Relocate object
  if (forwarding->retain_page(&_queue)) {
    assert(_generation->is_phase_relocate(), "Must be");
    to_addr = relocate_object_inner(forwarding, safe(from_addr), &cursor);
    forwarding->release_page();

    if (!is_null(to_addr)) {
      // Success
      return to_addr;
    }

    // Failed to relocate object. Signal and wait for a worker thread to
    // complete relocation of this page, and then forward the object.
    _queue.add_and_wait(forwarding);
  }

  // Forward object
  return forward_object(forwarding, from_addr);
}

zaddress ZRelocate::forward_object(ZForwarding* forwarding, zaddress_unsafe from_addr) {
  const zaddress to_addr = forwarding->find(from_addr);
  assert(!is_null(to_addr), "Should be forwarded: " PTR_FORMAT, untype(from_addr));
  return to_addr;
}

static ZPage* alloc_page(ZAllocatorForRelocation* allocator, ZPageType type, size_t size) {
  if (ZStressRelocateInPlace) {
    // Simulate failure to allocate a new page. This will
    // cause the page being relocated to be relocated in-place.
    return nullptr;
  }

  ZAllocationFlags flags;
  flags.set_non_blocking();
  flags.set_gc_relocation();

  return allocator->alloc_page_for_relocation(type, size, flags);
}

static void retire_target_page(ZGeneration* generation, ZPage* page) {
  if (generation->is_young() && page->is_old()) {
    generation->increase_promoted(page->used());
  } else {
    generation->increase_compacted(page->used());
  }

  // Free target page if it is empty. We can end up with an empty target
  // page if we allocated a new target page, and then lost the race to
  // relocate the remaining objects, leaving the target page empty when
  // relocation completed.
  if (page->used() == 0) {
    ZHeap::heap()->free_page(page);
  }
}

class ZRelocateSmallAllocator {
private:
  ZGeneration* const _generation;
  volatile size_t    _in_place_count;

public:
  ZRelocateSmallAllocator(ZGeneration* generation)
    : _generation(generation),
      _in_place_count(0) {}

  ZPage* alloc_and_retire_target_page(ZForwarding* forwarding, ZPage* target) {
    ZAllocatorForRelocation* const allocator = ZAllocator::relocation(forwarding->to_age());
    ZPage* const page = alloc_page(allocator, forwarding->type(), forwarding->size());
    if (page == nullptr) {
      Atomic::inc(&_in_place_count);
    }

    if (target != nullptr) {
      // Retire the old target page
      retire_target_page(_generation, target);
    }

    return page;
  }

  void share_target_page(ZPage* page) {
    // Does nothing
  }

  void free_target_page(ZPage* page) {
    if (page != nullptr) {
      retire_target_page(_generation, page);
    }
  }

  zaddress alloc_object(ZPage* page, size_t size) const {
    return (page != nullptr) ? page->alloc_object(size) : zaddress::null;
  }

  void undo_alloc_object(ZPage* page, zaddress addr, size_t size) const {
    page->undo_alloc_object(addr, size);
  }

  size_t in_place_count() const {
    return _in_place_count;
  }
};
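
// The two allocator flavors differ in how target pages are shared: with
// ZRelocateSmallAllocator each worker fills its own private target page, so
// plain bump allocation suffices, while ZRelocateMediumAllocator (below)
// lets all workers share one target page per age. That is why handing out a
// new shared medium page happens under a lock, and why its in-page
// allocation has to use the atomic variants.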

class ZRelocateMediumAllocator {
private:
  ZGeneration* const _generation;
  ZConditionLock     _lock;
  ZPage*             _shared[ZAllocator::_relocation_allocators];
  bool               _in_place;
  volatile size_t    _in_place_count;

public:
  ZRelocateMediumAllocator(ZGeneration* generation)
    : _generation(generation),
      _lock(),
      _shared(),
      _in_place(false),
      _in_place_count(0) {}

  ~ZRelocateMediumAllocator() {
    for (uint i = 0; i < ZAllocator::_relocation_allocators; ++i) {
      if (_shared[i] != nullptr) {
        retire_target_page(_generation, _shared[i]);
      }
    }
  }

  ZPage* shared(ZPageAge age) {
    return _shared[untype(age - 1)];
  }

  void set_shared(ZPageAge age, ZPage* page) {
    _shared[untype(age - 1)] = page;
  }

  ZPage* alloc_and_retire_target_page(ZForwarding* forwarding, ZPage* target) {
    ZLocker<ZConditionLock> locker(&_lock);

    // Wait for any ongoing in-place relocation to complete
    while (_in_place) {
      _lock.wait();
    }

    // Allocate a new page only if the shared page is the same as the
    // current target page. The shared page will be different from the
    // current target page if another thread shared a page, or allocated
    // a new page.
    const ZPageAge to_age = forwarding->to_age();
    if (shared(to_age) == target) {
      ZAllocatorForRelocation* const allocator = ZAllocator::relocation(forwarding->to_age());
      ZPage* const to_page = alloc_page(allocator, forwarding->type(), forwarding->size());
      set_shared(to_age, to_page);
      if (to_page == nullptr) {
        Atomic::inc(&_in_place_count);
        _in_place = true;
      }

      // This thread is responsible for retiring the shared target page
      if (target != nullptr) {
        retire_target_page(_generation, target);
      }
    }

    return shared(to_age);
  }

  void share_target_page(ZPage* page) {
    const ZPageAge age = page->age();

    ZLocker<ZConditionLock> locker(&_lock);
    assert(_in_place, "Invalid state");
    assert(shared(age) == nullptr, "Invalid state");
    assert(page != nullptr, "Invalid page");

    set_shared(age, page);
    _in_place = false;

    _lock.notify_all();
  }

  void free_target_page(ZPage* page) {
    // Does nothing
  }

  zaddress alloc_object(ZPage* page, size_t size) const {
    return (page != nullptr) ? page->alloc_object_atomic(size) : zaddress::null;
  }

  void undo_alloc_object(ZPage* page, zaddress addr, size_t size) const {
    page->undo_alloc_object_atomic(addr, size);
  }

  size_t in_place_count() const {
    return _in_place_count;
  }
};

template <typename Allocator>
class ZRelocateWork : public StackObj {
private:
  Allocator* const    _allocator;
  ZForwarding*        _forwarding;
  ZPage*              _target[ZAllocator::_relocation_allocators];
  ZGeneration* const  _generation;
  size_t              _other_promoted;
  size_t              _other_compacted;
  ZStringDedupContext _string_dedup_context;

  ZPage* target(ZPageAge age) {
    return _target[untype(age - 1)];
  }

  void set_target(ZPageAge age, ZPage* page) {
    _target[untype(age - 1)] = page;
  }

  size_t object_alignment() const {
    return (size_t)1 << _forwarding->object_alignment_shift();
  }

  void increase_other_forwarded(size_t unaligned_object_size) {
    const size_t aligned_size = align_up(unaligned_object_size, object_alignment());
    if (_forwarding->is_promotion()) {
      _other_promoted += aligned_size;
    } else {
      _other_compacted += aligned_size;
    }
  }

  zaddress try_relocate_object_inner(zaddress from_addr) {
    ZForwardingCursor cursor;

    const size_t size = ZUtils::object_size(from_addr);
    ZPage* const to_page = target(_forwarding->to_age());

    // Lookup forwarding
    {
      const zaddress to_addr = _forwarding->find(from_addr, &cursor);
      if (!is_null(to_addr)) {
        // Already relocated
        increase_other_forwarded(size);
        return to_addr;
      }
    }

    // Allocate object
    const zaddress allocated_addr = _allocator->alloc_object(to_page, size);
    if (is_null(allocated_addr)) {
      // Allocation failed
      return zaddress::null;
    }

    // Copy object. Use conjoint copying if we are relocating
    // in-place and the new object overlaps with the old object.
    if (_forwarding->in_place_relocation() && allocated_addr + size > from_addr) {
      ZUtils::object_copy_conjoint(from_addr, allocated_addr, size);
    } else {
      ZUtils::object_copy_disjoint(from_addr, allocated_addr, size);
    }

    // Insert forwarding
    const zaddress to_addr = _forwarding->insert(from_addr, allocated_addr, &cursor);
    if (to_addr != allocated_addr) {
      // Already relocated, undo our own allocation (not the winner's copy)
      _allocator->undo_alloc_object(to_page, allocated_addr, size);
      increase_other_forwarded(size);
    }

    return to_addr;
  }

  void update_remset_old_to_old(zaddress from_addr, zaddress to_addr) const {
    // Old-to-old relocation - move existing remset bits

    // If this is called for an in-place relocated page, then this code has the
    // responsibility to clear the old remset bits. Extra care is needed because:
    //
    // 1) The to-object copy can overlap with the from-object copy
    // 2) Remset bits of old objects need to be cleared
    //
    // A watermark is used to keep track of how far the old remset bits have been removed.

    const bool in_place = _forwarding->in_place_relocation();
    ZPage* const from_page = _forwarding->page();
    const uintptr_t from_local_offset = from_page->local_offset(from_addr);

    // Note: even with in-place relocation, the to_page could be another page
    ZPage* const to_page = ZHeap::heap()->page(to_addr);

    // Uses _relaxed version to handle that in-place relocation resets _top
    assert(ZHeap::heap()->is_in_page_relaxed(from_page, from_addr), "Must be");
    assert(to_page->is_in(to_addr), "Must be");

    // Read the size from the to-object, since the from-object
    // could have been overwritten during in-place relocation.
    const size_t size = ZUtils::object_size(to_addr);

    // If a young generation collection started while the old generation
    // relocated objects, the remembered set bits were flipped from "current"
    // to "previous".
    //
    // We need to select the correct remembered sets bitmap to ensure that the
    // old remset bits are found.
    //
    // Note that if the young generation marking (remset scanning) finishes
    // before the old generation relocation has relocated this page, then the
    // young generation will visit this page's previous remembered set bits and
    // move them over to the current bitmap.
    //
    // If the young generation runs multiple cycles while the old generation is
    // relocating, then the first cycle will have consumed the old remset bits
    // and moved associated objects to a new old page. The old relocation could
    // find either of the two bitmaps. So, either it will find the original
    // remset bits for the page, or it will find an empty bitmap for the page. It
    // doesn't matter for correctness, because the young generation marking has
    // already taken care of the bits.

    const bool active_remset_is_current = ZGeneration::old()->active_remset_is_current();

    // When in-place relocation is done and the old remset bits are located in
    // the bitmap that is going to be used for the new remset bits, then we
    // need to clear the old bits before the new bits are inserted.
    const bool iterate_current_remset = active_remset_is_current && !in_place;

    BitMap::Iterator iter = iterate_current_remset
        ? from_page->remset_iterator_limited_current(from_local_offset, size)
        : from_page->remset_iterator_limited_previous(from_local_offset, size);

    for (BitMap::idx_t field_bit : iter) {
      const uintptr_t field_local_offset = ZRememberedSet::to_offset(field_bit);

      // Add remset entry in the to-page
      const uintptr_t offset = field_local_offset - from_local_offset;
      const zaddress to_field = to_addr + offset;
      log_trace(gc, reloc)("Remember: from: " PTR_FORMAT " to: " PTR_FORMAT " current: %d marking: %d page: " PTR_FORMAT " remset: " PTR_FORMAT,
                           untype(from_page->start() + field_local_offset), untype(to_field), active_remset_is_current, ZGeneration::young()->is_phase_mark(), p2i(to_page), p2i(to_page->remset_current()));

      volatile zpointer* const p = (volatile zpointer*)to_field;

      if (ZGeneration::young()->is_phase_mark()) {
        // Young generation remembered set scanning needs to know about this
        // field. It will take responsibility to add a new remembered set entry if needed.
        _forwarding->relocated_remembered_fields_register(p);
      } else {
        to_page->remember(p);
        if (in_place) {
          assert(to_page->is_remembered(p), "p: " PTR_FORMAT, p2i(p));
        }
      }
    }
  }

  static bool add_remset_if_young(volatile zpointer* p, zaddress addr) {
    if (ZHeap::heap()->is_young(addr)) {
      ZRelocate::add_remset(p);
      return true;
    }

    return false;
  }

  static void update_remset_promoted_filter_and_remap_per_field(volatile zpointer* p) {
    const zpointer ptr = Atomic::load(p);

    assert(ZPointer::is_old_load_good(ptr), "Should be at least old load good: " PTR_FORMAT, untype(ptr));

    if (ZPointer::is_store_good(ptr)) {
      // Already has a remset entry
      return;
    }

    if (ZPointer::is_load_good(ptr)) {
      if (!is_null_any(ptr)) {
        const zaddress addr = ZPointer::uncolor(ptr);
        add_remset_if_young(p, addr);
      }
      // No need to remap, it is already load good
      return;
    }

    if (is_null_any(ptr)) {
      // Eagerly remap to skip adding a remset entry just to get deferred remapping
      ZBarrier::remap_young_relocated(p, ptr);
      return;
    }

    const zaddress_unsafe addr_unsafe = ZPointer::uncolor_unsafe(ptr);
    ZForwarding* const forwarding = ZGeneration::young()->forwarding(addr_unsafe);

    if (forwarding == nullptr) {
      // Object isn't being relocated
      const zaddress addr = safe(addr_unsafe);
      if (!add_remset_if_young(p, addr)) {
        // Not young - eagerly remap to skip adding a remset entry just to get deferred remapping
        ZBarrier::remap_young_relocated(p, ptr);
      }
      return;
    }

    const zaddress addr = forwarding->find(addr_unsafe);

    if (!is_null(addr)) {
      // Object has already been relocated
      if (!add_remset_if_young(p, addr)) {
        // Not young - eagerly remap to skip adding a remset entry just to get deferred remapping
        ZBarrier::remap_young_relocated(p, ptr);
      }
      return;
    }

    // Object has not been relocated yet.
    // Don't want to eagerly relocate objects, so just add a remset entry.
    ZRelocate::add_remset(p);
  }

  void update_remset_promoted(zaddress to_addr) const {
    ZIterator::basic_oop_iterate(to_oop(to_addr), update_remset_promoted_filter_and_remap_per_field);
  }

  void update_remset_for_fields(zaddress from_addr, zaddress to_addr) const {
    if (_forwarding->to_age() != ZPageAge::old) {
      // No remembered set in young pages
      return;
    }

    // Need to deal with remset when moving objects to the old generation
    if (_forwarding->from_age() == ZPageAge::old) {
      update_remset_old_to_old(from_addr, to_addr);
      return;
    }

    // Normal promotion
    update_remset_promoted(to_addr);
  }

  void maybe_string_dedup(zaddress to_addr) {
    if (_forwarding->is_promotion()) {
      // Only deduplicate promoted objects, and let short-lived strings simply die instead.
      _string_dedup_context.request(to_oop(to_addr));
    }
  }

  bool try_relocate_object(zaddress from_addr) {
    const zaddress to_addr = try_relocate_object_inner(from_addr);

    if (is_null(to_addr)) {
      return false;
    }

    update_remset_for_fields(from_addr, to_addr);

    maybe_string_dedup(to_addr);

    return true;
  }

  void start_in_place_relocation_prepare_remset(ZPage* from_page) {
    if (_forwarding->from_age() != ZPageAge::old) {
      // Only old pages have remset bits
      return;
    }

    if (ZGeneration::old()->active_remset_is_current()) {
      // We want to iterate over and clear the remset bits of the from-space page,
      // and insert current bits in the to-space page. However, with in-place
      // relocation, the from-space and to-space pages are the same. Clearing
      // is destructive, and is difficult to perform before or during the iteration.
      // However, clearing of the current bits has to be done before exposing the
      // to-space objects in the forwarding table.
      //
      // To solve this tricky dependency problem, we start by stashing away the
      // current bits in the previous bits, and clearing the current bits
      // (implemented by swapping the bits). This way, the current bits are
      // cleared before copying the objects (like a normal to-space page),
      // and the previous bits are representing a copy of the current bits
      // of the from-space page, and are used for iteration.
      from_page->swap_remset_bitmaps();
    }
  }

  ZPage* start_in_place_relocation(zoffset relocated_watermark) {
    _forwarding->in_place_relocation_claim_page();
    _forwarding->in_place_relocation_start(relocated_watermark);

    ZPage* const from_page = _forwarding->page();

    const ZPageAge to_age = _forwarding->to_age();
    const bool promotion = _forwarding->is_promotion();

    // Promotions happen through a new cloned page
    ZPage* const to_page = promotion
        ? from_page->clone_for_promotion()
        : from_page->reset(to_age);

    // Reset page for in-place relocation
    to_page->reset_top_for_allocation();

    // Verify that the inactive remset is clear when resetting the page for
    // in-place relocation.
    if (from_page->age() == ZPageAge::old) {
      if (ZGeneration::old()->active_remset_is_current()) {
        to_page->verify_remset_cleared_previous();
      } else {
        to_page->verify_remset_cleared_current();
      }
    }

    // Clear remset bits for all objects that were relocated
    // before this page became an in-place relocated page.
    start_in_place_relocation_prepare_remset(from_page);

    if (promotion) {
      // Register the promotion
      ZGeneration::young()->in_place_relocate_promote(from_page, to_page);
      ZGeneration::young()->register_in_place_relocate_promoted(from_page);
    }

    return to_page;
  }

  void relocate_object(oop obj) {
    const zaddress addr = to_zaddress(obj);
    assert(ZHeap::heap()->is_object_live(addr), "Should be live");

    while (!try_relocate_object(addr)) {
      // Allocate a new target page, or if that fails, use the page being
      // relocated as the new target, which will cause it to be relocated
      // in-place.
      const ZPageAge to_age = _forwarding->to_age();
      ZPage* to_page = _allocator->alloc_and_retire_target_page(_forwarding, target(to_age));
      set_target(to_age, to_page);
      if (to_page != nullptr) {
        continue;
      }

      // Start in-place relocation to block other threads from accessing
      // the page, or its forwarding table, until it has been released
      // (relocation completed).
      to_page = start_in_place_relocation(ZAddress::offset(addr));
      set_target(to_age, to_page);
    }
  }
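
  // The retry loop above has exactly two ways to make progress after a
  // failed copy: install a freshly allocated target page, or fall back to
  // in-place relocation, where the from-page itself becomes the target.
  // In-place relocation is the terminal fallback, which is why a failed
  // target page allocation is what the allocators count in _in_place_count.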

public:
  ZRelocateWork(Allocator* allocator, ZGeneration* generation)
    : _allocator(allocator),
      _forwarding(nullptr),
      _target(),
      _generation(generation),
      _other_promoted(0),
      _other_compacted(0) {}

  ~ZRelocateWork() {
    for (uint i = 0; i < ZAllocator::_relocation_allocators; ++i) {
      _allocator->free_target_page(_target[i]);
    }
    // Report statistics on behalf of non-worker threads
    _generation->increase_promoted(_other_promoted);
    _generation->increase_compacted(_other_compacted);
  }

  bool active_remset_is_current() const {
    // Normal old-to-old relocation can treat the from-page remset as a
    // read-only copy, and then copy over the appropriate remset bits to the
    // cleared to-page's 'current' remset bitmap.
    //
    // In-place relocation is more complicated. Since the same page is both
    // a from-page and a to-page, we need to remove the old remset bits, and
    // add remset bits that correspond to the new locations of the relocated
    // objects.
    //
    // Depending on how long ago (in terms of number of young GCs and the
    // current young GC's phase) the page was allocated, the active
    // remembered set will be in either the 'current' or 'previous' bitmap.
    //
    // If the active bits are in the 'previous' bitmap, we know that the
    // 'current' bitmap was cleared at some earlier point in time, and we can
    // simply set new bits in the 'current' bitmap, and later, when relocation
    // has read all the old remset bits, we can just clear the 'previous'
    // remset bitmap.
    //
    // If, on the other hand, the active bits are in the 'current' bitmap, then
    // that bitmap will be used both to read the old remset bits, and as the
    // destination for the remset bits that we copy when an object is copied
    // to its new location within the page. We need to *carefully* remove
    // all old remset bits, without clearing out the newly set bits.
    return ZGeneration::old()->active_remset_is_current();
  }

  void clear_remset_before_in_place_reuse(ZPage* page) {
    if (_forwarding->from_age() != ZPageAge::old) {
      // No remset bits
      return;
    }

    // Clear 'previous' remset bits. For in-place relocated pages, the previous
    // remset bits are always used, even when active_remset_is_current().
    page->clear_remset_previous();
  }

  void finish_in_place_relocation() {
    // We are done with the from_space copy of the page
    _forwarding->in_place_relocation_finish();
  }

  void do_forwarding(ZForwarding* forwarding) {
    _forwarding = forwarding;

    _forwarding->page()->log_msg(" (relocate page)");

    ZVerify::before_relocation(_forwarding);

    // Relocate objects
    _forwarding->object_iterate([&](oop obj) { relocate_object(obj); });

    ZVerify::after_relocation(_forwarding);

    // Verify
    if (ZVerifyForwarding) {
      _forwarding->verify();
    }

    _generation->increase_freed(_forwarding->page()->size());

    // Deal with in-place relocation
    const bool in_place = _forwarding->in_place_relocation();
    if (in_place) {
      finish_in_place_relocation();
    }

    // Old from-space pages need to deal with remset bits
    if (_forwarding->from_age() == ZPageAge::old) {
      _forwarding->relocated_remembered_fields_after_relocate();
    }

    // Release relocated page
    _forwarding->release_page();

    if (in_place) {
      // Wait for all other threads to call release_page
      ZPage* const page = _forwarding->detach_page();

      // Ensure that previous remset bits are cleared
      clear_remset_before_in_place_reuse(page);

      page->log_msg(" (relocate page done in-place)");

      // Different pages when promoting
      ZPage* const target_page = target(_forwarding->to_age());
      _allocator->share_target_page(target_page);

    } else {
      // Wait for all other threads to call release_page
      ZPage* const page = _forwarding->detach_page();

      page->log_msg(" (relocate page done normal)");

      // Free page
      ZHeap::heap()->free_page(page);
    }
  }
};
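
// Lifecycle note: ZRelocateWork is a per-worker, stack-allocated helper (see
// ZRelocateTask::work() below). Its destructor retires any target pages it
// still holds and reports the bytes it observed being relocated by other
// threads (_other_promoted/_other_compacted) to the generation statistics.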

class ZRelocateStoreBufferInstallBasePointersThreadClosure : public ThreadClosure {
public:
  virtual void do_thread(Thread* thread) {
    JavaThread* const jt = JavaThread::cast(thread);
    ZStoreBarrierBuffer* buffer = ZThreadLocalData::store_barrier_buffer(jt);
    buffer->install_base_pointers();
  }
};

// Installs the object base pointers (object starts) for the fields written
// in the store buffer. The code that searches for the object start uses the
// liveness information stored in the pages. That information is lost when
// the pages have been relocated and then destroyed.
class ZRelocateStoreBufferInstallBasePointersTask : public ZTask {
private:
  ZJavaThreadsIterator _threads_iter;

public:
  ZRelocateStoreBufferInstallBasePointersTask(ZGeneration* generation)
    : ZTask("ZRelocateStoreBufferInstallBasePointersTask"),
      _threads_iter(generation->id_optional()) {}

  virtual void work() {
    ZRelocateStoreBufferInstallBasePointersThreadClosure fix_store_buffer_cl;
    _threads_iter.apply(&fix_store_buffer_cl);
  }
};

class ZRelocateTask : public ZRestartableTask {
private:
  ZRelocationSetParallelIterator _iter;
  ZGeneration* const             _generation;
  ZRelocateQueue* const          _queue;
  ZRelocateSmallAllocator        _small_allocator;
  ZRelocateMediumAllocator       _medium_allocator;

public:
  ZRelocateTask(ZRelocationSet* relocation_set, ZRelocateQueue* queue)
    : ZRestartableTask("ZRelocateTask"),
      _iter(relocation_set),
      _generation(relocation_set->generation()),
      _queue(queue),
      _small_allocator(_generation),
      _medium_allocator(_generation) {}

  ~ZRelocateTask() {
    _generation->stat_relocation()->at_relocate_end(_small_allocator.in_place_count(), _medium_allocator.in_place_count());

    // Signal that we're not using the queue anymore. Used mostly for asserts.
    _queue->deactivate();
  }

  virtual void work() {
    ZRelocateWork<ZRelocateSmallAllocator> small(&_small_allocator, _generation);
    ZRelocateWork<ZRelocateMediumAllocator> medium(&_medium_allocator, _generation);

    const auto do_forwarding = [&](ZForwarding* forwarding) {
      ZPage* const page = forwarding->page();
      if (page->is_small()) {
        small.do_forwarding(forwarding);
      } else {
        medium.do_forwarding(forwarding);
      }

      // Absolute last thing done while relocating a page.
      //
      // We don't use the SuspendibleThreadSet when relocating pages.
      // Instead the ZRelocateQueue is used as a pseudo STS joiner/leaver.
      //
      // After the mark_done call a safepoint could be entered and a
      // new GC phase could be started.
      forwarding->mark_done();
    };

    const auto claim_and_do_forwarding = [&](ZForwarding* forwarding) {
      if (forwarding->claim()) {
        do_forwarding(forwarding);
      }
    };

    const auto do_forwarding_one_from_iter = [&]() {
      ZForwarding* forwarding;

      if (_iter.next(&forwarding)) {
        claim_and_do_forwarding(forwarding);
        return true;
      }

      return false;
    };

    for (;;) {
      // As long as there are requests in the relocate queue, there are threads
      // waiting in a VM state that does not allow them to be blocked. The
      // worker thread needs to finish relocating these pages, and allow the
      // other threads to continue and proceed to a blocking state. After that,
      // the worker threads are allowed to safepoint synchronize.
      for (ZForwarding* forwarding; (forwarding = _queue->synchronize_poll()) != nullptr;) {
        do_forwarding(forwarding);
      }

      if (!do_forwarding_one_from_iter()) {
        // No more work
        break;
      }

      if (_generation->should_worker_resize()) {
        break;
      }
    }

    _queue->leave();
  }

  virtual void resize_workers(uint nworkers) {
    _queue->resize_workers(nworkers);
  }
};

static void remap_and_maybe_add_remset(volatile zpointer* p) {
  const zpointer ptr = Atomic::load(p);

  if (ZPointer::is_store_good(ptr)) {
    // Already has a remset entry
    return;
  }

  // Remset entries are used for two reasons:
  // 1) Young marking old-to-young pointer roots
  // 2) Deferred remapping of stale old-to-young pointers
  //
  // This load barrier will up-front perform the remapping of (2),
  // and the code below only has to make sure we register up-to-date
  // old-to-young pointers for (1).
  const zaddress addr = ZBarrier::load_barrier_on_oop_field_preloaded(p, ptr);

  if (is_null(addr)) {
    // No need for remset entries for null pointers
    return;
  }

  if (ZHeap::heap()->is_old(addr)) {
    // No need for remset entries for pointers to old gen
    return;
  }

  ZRelocate::add_remset(p);
}
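
// In short: after flip promotion, every field in a promoted page either
// already has a remset entry (store good), or is remapped here so that only
// fields that still point into the young generation pay for a new entry.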

class ZRelocateAddRemsetForFlipPromoted : public ZRestartableTask {
private:
  ZStatTimerYoung                _timer;
  ZArrayParallelIterator<ZPage*> _iter;

public:
  ZRelocateAddRemsetForFlipPromoted(ZArray<ZPage*>* pages)
    : ZRestartableTask("ZRelocateAddRemsetForFlipPromoted"),
      _timer(ZSubPhaseConcurrentRelocateRememberedSetFlipPromotedYoung),
      _iter(pages) {}

  virtual void work() {
    SuspendibleThreadSetJoiner sts_joiner;
    ZStringDedupContext string_dedup_context;

    for (ZPage* page; _iter.next(&page);) {
      page->object_iterate([&](oop obj) {
        // Remap oops and add remset if needed
        ZIterator::basic_oop_iterate_safe(obj, remap_and_maybe_add_remset);

        // String dedup
        string_dedup_context.request(obj);
      });

      SuspendibleThreadSet::yield();
      if (ZGeneration::young()->should_worker_resize()) {
        return;
      }
    }
  }
};

void ZRelocate::relocate(ZRelocationSet* relocation_set) {
  {
    // Install the store buffer's base pointers before the
    // relocate task destroys the liveness information in
    // the relocated pages.
    ZRelocateStoreBufferInstallBasePointersTask buffer_task(_generation);
    workers()->run(&buffer_task);
  }

  {
    ZRelocateTask relocate_task(relocation_set, &_queue);
    workers()->run(&relocate_task);
  }

  if (relocation_set->generation()->is_young()) {
    ZRelocateAddRemsetForFlipPromoted task(relocation_set->flip_promoted_pages());
    workers()->run(&task);
  }
}

ZPageAge ZRelocate::compute_to_age(ZPageAge from_age) {
  if (from_age == ZPageAge::old) {
    return ZPageAge::old;
  }

  const uint age = untype(from_age);
  if (age >= ZGeneration::young()->tenuring_threshold()) {
    return ZPageAge::old;
  }

  return to_zpageage(age + 1);
}
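
// Worked example: assuming eden is age 0 and the tenuring threshold is 2,
// an eden page ages to 1, an age-1 page ages to 2, and an age-2 page is
// promoted to old (2 >= 2). Old pages always stay old.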

class ZFlipAgePagesTask : public ZTask {
private:
  ZArrayParallelIterator<ZPage*> _iter;

public:
  ZFlipAgePagesTask(const ZArray<ZPage*>* pages)
    : ZTask("ZPromotePagesTask"),
      _iter(pages) {}

  virtual void work() {
    SuspendibleThreadSetJoiner sts_joiner;
    ZArray<ZPage*> promoted_pages;

    for (ZPage* prev_page; _iter.next(&prev_page);) {
      const ZPageAge from_age = prev_page->age();
      const ZPageAge to_age = ZRelocate::compute_to_age(from_age);
      assert(from_age != ZPageAge::old, "invalid age for a young collection");

      // Figure out if this is proper promotion
      const bool promotion = to_age == ZPageAge::old;

      if (promotion) {
        // Before promoting an object (and before relocate start), we must ensure that all
        // contained zpointers are store good. The marking code ensures that for non-null
        // pointers, but null pointers are ignored. This code ensures that even null pointers
        // are made store good, for the promoted objects.
        prev_page->object_iterate([&](oop obj) {
          ZIterator::basic_oop_iterate_safe(obj, ZBarrier::promote_barrier_on_young_oop_field);
        });
      }

      // Logging
      prev_page->log_msg(promotion ? " (flip promoted)" : " (flip survived)");

      // Setup to-space page
      ZPage* const new_page = promotion
          ? prev_page->clone_for_promotion()
          : prev_page->reset(to_age);

      // Reset page for flip aging
      new_page->reset_livemap();

      if (promotion) {
        ZGeneration::young()->flip_promote(prev_page, new_page);
        // Defer promoted page registration to reduce the number of times the lock is taken
        promoted_pages.push(prev_page);
      }

      SuspendibleThreadSet::yield();
    }

    ZGeneration::young()->register_flip_promoted(promoted_pages);
  }
};

void ZRelocate::flip_age_pages(const ZArray<ZPage*>* pages) {
  ZFlipAgePagesTask flip_age_task(pages);
  workers()->run(&flip_age_task);
}

void ZRelocate::synchronize() {
  _queue.synchronize();
}

void ZRelocate::desynchronize() {
  _queue.desynchronize();
}

ZRelocateQueue* ZRelocate::queue() {
  return &_queue;
}

bool ZRelocate::is_queue_active() const {
  return _queue.is_active();
}