/*
 * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "gc/shared/gc_globals.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zAbort.inline.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zAllocator.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zForwarding.inline.hpp"
#include "gc/z/zGeneration.inline.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zIndexDistributor.inline.hpp"
#include "gc/z/zIterator.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageAge.hpp"
#include "gc/z/zRelocate.hpp"
#include "gc/z/zRelocationSet.inline.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStackWatermark.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zStringDedup.inline.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zUncoloredRoot.inline.hpp"
#include "gc/z/zVerify.hpp"
#include "gc/z/zWorkers.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/atomic.hpp"
#include "utilities/debug.hpp"

static const ZStatCriticalPhase ZCriticalPhaseRelocationStall("Relocation Stall");
static const ZStatSubPhase ZSubPhaseConcurrentRelocateRememberedSetFlipPromotedYoung("Concurrent Relocate Remset FP", ZGenerationId::young);

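// The relocation queue lets threads that fail to relocate an object themselves
// (for example because their to-space allocation failed) hand the forwarding
// over to a GC worker and wait until the worker has relocated the page.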
ZRelocateQueue::ZRelocateQueue()
  : _lock(),
    _queue(),
    _nworkers(0),
    _nsynchronized(0),
    _synchronize(false),
    _is_active(false),
    _needs_attention(0) {}

bool ZRelocateQueue::needs_attention() const {
  return Atomic::load(&_needs_attention) != 0;
}

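// _needs_attention counts how many of two conditions are active: a non-empty
// queue and a pending synchronize() request. It is therefore always in the
// range [0, 2], which the asserts below check.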
void ZRelocateQueue::inc_needs_attention() {
  const int needs_attention = Atomic::add(&_needs_attention, 1);
  assert(needs_attention == 1 || needs_attention == 2, "Invalid state");
}

void ZRelocateQueue::dec_needs_attention() {
  const int needs_attention = Atomic::sub(&_needs_attention, 1);
  assert(needs_attention == 0 || needs_attention == 1, "Invalid state");
}

void ZRelocateQueue::activate(uint nworkers) {
  _is_active = true;
  join(nworkers);
}

void ZRelocateQueue::deactivate() {
  Atomic::store(&_is_active, false);
  clear();
}

bool ZRelocateQueue::is_active() const {
  return Atomic::load(&_is_active);
}

void ZRelocateQueue::join(uint nworkers) {
  assert(nworkers != 0, "Must request at least one worker");
  assert(_nworkers == 0, "Invalid state");
  assert(_nsynchronized == 0, "Invalid state");

  log_debug(gc, reloc)("Joining workers: %u", nworkers);

  _nworkers = nworkers;
}

void ZRelocateQueue::resize_workers(uint nworkers) {
  assert(nworkers != 0, "Must request at least one worker");
  assert(_nworkers == 0, "Invalid state");
  assert(_nsynchronized == 0, "Invalid state");

  log_debug(gc, reloc)("Resize workers: %u", nworkers);

  ZLocker<ZConditionLock> locker(&_lock);
  _nworkers = nworkers;
}

void ZRelocateQueue::leave() {
  ZLocker<ZConditionLock> locker(&_lock);
  _nworkers--;

  assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);

  log_debug(gc, reloc)("Leaving workers: left: %u _synchronize: %d _nsynchronized: %u", _nworkers, _synchronize, _nsynchronized);

  // Prune done forwardings
  const bool forwardings_done = prune();

  // Check if all workers synchronized
  const bool last_synchronized = _synchronize && _nworkers == _nsynchronized;

  if (forwardings_done || last_synchronized) {
    _lock.notify_all();
  }
}

void ZRelocateQueue::add_and_wait(ZForwarding* forwarding) {
  ZStatTimer timer(ZCriticalPhaseRelocationStall);
  ZLocker<ZConditionLock> locker(&_lock);

  if (forwarding->is_done()) {
    return;
  }

  _queue.append(forwarding);
  if (_queue.length() == 1) {
    // Queue became non-empty
    inc_needs_attention();
    _lock.notify_all();
  }

  while (!forwarding->is_done()) {
    _lock.wait();
  }
}

bool ZRelocateQueue::prune() {
  if (_queue.is_empty()) {
    return false;
  }

  bool done = false;

  for (int i = 0; i < _queue.length();) {
    const ZForwarding* const forwarding = _queue.at(i);
    if (forwarding->is_done()) {
      done = true;

      _queue.delete_at(i);
    } else {
      i++;
    }
  }

  if (_queue.is_empty()) {
    dec_needs_attention();
  }

  return done;
}

ZForwarding* ZRelocateQueue::prune_and_claim() {
  if (prune()) {
    _lock.notify_all();
  }

  for (int i = 0; i < _queue.length(); i++) {
    ZForwarding* const forwarding = _queue.at(i);
    if (forwarding->claim()) {
      return forwarding;
    }
  }

  return nullptr;
}

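// Scoped helper that counts the current worker as synchronized for as long as
// it waits in synchronize_poll(), so that synchronize() can detect when every
// worker has stopped processing forwardings.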
class ZRelocateQueueSynchronizeThread {
private:
  ZRelocateQueue* const _queue;

public:
  ZRelocateQueueSynchronizeThread(ZRelocateQueue* queue)
    : _queue(queue) {
    _queue->synchronize_thread();
  }

  ~ZRelocateQueueSynchronizeThread() {
    _queue->desynchronize_thread();
  }
};

void ZRelocateQueue::synchronize_thread() {
  _nsynchronized++;

  log_debug(gc, reloc)("Synchronize worker _nsynchronized %u", _nsynchronized);

  assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);
  if (_nsynchronized == _nworkers) {
    // All workers synchronized
    _lock.notify_all();
  }
}

void ZRelocateQueue::desynchronize_thread() {
  _nsynchronized--;

  log_debug(gc, reloc)("Desynchronize worker _nsynchronized %u", _nsynchronized);

  assert(_nsynchronized < _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);
}

ZForwarding* ZRelocateQueue::synchronize_poll() {
  // Fast path avoids locking
  if (!needs_attention()) {
    return nullptr;
  }

  // Slow path to get the next forwarding and/or synchronize
  ZLocker<ZConditionLock> locker(&_lock);

  {
    ZForwarding* const forwarding = prune_and_claim();
    if (forwarding != nullptr) {
      // Don't become synchronized while there are elements in the queue
      return forwarding;
    }
  }

  if (!_synchronize) {
    return nullptr;
  }

  ZRelocateQueueSynchronizeThread rqst(this);

  do {
    _lock.wait();

    ZForwarding* const forwarding = prune_and_claim();
    if (forwarding != nullptr) {
      return forwarding;
    }
  } while (_synchronize);

  return nullptr;
}

void ZRelocateQueue::clear() {
  assert(_nworkers == 0, "Invalid state");

  if (_queue.is_empty()) {
    return;
  }

  ZArrayIterator<ZForwarding*> iter(&_queue);
  for (ZForwarding* forwarding; iter.next(&forwarding);) {
    assert(forwarding->is_done(), "All should be done");
  }

  assert(false, "Clear was not empty");

  _queue.clear();
  dec_needs_attention();
}

void ZRelocateQueue::synchronize() {
  ZLocker<ZConditionLock> locker(&_lock);
  _synchronize = true;

  inc_needs_attention();

  log_debug(gc, reloc)("Synchronize all workers 1 _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);

  while (_nworkers != _nsynchronized) {
    _lock.wait();
    log_debug(gc, reloc)("Synchronize all workers 2 _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);
  }
}

void ZRelocateQueue::desynchronize() {
  ZLocker<ZConditionLock> locker(&_lock);
  _synchronize = false;

  log_debug(gc, reloc)("Desynchronize all workers _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);

  assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);

  dec_needs_attention();

  _lock.notify_all();
}

ZRelocate::ZRelocate(ZGeneration* generation)
  : _generation(generation),
    _queue() {}

ZWorkers* ZRelocate::workers() const {
  return _generation->workers();
}

void ZRelocate::start() {
  _queue.activate(workers()->active_workers());
}

void ZRelocate::add_remset(volatile zpointer* p) {
  ZGeneration::young()->remember(p);
}

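// Relocation of a single object may race with other threads relocating the
// same object. Each racer allocates and copies its own to-space candidate;
// the forwarding table insert decides the winner, and losers undo their
// allocation and use the winner's copy.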
static zaddress relocate_object_inner(ZForwarding* forwarding, zaddress from_addr, ZForwardingCursor* cursor) {
  assert(ZHeap::heap()->is_object_live(from_addr), "Should be live");

  // Allocate object
  const size_t old_size = ZUtils::object_size(from_addr);
  const size_t size = ZUtils::copy_size(from_addr, old_size);

  ZAllocatorForRelocation* allocator = ZAllocator::relocation(forwarding->to_age());

  const zaddress to_addr = allocator->alloc_object(size);

  if (is_null(to_addr)) {
    // Allocation failed
    return zaddress::null;
  }
  assert(to_addr != from_addr, "addresses must be different");

  // Copy object
  ZUtils::object_copy_disjoint(from_addr, to_addr, old_size);
  ZUtils::initialize_hash_if_necessary(to_addr, from_addr);

  // Insert forwarding
  const zaddress to_addr_final = forwarding->insert(from_addr, to_addr, cursor);

  if (to_addr_final != to_addr) {
    // Already relocated, try undo allocation
    allocator->undo_alloc_object(to_addr, size);
  }

  return to_addr_final;
}

zaddress ZRelocate::relocate_object(ZForwarding* forwarding, zaddress_unsafe from_addr) {
  ZForwardingCursor cursor;

  // Lookup forwarding
  zaddress to_addr = forwarding->find(from_addr, &cursor);
  if (!is_null(to_addr)) {
    // Already relocated
    return to_addr;
  }

  // Relocate object
  if (forwarding->retain_page(&_queue)) {
    assert(_generation->is_phase_relocate(), "Must be");
    to_addr = relocate_object_inner(forwarding, safe(from_addr), &cursor);
    forwarding->release_page();

    if (!is_null(to_addr)) {
      // Success
      return to_addr;
    }

    // Failed to relocate object. Signal and wait for a worker thread to
    // complete relocation of this page, and then forward the object.
    _queue.add_and_wait(forwarding);
  }

  // Forward object
  return forward_object(forwarding, from_addr);
}

zaddress ZRelocate::forward_object(ZForwarding* forwarding, zaddress_unsafe from_addr) {
  const zaddress to_addr = forwarding->find(from_addr);
  assert(!is_null(to_addr), "Should be forwarded: " PTR_FORMAT, untype(from_addr));
  return to_addr;
}

static ZPage* alloc_page(ZAllocatorForRelocation* allocator, ZPageType type, size_t size) {
  if (ZStressRelocateInPlace) {
    // Simulate failure to allocate a new page. This will
    // cause the page being relocated to be relocated in-place.
    return nullptr;
  }

  ZAllocationFlags flags;
  flags.set_non_blocking();
  flags.set_gc_relocation();

  return allocator->alloc_page_for_relocation(type, size, flags);
}

static void retire_target_page(ZGeneration* generation, ZPage* page) {
  if (generation->is_young() && page->is_old()) {
    generation->increase_promoted(page->used());
  } else {
    generation->increase_compacted(page->used());
  }

  // Free target page if it is empty. We can end up with an empty target
  // page if we allocated a new target page, and then lost the race to
  // relocate the remaining objects, leaving the target page empty when
  // relocation completed.
  if (page->used() == 0) {
    ZHeap::heap()->free_page(page);
  }
}

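// Allocators for to-space pages. Small target pages are private to a worker,
// so objects can be bump-allocated without synchronization. Medium target
// pages (see ZRelocateMediumAllocator below) are shared between workers, so
// the shared page is managed under a lock and objects are allocated atomically.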
class ZRelocateSmallAllocator {
private:
  ZGeneration* const _generation;
  volatile size_t    _in_place_count;

public:
  ZRelocateSmallAllocator(ZGeneration* generation)
    : _generation(generation),
      _in_place_count(0) {}

  ZPage* alloc_and_retire_target_page(ZForwarding* forwarding, ZPage* target) {
    ZAllocatorForRelocation* const allocator = ZAllocator::relocation(forwarding->to_age());
    ZPage* const page = alloc_page(allocator, forwarding->type(), forwarding->size());
    if (page == nullptr) {
      Atomic::inc(&_in_place_count);
    }

    if (target != nullptr) {
      // Retire the old target page
      retire_target_page(_generation, target);
    }

    return page;
  }

  void share_target_page(ZPage* page) {
    // Does nothing
  }

  void free_target_page(ZPage* page) {
    if (page != nullptr) {
      retire_target_page(_generation, page);
    }
  }

  zaddress alloc_object(ZPage* page, size_t size) const {
    return (page != nullptr) ? page->alloc_object(size) : zaddress::null;
  }

  void undo_alloc_object(ZPage* page, zaddress addr, size_t size) const {
    page->undo_alloc_object(addr, size);
  }

  size_t in_place_count() const {
    return _in_place_count;
  }
};

class ZRelocateMediumAllocator {
private:
  ZGeneration* const _generation;
  ZConditionLock     _lock;
  ZPage*             _shared[ZAllocator::_relocation_allocators];
  bool               _in_place;
  volatile size_t    _in_place_count;

public:
  ZRelocateMediumAllocator(ZGeneration* generation)
    : _generation(generation),
      _lock(),
      _shared(),
      _in_place(false),
      _in_place_count(0) {}

  ~ZRelocateMediumAllocator() {
    for (uint i = 0; i < ZAllocator::_relocation_allocators; ++i) {
      if (_shared[i] != nullptr) {
        retire_target_page(_generation, _shared[i]);
      }
    }
  }

  ZPage* shared(ZPageAge age) {
    return _shared[untype(age - 1)];
  }

  void set_shared(ZPageAge age, ZPage* page) {
    _shared[untype(age - 1)] = page;
  }

  ZPage* alloc_and_retire_target_page(ZForwarding* forwarding, ZPage* target) {
    ZLocker<ZConditionLock> locker(&_lock);

    // Wait for any ongoing in-place relocation to complete
    while (_in_place) {
      _lock.wait();
    }

    // Allocate a new page only if the shared page is the same as the
    // current target page. The shared page will be different from the
    // current target page if another thread shared a page, or allocated
    // a new page.
    const ZPageAge to_age = forwarding->to_age();
    if (shared(to_age) == target) {
      ZAllocatorForRelocation* const allocator = ZAllocator::relocation(forwarding->to_age());
      ZPage* const to_page = alloc_page(allocator, forwarding->type(), forwarding->size());
      set_shared(to_age, to_page);
      if (to_page == nullptr) {
        Atomic::inc(&_in_place_count);
        _in_place = true;
      }

      // This thread is responsible for retiring the shared target page
      if (target != nullptr) {
        retire_target_page(_generation, target);
      }
    }

    return shared(to_age);
  }

  void share_target_page(ZPage* page) {
    const ZPageAge age = page->age();

    ZLocker<ZConditionLock> locker(&_lock);
    assert(_in_place, "Invalid state");
    assert(shared(age) == nullptr, "Invalid state");
    assert(page != nullptr, "Invalid page");

    set_shared(age, page);
    _in_place = false;

    _lock.notify_all();
  }

  void free_target_page(ZPage* page) {
    // Does nothing
  }

  zaddress alloc_object(ZPage* page, size_t size) const {
    return (page != nullptr) ? page->alloc_object_atomic(size) : zaddress::null;
  }

  void undo_alloc_object(ZPage* page, zaddress addr, size_t size) const {
    page->undo_alloc_object_atomic(addr, size);
  }

  size_t in_place_count() const {
    return _in_place_count;
  }
};

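// Per-worker state for relocating the objects of one forwarding (page) at a
// time. Keeps one target page per destination age and accumulates promoted/
// compacted byte counts for objects that other threads relocated first.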
template <typename Allocator>
class ZRelocateWork : public StackObj {
private:
  Allocator* const    _allocator;
  ZForwarding*        _forwarding;
  ZPage*              _target[ZAllocator::_relocation_allocators];
  ZGeneration* const  _generation;
  size_t              _other_promoted;
  size_t              _other_compacted;
  ZStringDedupContext _string_dedup_context;

  ZPage* target(ZPageAge age) {
    return _target[untype(age - 1)];
  }

  void set_target(ZPageAge age, ZPage* page) {
    _target[untype(age - 1)] = page;
  }

  size_t object_alignment() const {
    return (size_t)1 << _forwarding->object_alignment_shift();
  }

  void increase_other_forwarded(size_t unaligned_object_size) {
    const size_t aligned_size = align_up(unaligned_object_size, object_alignment());
    if (_forwarding->is_promotion()) {
      _other_promoted += aligned_size;
    } else {
      _other_compacted += aligned_size;
    }
  }

  zaddress try_relocate_object_inner(zaddress from_addr, size_t old_size) {
    ZForwardingCursor cursor;
    ZPage* const to_page = target(_forwarding->to_age());
    zoffset_end from_offset = to_zoffset_end(ZAddress::offset(from_addr));
    zoffset_end top = to_page != nullptr ? to_page->top() : to_zoffset_end(0);
    const size_t new_size = ZUtils::copy_size(from_addr, old_size);
    const size_t size = top == from_offset ? old_size : new_size;

    // Lookup forwarding
    {
      const zaddress to_addr = _forwarding->find(from_addr, &cursor);
      if (!is_null(to_addr)) {
        // Already relocated
        increase_other_forwarded(size);
        return to_addr;
      }
    }

    // Allocate object
    const zaddress allocated_addr = _allocator->alloc_object(to_page, size);
    if (is_null(allocated_addr)) {
      // Allocation failed
      return zaddress::null;
    }
    if (old_size != new_size && ((top == from_offset) != (allocated_addr == from_addr))) {
      _allocator->undo_alloc_object(to_page, allocated_addr, size);
      return zaddress::null;
    }

    // Copy object. Use conjoint copying if we are relocating
    // in-place and the new object overlaps with the old object.
    if (_forwarding->in_place_relocation() && allocated_addr + old_size > from_addr) {
      ZUtils::object_copy_conjoint(from_addr, allocated_addr, old_size);
    } else {
      ZUtils::object_copy_disjoint(from_addr, allocated_addr, old_size);
    }
    if (from_addr != allocated_addr) {
      ZUtils::initialize_hash_if_necessary(allocated_addr, from_addr);
    }

    // Insert forwarding
    const zaddress to_addr = _forwarding->insert(from_addr, allocated_addr, &cursor);
    if (to_addr != allocated_addr) {
      // Already relocated, undo allocation
      _allocator->undo_alloc_object(to_page, to_addr, size);
      increase_other_forwarded(size);
    }

    return to_addr;
  }

  void update_remset_old_to_old(zaddress from_addr, zaddress to_addr, size_t size) const {
    // Old-to-old relocation - move existing remset bits

    // If this is called for an in-place relocated page, then this code has the
    // responsibility to clear the old remset bits. Extra care is needed because:
    //
    // 1) The to-object copy can overlap with the from-object copy
    // 2) Remset bits of old objects need to be cleared
    //
    // A watermark is used to keep track of how far the old remset bits have been removed.

    const bool in_place = _forwarding->in_place_relocation();
    ZPage* const from_page = _forwarding->page();
    const uintptr_t from_local_offset = from_page->local_offset(from_addr);

    // Note: even with in-place relocation, the to_page could be another page
    ZPage* const to_page = ZHeap::heap()->page(to_addr);

    // Uses _relaxed version to handle that in-place relocation resets _top
    assert(ZHeap::heap()->is_in_page_relaxed(from_page, from_addr), "Must be");
    assert(to_page->is_in(to_addr), "Must be");

    assert(size <= ZUtils::object_size(to_addr), "old size must be <= new size");
    assert(size > 0, "size must be set");

    // If a young generation collection started while the old generation
    // relocated objects, the remembered set bits were flipped from "current"
    // to "previous".
    //
    // We need to select the correct remembered set bitmap to ensure that the
    // old remset bits are found.
    //
    // Note that if the young generation marking (remset scanning) finishes
    // before the old generation relocation has relocated this page, then the
    // young generation will visit this page's previous remembered set bits and
    // move them over to the current bitmap.
    //
    // If the young generation runs multiple cycles while the old generation is
    // relocating, then the first cycle will have consumed the old remset bits
    // and moved the associated objects to a new old page. The old relocation
    // could find either of the two bitmaps. So, either it will find the original
    // remset bits for the page, or it will find an empty bitmap for the page. It
    // doesn't matter for correctness, because the young generation marking has
    // already taken care of the bits.

    const bool active_remset_is_current = ZGeneration::old()->active_remset_is_current();

    // When in-place relocation is done and the old remset bits are located in
    // the bitmap that is going to be used for the new remset bits, then we
    // need to clear the old bits before the new bits are inserted.
    const bool iterate_current_remset = active_remset_is_current && !in_place;

    BitMap::Iterator iter = iterate_current_remset
        ? from_page->remset_iterator_limited_current(from_local_offset, size)
        : from_page->remset_iterator_limited_previous(from_local_offset, size);

    for (BitMap::idx_t field_bit : iter) {
      const uintptr_t field_local_offset = ZRememberedSet::to_offset(field_bit);

      // Add remset entry in the to-page
      const uintptr_t offset = field_local_offset - from_local_offset;
      const zaddress to_field = to_addr + offset;
      log_trace(gc, reloc)("Remember: from: " PTR_FORMAT " to: " PTR_FORMAT " current: %d marking: %d page: " PTR_FORMAT " remset: " PTR_FORMAT,
                           untype(from_page->start() + field_local_offset), untype(to_field), active_remset_is_current, ZGeneration::young()->is_phase_mark(), p2i(to_page), p2i(to_page->remset_current()));

      volatile zpointer* const p = (volatile zpointer*)to_field;

      if (ZGeneration::young()->is_phase_mark()) {
        // Young generation remembered set scanning needs to know about this
        // field. It will take responsibility to add a new remembered set entry if needed.
        _forwarding->relocated_remembered_fields_register(p);
      } else {
        to_page->remember(p);
        if (in_place) {
          assert(to_page->is_remembered(p), "p: " PTR_FORMAT, p2i(p));
        }
      }
    }
  }

  static bool add_remset_if_young(volatile zpointer* p, zaddress addr) {
    if (ZHeap::heap()->is_young(addr)) {
      ZRelocate::add_remset(p);
      return true;
    }

    return false;
  }

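  // Maintains the old-to-young remembered set invariant for a single field of
  // a promoted object: add a remset entry if the field (still) refers to a
  // young object, otherwise eagerly remap the pointer so that no stale pointer
  // is left behind that would need a remset entry only for deferred remapping.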
  static void update_remset_promoted_filter_and_remap_per_field(volatile zpointer* p) {
    const zpointer ptr = Atomic::load(p);

    assert(ZPointer::is_old_load_good(ptr), "Should be at least old load good: " PTR_FORMAT, untype(ptr));

    if (ZPointer::is_store_good(ptr)) {
      // Already has a remset entry
      return;
    }

    if (ZPointer::is_load_good(ptr)) {
      if (!is_null_any(ptr)) {
        const zaddress addr = ZPointer::uncolor(ptr);
        add_remset_if_young(p, addr);
      }
      // No need to remap, it is already load good
      return;
    }

    if (is_null_any(ptr)) {
      // Eagerly remap to skip adding a remset entry just to get deferred remapping
      ZBarrier::remap_young_relocated(p, ptr);
      return;
    }

    const zaddress_unsafe addr_unsafe = ZPointer::uncolor_unsafe(ptr);
    ZForwarding* const forwarding = ZGeneration::young()->forwarding(addr_unsafe);

    if (forwarding == nullptr) {
      // Object isn't being relocated
      const zaddress addr = safe(addr_unsafe);
      if (!add_remset_if_young(p, addr)) {
        // Not young - eagerly remap to skip adding a remset entry just to get deferred remapping
        ZBarrier::remap_young_relocated(p, ptr);
      }
      return;
    }

    const zaddress addr = forwarding->find(addr_unsafe);

    if (!is_null(addr)) {
      // Object has already been relocated
      if (!add_remset_if_young(p, addr)) {
        // Not young - eagerly remap to skip adding a remset entry just to get deferred remapping
        ZBarrier::remap_young_relocated(p, ptr);
      }
      return;
    }

    // Object has not been relocated yet
    // Don't want to eagerly relocate objects, so just add a remset
    ZRelocate::add_remset(p);
    return;
  }

  void update_remset_promoted(zaddress to_addr) const {
    ZIterator::basic_oop_iterate(to_oop(to_addr), update_remset_promoted_filter_and_remap_per_field);
  }

  void update_remset_for_fields(zaddress from_addr, zaddress to_addr, size_t size) const {
    if (_forwarding->to_age() != ZPageAge::old) {
      // No remembered set in young pages
      return;
    }

    // Need to deal with remset when moving objects to the old generation
    if (_forwarding->from_age() == ZPageAge::old) {
      update_remset_old_to_old(from_addr, to_addr, size);
      return;
    }

    // Normal promotion
    update_remset_promoted(to_addr);
  }

  void maybe_string_dedup(zaddress to_addr) {
    if (_forwarding->is_promotion()) {
      // Only deduplicate promoted objects, and let short-lived strings simply die instead.
      _string_dedup_context.request(to_oop(to_addr));
    }
  }

  bool try_relocate_object(zaddress from_addr) {
    size_t size = ZUtils::object_size(from_addr);
    const zaddress to_addr = try_relocate_object_inner(from_addr, size);

    if (is_null(to_addr)) {
      return false;
    }

    update_remset_for_fields(from_addr, to_addr, size);

    maybe_string_dedup(to_addr);

    return true;
  }

  void start_in_place_relocation_prepare_remset(ZPage* from_page) {
    if (_forwarding->from_age() != ZPageAge::old) {
      // Only old pages have remset bits
      return;
    }

    if (ZGeneration::old()->active_remset_is_current()) {
      // We want to iterate over and clear the remset bits of the from-space page,
      // and insert current bits in the to-space page. However, with in-place
      // relocation, the from-space and to-space pages are the same. Clearing
      // is destructive, and is difficult to perform before or during the iteration.
      // However, clearing of the current bits has to be done before exposing the
      // to-space objects in the forwarding table.
      //
      // To solve this tricky dependency problem, we start by stashing away the
      // current bits in the previous bits, and clearing the current bits
      // (implemented by swapping the bits). This way, the current bits are
      // cleared before copying the objects (like a normal to-space page),
      // and the previous bits represent a copy of the current bits
      // of the from-space page, and are used for iteration.
      from_page->swap_remset_bitmaps();
    }
  }

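  // Sets up in-place relocation, where the from-page is reused as the to-page.
  // The page is claimed so that other threads stay out of the page and its
  // forwarding table until relocation of the page has completed.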
  ZPage* start_in_place_relocation(zoffset relocated_watermark) {
    _forwarding->in_place_relocation_claim_page();
    _forwarding->in_place_relocation_start(relocated_watermark);

    ZPage* const from_page = _forwarding->page();

    const ZPageAge to_age = _forwarding->to_age();
    const bool promotion = _forwarding->is_promotion();

    // Promotions happen through a new cloned page
    ZPage* const to_page = promotion
        ? from_page->clone_for_promotion()
        : from_page->reset(to_age);

    // Reset page for in-place relocation
    to_page->reset_top_for_allocation();

    // Verify that the inactive remset is clear when resetting the page for
    // in-place relocation.
    if (from_page->age() == ZPageAge::old) {
      if (ZGeneration::old()->active_remset_is_current()) {
        to_page->verify_remset_cleared_previous();
      } else {
        to_page->verify_remset_cleared_current();
      }
    }

    // Clear remset bits for all objects that were relocated
    // before this page became an in-place relocated page.
    start_in_place_relocation_prepare_remset(from_page);

    if (promotion) {
      // Register the promotion
      ZGeneration::young()->in_place_relocate_promote(from_page, to_page);
      ZGeneration::young()->register_in_place_relocate_promoted(from_page);
    }

    return to_page;
  }

  void relocate_object(oop obj) {
    const zaddress addr = to_zaddress(obj);
    assert(ZHeap::heap()->is_object_live(addr), "Should be live");

    while (!try_relocate_object(addr)) {
      // Allocate a new target page, or if that fails, use the page being
      // relocated as the new target, which will cause it to be relocated
      // in-place.
      const ZPageAge to_age = _forwarding->to_age();
      ZPage* to_page = _allocator->alloc_and_retire_target_page(_forwarding, target(to_age));
      set_target(to_age, to_page);
      if (to_page != nullptr) {
        continue;
      }

      // Start in-place relocation to block other threads from accessing
      // the page, or its forwarding table, until it has been released
      // (relocation completed).
      to_page = start_in_place_relocation(ZAddress::offset(addr));
      set_target(to_age, to_page);
    }
  }

public:
  ZRelocateWork(Allocator* allocator, ZGeneration* generation)
    : _allocator(allocator),
      _forwarding(nullptr),
      _target(),
      _generation(generation),
      _other_promoted(0),
      _other_compacted(0) {}

  ~ZRelocateWork() {
    for (uint i = 0; i < ZAllocator::_relocation_allocators; ++i) {
      _allocator->free_target_page(_target[i]);
    }
    // Report statistics on behalf of non-worker threads
    _generation->increase_promoted(_other_promoted);
    _generation->increase_compacted(_other_compacted);
  }

  bool active_remset_is_current() const {
    // Normal old-to-old relocation can treat the from-page remset as a
    // read-only copy, and then copy over the appropriate remset bits to the
    // cleared to-page's 'current' remset bitmap.
    //
    // In-place relocation is more complicated. Since the same page is both
    // a from-page and a to-page, we need to remove the old remset bits, and
    // add remset bits that correspond to the new locations of the relocated
    // objects.
    //
    // Depending on how long ago (in terms of number of young GCs and the
    // current young GC's phase) the page was allocated, the active
    // remembered set will be in either the 'current' or 'previous' bitmap.
    //
    // If the active bits are in the 'previous' bitmap, we know that the
    // 'current' bitmap was cleared at some earlier point in time, and we can
    // simply set new bits in the 'current' bitmap, and later, when relocation
    // has read all the old remset bits, simply clear the 'previous' remset
    // bitmap.
    //
    // If, on the other hand, the active bits are in the 'current' bitmap, then
    // that bitmap will be used both to read the old remset bits, and as the
    // destination for the remset bits that we copy when an object is copied
    // to its new location within the page. We need to *carefully* remove
    // all old remset bits, without clearing out the newly set bits.
    return ZGeneration::old()->active_remset_is_current();
  }

  void clear_remset_before_in_place_reuse(ZPage* page) {
    if (_forwarding->from_age() != ZPageAge::old) {
      // No remset bits
      return;
    }

    // Clear 'previous' remset bits. For in-place relocated pages, the previous
    // remset bits are always used, even when active_remset_is_current().
    page->clear_remset_previous();
  }

  void finish_in_place_relocation() {
    // We are done with the from_space copy of the page
    _forwarding->in_place_relocation_finish();
  }

  void do_forwarding(ZForwarding* forwarding) {
    _forwarding = forwarding;

    _forwarding->page()->log_msg(" (relocate page)");

    ZVerify::before_relocation(_forwarding);

    // Relocate objects
    _forwarding->object_iterate([&](oop obj) { relocate_object(obj); });

    ZVerify::after_relocation(_forwarding);

    // Verify
    if (ZVerifyForwarding) {
      _forwarding->verify();
    }

    _generation->increase_freed(_forwarding->page()->size());

    // Deal with in-place relocation
    const bool in_place = _forwarding->in_place_relocation();
    if (in_place) {
      finish_in_place_relocation();
    }

    // Old from-space pages need to deal with remset bits
    if (_forwarding->from_age() == ZPageAge::old) {
      _forwarding->relocated_remembered_fields_after_relocate();
    }

    // Release relocated page
    _forwarding->release_page();

    if (in_place) {
      // Wait for all other threads to call release_page
      ZPage* const page = _forwarding->detach_page();

      // Ensure that previous remset bits are cleared
      clear_remset_before_in_place_reuse(page);

      page->log_msg(" (relocate page done in-place)");

      // Different pages when promoting
      ZPage* const target_page = target(_forwarding->to_age());
      _allocator->share_target_page(target_page);

    } else {
      // Wait for all other threads to call release_page
      ZPage* const page = _forwarding->detach_page();

      page->log_msg(" (relocate page done normal)");

      // Free page
      ZHeap::heap()->free_page(page);
    }
  }
};

class ZRelocateStoreBufferInstallBasePointersThreadClosure : public ThreadClosure {
public:
  virtual void do_thread(Thread* thread) {
    JavaThread* const jt = JavaThread::cast(thread);
    ZStoreBarrierBuffer* buffer = ZThreadLocalData::store_barrier_buffer(jt);
    buffer->install_base_pointers();
  }
};

// Installs the object base pointers (object starts) for the fields written
// in the store buffer. The code that searches for the object start uses the
// liveness information stored in the pages. That information is lost when the
// pages have been relocated and then destroyed.
class ZRelocateStoreBufferInstallBasePointersTask : public ZTask {
private:
  ZJavaThreadsIterator _threads_iter;

public:
  ZRelocateStoreBufferInstallBasePointersTask(ZGeneration* generation)
    : ZTask("ZRelocateStoreBufferInstallBasePointersTask"),
      _threads_iter(generation->id_optional()) {}

  virtual void work() {
    ZRelocateStoreBufferInstallBasePointersThreadClosure fix_store_buffer_cl;
    _threads_iter.apply(&fix_store_buffer_cl);
  }
};

class ZRelocateTask : public ZRestartableTask {
private:
  ZRelocationSetParallelIterator _iter;
  ZGeneration* const             _generation;
  ZRelocateQueue* const          _queue;
  ZRelocateSmallAllocator        _small_allocator;
  ZRelocateMediumAllocator       _medium_allocator;

public:
  ZRelocateTask(ZRelocationSet* relocation_set, ZRelocateQueue* queue)
    : ZRestartableTask("ZRelocateTask"),
      _iter(relocation_set),
      _generation(relocation_set->generation()),
      _queue(queue),
      _small_allocator(_generation),
      _medium_allocator(_generation) {}

  ~ZRelocateTask() {
    _generation->stat_relocation()->at_relocate_end(_small_allocator.in_place_count(), _medium_allocator.in_place_count());

    // Signal that we're not using the queue anymore. Used mostly for asserts.
    _queue->deactivate();
  }

  virtual void work() {
    ZRelocateWork<ZRelocateSmallAllocator> small(&_small_allocator, _generation);
    ZRelocateWork<ZRelocateMediumAllocator> medium(&_medium_allocator, _generation);

    const auto do_forwarding = [&](ZForwarding* forwarding) {
      ZPage* const page = forwarding->page();
      if (page->is_small()) {
        small.do_forwarding(forwarding);
      } else {
        medium.do_forwarding(forwarding);
      }

      // Absolute last thing done while relocating a page.
      //
      // We don't use the SuspendibleThreadSet when relocating pages.
      // Instead, the ZRelocateQueue is used as a pseudo STS joiner/leaver.
      //
      // After the mark_done call, a safepoint could complete and a
      // new GC phase could be entered.
      forwarding->mark_done();
    };

    const auto claim_and_do_forwarding = [&](ZForwarding* forwarding) {
      if (forwarding->claim()) {
        do_forwarding(forwarding);
      }
    };

    const auto do_forwarding_one_from_iter = [&]() {
      ZForwarding* forwarding;

      if (_iter.next(&forwarding)) {
        claim_and_do_forwarding(forwarding);
        return true;
      }

      return false;
    };

    for (;;) {
      // As long as there are requests in the relocate queue, there are threads
      // waiting in a VM state that does not allow them to be blocked. The
      // worker thread needs to finish relocating these pages, and allow the
      // other threads to continue and proceed to a blocking state. After that,
      // the worker threads are allowed to safepoint synchronize.
      for (ZForwarding* forwarding; (forwarding = _queue->synchronize_poll()) != nullptr;) {
        do_forwarding(forwarding);
      }

      if (!do_forwarding_one_from_iter()) {
        // No more work
        break;
      }

      if (_generation->should_worker_resize()) {
        break;
      }
    }

    _queue->leave();
  }

  virtual void resize_workers(uint nworkers) {
    _queue->resize_workers(nworkers);
  }
};

static void remap_and_maybe_add_remset(volatile zpointer* p) {
  const zpointer ptr = Atomic::load(p);

  if (ZPointer::is_store_good(ptr)) {
    // Already has a remset entry
    return;
  }

  // Remset entries are used for two reasons:
  // 1) Young marking old-to-young pointer roots
  // 2) Deferred remapping of stale old-to-young pointers
  //
  // This load barrier will up-front perform the remapping of (2),
  // and the code below only has to make sure we register up-to-date
  // old-to-young pointers for (1).
  const zaddress addr = ZBarrier::load_barrier_on_oop_field_preloaded(p, ptr);

  if (is_null(addr)) {
    // No need for remset entries for null pointers
    return;
  }

  if (ZHeap::heap()->is_old(addr)) {
    // No need for remset entries for pointers to old gen
    return;
  }

  ZRelocate::add_remset(p);
}

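// Flip promoted pages keep their objects in place, so their fields never pass
// through the relocation remset updates above. This task iterates over all
// objects in such pages, remapping their fields and/or adding the missing
// remembered set entries.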
class ZRelocateAddRemsetForFlipPromoted : public ZRestartableTask {
private:
  ZStatTimerYoung                _timer;
  ZArrayParallelIterator<ZPage*> _iter;

public:
  ZRelocateAddRemsetForFlipPromoted(ZArray<ZPage*>* pages)
    : ZRestartableTask("ZRelocateAddRemsetForFlipPromoted"),
      _timer(ZSubPhaseConcurrentRelocateRememberedSetFlipPromotedYoung),
      _iter(pages) {}

  virtual void work() {
    SuspendibleThreadSetJoiner sts_joiner;
    ZStringDedupContext string_dedup_context;

    for (ZPage* page; _iter.next(&page);) {
      page->object_iterate([&](oop obj) {
        // Remap oops and add remset if needed
        ZIterator::basic_oop_iterate_safe(obj, remap_and_maybe_add_remset);

        // String dedup
        string_dedup_context.request(obj);
      });

      SuspendibleThreadSet::yield();
      if (ZGeneration::young()->should_worker_resize()) {
        return;
      }
    }
  }
};

void ZRelocate::relocate(ZRelocationSet* relocation_set) {
  {
    // Install the store buffer's base pointers before the
    // relocate task destroys the liveness information in
    // the relocated pages.
    ZRelocateStoreBufferInstallBasePointersTask buffer_task(_generation);
    workers()->run(&buffer_task);
  }

  {
    ZRelocateTask relocate_task(relocation_set, &_queue);
    workers()->run(&relocate_task);
  }

  if (relocation_set->generation()->is_young()) {
    ZRelocateAddRemsetForFlipPromoted task(relocation_set->flip_promoted_pages());
    workers()->run(&task);
  }
}

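// Objects age by one young collection at a time until the page age reaches
// the tenuring threshold, at which point they are promoted to the old
// generation.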
ZPageAge ZRelocate::compute_to_age(ZPageAge from_age) {
  if (from_age == ZPageAge::old) {
    return ZPageAge::old;
  }

  const uint age = untype(from_age);
  if (age >= ZGeneration::young()->tenuring_threshold()) {
    return ZPageAge::old;
  }

  return to_zpageage(age + 1);
}

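// Flip aging changes the age of a page without moving its objects: the page is
// reset with its new age (or cloned for promotion), its livemap is cleared,
// and promoted pages are registered with the young generation.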
class ZFlipAgePagesTask : public ZTask {
private:
  ZArrayParallelIterator<ZPage*> _iter;

public:
  ZFlipAgePagesTask(const ZArray<ZPage*>* pages)
    : ZTask("ZPromotePagesTask"),
      _iter(pages) {}

  virtual void work() {
    SuspendibleThreadSetJoiner sts_joiner;
    ZArray<ZPage*> promoted_pages;

    for (ZPage* prev_page; _iter.next(&prev_page);) {
      const ZPageAge from_age = prev_page->age();
      const ZPageAge to_age = ZRelocate::compute_to_age(from_age);
      assert(from_age != ZPageAge::old, "invalid age for a young collection");

      // Figure out if this is a proper promotion
      const bool promotion = to_age == ZPageAge::old;

      if (promotion) {
        // Before promoting an object (and before relocate start), we must ensure that all
        // contained zpointers are store good. The marking code ensures that for non-null
        // pointers, but null pointers are ignored. This code ensures that even null pointers
        // are made store good, for the promoted objects.
        prev_page->object_iterate([&](oop obj) {
          ZIterator::basic_oop_iterate_safe(obj, ZBarrier::promote_barrier_on_young_oop_field);
        });
      }

      // Logging
      prev_page->log_msg(promotion ? " (flip promoted)" : " (flip survived)");

      // Setup to-space page
      ZPage* const new_page = promotion
          ? prev_page->clone_for_promotion()
          : prev_page->reset(to_age);

      // Reset page for flip aging
      new_page->reset_livemap();

      if (promotion) {
        ZGeneration::young()->flip_promote(prev_page, new_page);
        // Defer promoted page registration to limit the number of times the lock is taken
        promoted_pages.push(prev_page);
      }

      SuspendibleThreadSet::yield();
    }

    ZGeneration::young()->register_flip_promoted(promoted_pages);
  }
};

void ZRelocate::flip_age_pages(const ZArray<ZPage*>* pages) {
  ZFlipAgePagesTask flip_age_task(pages);
  workers()->run(&flip_age_task);
}

void ZRelocate::synchronize() {
  _queue.synchronize();
}

void ZRelocate::desynchronize() {
  _queue.desynchronize();
}

ZRelocateQueue* ZRelocate::queue() {
  return &_queue;
}

bool ZRelocate::is_queue_active() const {
  return _queue.is_active();
}