/*
 * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zAbort.inline.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zAllocator.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zForwarding.inline.hpp"
#include "gc/z/zGeneration.inline.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zIndexDistributor.inline.hpp"
#include "gc/z/zIterator.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageAge.hpp"
#include "gc/z/zRelocate.hpp"
#include "gc/z/zRelocationSet.inline.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStackWatermark.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zUncoloredRoot.inline.hpp"
#include "gc/z/zVerify.hpp"
#include "gc/z/zWorkers.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/atomic.hpp"
#include "utilities/debug.hpp"

static const ZStatCriticalPhase ZCriticalPhaseRelocationStall("Relocation Stall");
static const ZStatSubPhase ZSubPhaseConcurrentRelocateRememberedSetFlipPromotedYoung("Concurrent Relocate Remset FP", ZGenerationId::young);

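// Helpers for translating between from-addresses and entries in a page's
// forwarding table. A find returns the to-address of an already relocated
// object (or null), and an insert publishes the to-address chosen by the
// thread that won the relocation race.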
static uintptr_t forwarding_index(ZForwarding* forwarding, zoffset from_offset) {
  return (from_offset - forwarding->start()) >> forwarding->object_alignment_shift();
}

static zaddress forwarding_find(ZForwarding* forwarding, zoffset from_offset, ZForwardingCursor* cursor) {
  const uintptr_t from_index = forwarding_index(forwarding, from_offset);
  const ZForwardingEntry entry = forwarding->find(from_index, cursor);
  return entry.populated() ? ZOffset::address(to_zoffset(entry.to_offset())) : zaddress::null;
}

static zaddress forwarding_find(ZForwarding* forwarding, zaddress_unsafe from_addr, ZForwardingCursor* cursor) {
  return forwarding_find(forwarding, ZAddress::offset(from_addr), cursor);
}

static zaddress forwarding_find(ZForwarding* forwarding, zaddress from_addr, ZForwardingCursor* cursor) {
  return forwarding_find(forwarding, ZAddress::offset(from_addr), cursor);
}

static zaddress forwarding_insert(ZForwarding* forwarding, zoffset from_offset, zaddress to_addr, ZForwardingCursor* cursor) {
  const uintptr_t from_index = forwarding_index(forwarding, from_offset);
  const zoffset to_offset = ZAddress::offset(to_addr);
  const zoffset to_offset_final = forwarding->insert(from_index, to_offset, cursor);
  return ZOffset::address(to_offset_final);
}

static zaddress forwarding_insert(ZForwarding* forwarding, zaddress from_addr, zaddress to_addr, ZForwardingCursor* cursor) {
  return forwarding_insert(forwarding, ZAddress::offset(from_addr), to_addr, cursor);
}

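// The relocation queue holds forwardings (pages) whose relocation has been
// handed over to the GC workers. A thread that cannot complete a relocation
// itself (see ZRelocate::relocate_object) appends the forwarding and waits
// until a worker has finished relocating the page. The queue is also used to
// synchronize the relocating workers when they need to pause, see
// synchronize() and desynchronize().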
ZRelocateQueue::ZRelocateQueue()
  : _lock(),
    _queue(),
    _nworkers(0),
    _nsynchronized(0),
    _synchronize(false),
    _is_active(false),
    _needs_attention(0) {}

bool ZRelocateQueue::needs_attention() const {
  return Atomic::load(&_needs_attention) != 0;
}

void ZRelocateQueue::inc_needs_attention() {
  const int needs_attention = Atomic::add(&_needs_attention, 1);
  assert(needs_attention == 1 || needs_attention == 2, "Invalid state");
}

void ZRelocateQueue::dec_needs_attention() {
  const int needs_attention = Atomic::sub(&_needs_attention, 1);
  assert(needs_attention == 0 || needs_attention == 1, "Invalid state");
}

void ZRelocateQueue::activate(uint nworkers) {
  _is_active = true;
  join(nworkers);
}

void ZRelocateQueue::deactivate() {
  Atomic::store(&_is_active, false);
  clear();
}

bool ZRelocateQueue::is_active() const {
  return Atomic::load(&_is_active);
}

void ZRelocateQueue::join(uint nworkers) {
  assert(nworkers != 0, "Must request at least one worker");
  assert(_nworkers == 0, "Invalid state");
  assert(_nsynchronized == 0, "Invalid state");

  log_debug(gc, reloc)("Joining workers: %u", nworkers);

  _nworkers = nworkers;
}

void ZRelocateQueue::resize_workers(uint nworkers) {
  assert(nworkers != 0, "Must request at least one worker");
  assert(_nworkers == 0, "Invalid state");
  assert(_nsynchronized == 0, "Invalid state");

  log_debug(gc, reloc)("Resize workers: %u", nworkers);

  ZLocker<ZConditionLock> locker(&_lock);
  _nworkers = nworkers;
}

void ZRelocateQueue::leave() {
  ZLocker<ZConditionLock> locker(&_lock);
  _nworkers--;

  assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);

  log_debug(gc, reloc)("Leaving workers: left: %u _synchronize: %d _nsynchronized: %u", _nworkers, _synchronize, _nsynchronized);

  // Prune done forwardings
  const bool forwardings_done = prune();

  // Check if all workers synchronized
  const bool last_synchronized = _synchronize && _nworkers == _nsynchronized;

  if (forwardings_done || last_synchronized) {
    _lock.notify_all();
  }
}

void ZRelocateQueue::add_and_wait(ZForwarding* forwarding) {
  ZStatTimer timer(ZCriticalPhaseRelocationStall);
  ZLocker<ZConditionLock> locker(&_lock);

  if (forwarding->is_done()) {
    return;
  }

  _queue.append(forwarding);
  if (_queue.length() == 1) {
    // Queue became non-empty
    inc_needs_attention();
    _lock.notify_all();
  }

  while (!forwarding->is_done()) {
    _lock.wait();
  }
}

bool ZRelocateQueue::prune() {
  if (_queue.is_empty()) {
    return false;
  }

  bool done = false;

  for (int i = 0; i < _queue.length();) {
    const ZForwarding* const forwarding = _queue.at(i);
    if (forwarding->is_done()) {
      done = true;

      _queue.delete_at(i);
    } else {
      i++;
    }
  }

  if (_queue.is_empty()) {
    dec_needs_attention();
  }

  return done;
}

ZForwarding* ZRelocateQueue::prune_and_claim() {
  if (prune()) {
    _lock.notify_all();
  }

  for (int i = 0; i < _queue.length(); i++) {
    ZForwarding* const forwarding = _queue.at(i);
    if (forwarding->claim()) {
      return forwarding;
    }
  }

  return nullptr;
}

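// Scoped helper that counts the current worker as synchronized while it is
// waiting inside synchronize_poll(), and removes it from the count again
// when the scope is exited.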
class ZRelocateQueueSynchronizeThread {
private:
  ZRelocateQueue* const _queue;

public:
  ZRelocateQueueSynchronizeThread(ZRelocateQueue* queue)
    : _queue(queue) {
    _queue->synchronize_thread();
  }

  ~ZRelocateQueueSynchronizeThread() {
    _queue->desynchronize_thread();
  }
};

void ZRelocateQueue::synchronize_thread() {
  _nsynchronized++;

  log_debug(gc, reloc)("Synchronize worker _nsynchronized %u", _nsynchronized);

  assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);
  if (_nsynchronized == _nworkers) {
    // All workers synchronized
    _lock.notify_all();
  }
}

void ZRelocateQueue::desynchronize_thread() {
  _nsynchronized--;

  log_debug(gc, reloc)("Desynchronize worker _nsynchronized %u", _nsynchronized);

  assert(_nsynchronized < _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);
}

ZForwarding* ZRelocateQueue::synchronize_poll() {
  // Fast path avoids locking
  if (!needs_attention()) {
    return nullptr;
  }

  // Slow path to get the next forwarding and/or synchronize
  ZLocker<ZConditionLock> locker(&_lock);

  {
    ZForwarding* const forwarding = prune_and_claim();
    if (forwarding != nullptr) {
      // Don't become synchronized while there are elements in the queue
      return forwarding;
    }
  }

  if (!_synchronize) {
    return nullptr;
  }

  ZRelocateQueueSynchronizeThread rqst(this);

  do {
    _lock.wait();

    ZForwarding* const forwarding = prune_and_claim();
    if (forwarding != nullptr) {
      return forwarding;
    }
  } while (_synchronize);

  return nullptr;
}

void ZRelocateQueue::clear() {
  assert(_nworkers == 0, "Invalid state");

  if (_queue.is_empty()) {
    return;
  }

  ZArrayIterator<ZForwarding*> iter(&_queue);
  for (ZForwarding* forwarding; iter.next(&forwarding);) {
    assert(forwarding->is_done(), "All should be done");
  }

  assert(false, "Clear was not empty");

  _queue.clear();
  dec_needs_attention();
}

void ZRelocateQueue::synchronize() {
  ZLocker<ZConditionLock> locker(&_lock);
  _synchronize = true;

  inc_needs_attention();

  log_debug(gc, reloc)("Synchronize all workers 1 _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);

  while (_nworkers != _nsynchronized) {
    _lock.wait();
    log_debug(gc, reloc)("Synchronize all workers 2 _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);
  }
}

void ZRelocateQueue::desynchronize() {
  ZLocker<ZConditionLock> locker(&_lock);
  _synchronize = false;

  log_debug(gc, reloc)("Desynchronize all workers _nworkers: %u _nsynchronized: %u", _nworkers, _nsynchronized);

  assert(_nsynchronized <= _nworkers, "_nsynchronized: %u _nworkers: %u", _nsynchronized, _nworkers);

  dec_needs_attention();

  _lock.notify_all();
}

ZRelocate::ZRelocate(ZGeneration* generation)
  : _generation(generation),
    _queue() {}

ZWorkers* ZRelocate::workers() const {
  return _generation->workers();
}

void ZRelocate::start() {
  _queue.activate(workers()->active_workers());
}

void ZRelocate::add_remset(volatile zpointer* p) {
  ZGeneration::young()->remember(p);
}

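// Copies a single live object to a newly allocated location and publishes the
// forwarding entry. Returns null if the allocation failed, or the address of
// the winning copy if another thread relocated the object first, in which
// case the local allocation is undone.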
static zaddress relocate_object_inner(ZForwarding* forwarding, zaddress from_addr, ZForwardingCursor* cursor) {
  assert(ZHeap::heap()->is_object_live(from_addr), "Should be live");

  // Allocate object
  const size_t size = ZUtils::object_size(from_addr);

  ZAllocatorForRelocation* allocator = ZAllocator::relocation(forwarding->to_age());

  const zaddress to_addr = allocator->alloc_object(size);

  if (is_null(to_addr)) {
    // Allocation failed
    return zaddress::null;
  }

  // Copy object
  ZUtils::object_copy_disjoint(from_addr, to_addr, size);

  // Insert forwarding
  const zaddress to_addr_final = forwarding_insert(forwarding, from_addr, to_addr, cursor);

  if (to_addr_final != to_addr) {
    // Already relocated, try undo allocation
    allocator->undo_alloc_object(to_addr, size);
  }

  return to_addr_final;
}

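// Returns the to-address of an object on a page in the relocation set. The
// forwarding table is consulted first; if the object has not been relocated
// yet and the page can be retained, the object is relocated here. If the
// required allocation fails, the page is handed over to the GC workers via
// the relocation queue and this thread waits for them to finish.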
zaddress ZRelocate::relocate_object(ZForwarding* forwarding, zaddress_unsafe from_addr) {
  ZForwardingCursor cursor;

  // Lookup forwarding
  zaddress to_addr = forwarding_find(forwarding, from_addr, &cursor);
  if (!is_null(to_addr)) {
    // Already relocated
    return to_addr;
  }

  // Relocate object
  if (forwarding->retain_page(&_queue)) {
    assert(_generation->is_phase_relocate(), "Must be");
    to_addr = relocate_object_inner(forwarding, safe(from_addr), &cursor);
    forwarding->release_page();

    if (!is_null(to_addr)) {
      // Success
      return to_addr;
    }

    // Failed to relocate object. Signal and wait for a worker thread to
    // complete relocation of this page, and then forward the object.
    _queue.add_and_wait(forwarding);
  }

  // Forward object
  return forward_object(forwarding, from_addr);
}

zaddress ZRelocate::forward_object(ZForwarding* forwarding, zaddress_unsafe from_addr) {
  ZForwardingCursor cursor;
  const zaddress to_addr = forwarding_find(forwarding, from_addr, &cursor);
  assert(!is_null(to_addr), "Should be forwarded: " PTR_FORMAT, untype(from_addr));
  return to_addr;
}

static ZPage* alloc_page(ZAllocatorForRelocation* allocator, ZPageType type, size_t size) {
  if (ZStressRelocateInPlace) {
    // Simulate failure to allocate a new page. This will
    // cause the page being relocated to be relocated in-place.
    return nullptr;
  }

  ZAllocationFlags flags;
  flags.set_non_blocking();
  flags.set_gc_relocation();

  return allocator->alloc_page_for_relocation(type, size, flags);
}

static void retire_target_page(ZGeneration* generation, ZPage* page) {
  if (generation->is_young() && page->is_old()) {
    generation->increase_promoted(page->used());
  } else {
    generation->increase_compacted(page->used());
  }

  // Free target page if it is empty. We can end up with an empty target
  // page if we allocated a new target page, and then lost the race to
  // relocate the remaining objects, leaving the target page empty when
  // relocation completed.
  if (page->used() == 0) {
    ZHeap::heap()->free_page(page);
  }
}

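// Allocator used when relocating objects from small pages. Each worker
// relocates into its own target pages, so object allocation and undo do not
// need to be atomic, and target pages are never shared between workers.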
class ZRelocateSmallAllocator {
private:
  ZGeneration* const _generation;
  volatile size_t _in_place_count;

public:
  ZRelocateSmallAllocator(ZGeneration* generation)
    : _generation(generation),
      _in_place_count(0) {}

  ZPage* alloc_and_retire_target_page(ZForwarding* forwarding, ZPage* target) {
    ZAllocatorForRelocation* const allocator = ZAllocator::relocation(forwarding->to_age());
    ZPage* const page = alloc_page(allocator, forwarding->type(), forwarding->size());
    if (page == nullptr) {
      Atomic::inc(&_in_place_count);
    }

    if (target != nullptr) {
      // Retire the old target page
      retire_target_page(_generation, target);
    }

    return page;
  }

  void share_target_page(ZPage* page) {
    // Does nothing
  }

  void free_target_page(ZPage* page) {
    if (page != nullptr) {
      retire_target_page(_generation, page);
    }
  }

  zaddress alloc_object(ZPage* page, size_t size) const {
    return (page != nullptr) ? page->alloc_object(size) : zaddress::null;
  }

  void undo_alloc_object(ZPage* page, zaddress addr, size_t size) const {
    page->undo_alloc_object(addr, size);
  }

  const size_t in_place_count() const {
    return _in_place_count;
  }
};

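// Allocator used when relocating objects from medium pages. Target pages
// (one per target age) are shared between all workers, so object allocation
// and undo use atomic operations, and a lock coordinates switching target
// pages and handing over in-place relocated pages.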
class ZRelocateMediumAllocator {
private:
  ZGeneration* const _generation;
  ZConditionLock _lock;
  ZPage* _shared[ZAllocator::_relocation_allocators];
  bool _in_place;
  volatile size_t _in_place_count;

public:
  ZRelocateMediumAllocator(ZGeneration* generation)
    : _generation(generation),
      _lock(),
      _shared(),
      _in_place(false),
      _in_place_count(0) {}

  ~ZRelocateMediumAllocator() {
    for (uint i = 0; i < ZAllocator::_relocation_allocators; ++i) {
      if (_shared[i] != nullptr) {
        retire_target_page(_generation, _shared[i]);
      }
    }
  }

  ZPage* shared(ZPageAge age) {
    return _shared[static_cast<uint>(age) - 1];
  }

  void set_shared(ZPageAge age, ZPage* page) {
    _shared[static_cast<uint>(age) - 1] = page;
  }

  ZPage* alloc_and_retire_target_page(ZForwarding* forwarding, ZPage* target) {
    ZLocker<ZConditionLock> locker(&_lock);

    // Wait for any ongoing in-place relocation to complete
    while (_in_place) {
      _lock.wait();
    }

    // Allocate a new page only if the shared page is the same as the
    // current target page. The shared page will be different from the
    // current target page if another thread shared a page, or allocated
    // a new page.
    const ZPageAge to_age = forwarding->to_age();
    if (shared(to_age) == target) {
      ZAllocatorForRelocation* const allocator = ZAllocator::relocation(forwarding->to_age());
      ZPage* const to_page = alloc_page(allocator, forwarding->type(), forwarding->size());
      set_shared(to_age, to_page);
      if (to_page == nullptr) {
        Atomic::inc(&_in_place_count);
        _in_place = true;
      }

      // This thread is responsible for retiring the shared target page
      if (target != nullptr) {
        retire_target_page(_generation, target);
      }
    }

    return shared(to_age);
  }

  void share_target_page(ZPage* page) {
    const ZPageAge age = page->age();

    ZLocker<ZConditionLock> locker(&_lock);
    assert(_in_place, "Invalid state");
    assert(shared(age) == nullptr, "Invalid state");
    assert(page != nullptr, "Invalid page");

    set_shared(age, page);
    _in_place = false;

    _lock.notify_all();
  }

  void free_target_page(ZPage* page) {
    // Does nothing
  }

  zaddress alloc_object(ZPage* page, size_t size) const {
    return (page != nullptr) ? page->alloc_object_atomic(size) : zaddress::null;
  }

  void undo_alloc_object(ZPage* page, zaddress addr, size_t size) const {
    page->undo_alloc_object_atomic(addr, size);
  }

  const size_t in_place_count() const {
    return _in_place_count;
  }
};

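// Per-worker helper that relocates the live objects of one forwarding (page)
// at a time, parameterized on the small or medium page allocator. It also
// tracks promoted/compacted bytes relocated on behalf of other threads and
// performs the remembered set updates required for old pages.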
template <typename Allocator>
class ZRelocateWork : public StackObj {
private:
  Allocator* const _allocator;
  ZForwarding* _forwarding;
  ZPage* _target[ZAllocator::_relocation_allocators];
  ZGeneration* const _generation;
  size_t _other_promoted;
  size_t _other_compacted;

  ZPage* target(ZPageAge age) {
    return _target[static_cast<uint>(age) - 1];
  }

  void set_target(ZPageAge age, ZPage* page) {
    _target[static_cast<uint>(age) - 1] = page;
  }

  size_t object_alignment() const {
    return (size_t)1 << _forwarding->object_alignment_shift();
  }

  void increase_other_forwarded(size_t unaligned_object_size) {
    const size_t aligned_size = align_up(unaligned_object_size, object_alignment());
    if (_forwarding->is_promotion()) {
      _other_promoted += aligned_size;
    } else {
      _other_compacted += aligned_size;
    }
  }

  zaddress try_relocate_object_inner(zaddress from_addr) {
    ZForwardingCursor cursor;

    ZPage* const to_page = target(_forwarding->to_age());

    // Lookup forwarding
    {
      const zaddress to_addr = forwarding_find(_forwarding, from_addr, &cursor);
      if (!is_null(to_addr)) {
        // Already relocated
        const size_t size = ZUtils::object_size(to_addr);
        increase_other_forwarded(size);
        return to_addr;
      }
    }

    // Allocate object
    const size_t size = ZUtils::object_size(from_addr);
    const zaddress allocated_addr = _allocator->alloc_object(to_page, size);
    if (is_null(allocated_addr)) {
      // Allocation failed
      return zaddress::null;
    }

    // Copy object. Use conjoint copying if we are relocating
    // in-place and the new object overlaps with the old object.
    if (_forwarding->in_place_relocation() && allocated_addr + size > from_addr) {
      ZUtils::object_copy_conjoint(from_addr, allocated_addr, size);
    } else {
      ZUtils::object_copy_disjoint(from_addr, allocated_addr, size);
    }

    // Insert forwarding
    const zaddress to_addr = forwarding_insert(_forwarding, from_addr, allocated_addr, &cursor);
    if (to_addr != allocated_addr) {
      // Already relocated, undo allocation
      _allocator->undo_alloc_object(to_page, to_addr, size);
      increase_other_forwarded(size);
    }

    return to_addr;
  }

  void update_remset_old_to_old(zaddress from_addr, zaddress to_addr) const {
    // Old-to-old relocation - move existing remset bits

    // If this is called for an in-place relocated page, then this code has the
    // responsibility to clear the old remset bits. Extra care is needed because:
    //
    // 1) The to-object copy can overlap with the from-object copy
    // 2) Remset bits of old objects need to be cleared
    //
    // A watermark is used to keep track of how far the old remset bits have been removed.

    const bool in_place = _forwarding->in_place_relocation();
    ZPage* const from_page = _forwarding->page();
    const uintptr_t from_local_offset = from_page->local_offset(from_addr);

    // Note: even with in-place relocation, the to_page could be another page
    ZPage* const to_page = ZHeap::heap()->page(to_addr);

    // Uses _relaxed version to handle that in-place relocation resets _top
    assert(ZHeap::heap()->is_in_page_relaxed(from_page, from_addr), "Must be");
    assert(to_page->is_in(to_addr), "Must be");

    // Read the size from the to-object, since the from-object
    // could have been overwritten during in-place relocation.
    const size_t size = ZUtils::object_size(to_addr);

    // If a young generation collection started while the old generation
    // relocated objects, the remembered set bits were flipped from "current"
    // to "previous".
    //
    // We need to select the correct remembered set bitmap to ensure that the
    // old remset bits are found.
    //
    // Note that if the young generation marking (remset scanning) finishes
    // before the old generation relocation has relocated this page, then the
    // young generation will visit this page's previous remembered set bits
    // and move them over to the current bitmap.
    //
    // If the young generation runs multiple cycles while the old generation is
    // relocating, then the first cycle will have consumed the old remset bits
    // and moved the associated objects to a new old page. The old relocation
    // could then find either of the two bitmaps. So, either it will find the
    // original remset bits for the page, or it will find an empty bitmap for
    // the page. It doesn't matter for correctness, because the young generation
    // marking has already taken care of the bits.

    const bool active_remset_is_current = ZGeneration::old()->active_remset_is_current();

    // When in-place relocation is done and the old remset bits are located in
    // the bitmap that is going to be used for the new remset bits, then we
    // need to clear the old bits before the new bits are inserted.
    const bool iterate_current_remset = active_remset_is_current && !in_place;

    BitMap::Iterator iter = iterate_current_remset
        ? from_page->remset_iterator_limited_current(from_local_offset, size)
        : from_page->remset_iterator_limited_previous(from_local_offset, size);

    for (BitMap::idx_t field_bit : iter) {
      const uintptr_t field_local_offset = ZRememberedSet::to_offset(field_bit);

      // Add remset entry in the to-page
      const uintptr_t offset = field_local_offset - from_local_offset;
      const zaddress to_field = to_addr + offset;
      log_trace(gc, reloc)("Remember: from: " PTR_FORMAT " to: " PTR_FORMAT " current: %d marking: %d page: " PTR_FORMAT " remset: " PTR_FORMAT,
                           untype(from_page->start() + field_local_offset), untype(to_field), active_remset_is_current, ZGeneration::young()->is_phase_mark(), p2i(to_page), p2i(to_page->remset_current()));

      volatile zpointer* const p = (volatile zpointer*)to_field;

      if (ZGeneration::young()->is_phase_mark()) {
        // Young generation remembered set scanning needs to know about this
        // field. It will take responsibility to add a new remembered set entry if needed.
        _forwarding->relocated_remembered_fields_register(p);
      } else {
        to_page->remember(p);
        if (in_place) {
          assert(to_page->is_remembered(p), "p: " PTR_FORMAT, p2i(p));
        }
      }
    }
  }

  static bool add_remset_if_young(volatile zpointer* p, zaddress addr) {
    if (ZHeap::heap()->is_young(addr)) {
      ZRelocate::add_remset(p);
      return true;
    }

    return false;
  }

  static void update_remset_promoted_filter_and_remap_per_field(volatile zpointer* p) {
    const zpointer ptr = Atomic::load(p);

    assert(ZPointer::is_old_load_good(ptr), "Should be at least old load good: " PTR_FORMAT, untype(ptr));

    if (ZPointer::is_store_good(ptr)) {
      // Already has a remset entry
      return;
    }

    if (ZPointer::is_load_good(ptr)) {
      if (!is_null_any(ptr)) {
        const zaddress addr = ZPointer::uncolor(ptr);
        add_remset_if_young(p, addr);
      }
      // No need to remap, it is already load good
      return;
    }

    if (is_null_any(ptr)) {
      // Eagerly remap to skip adding a remset entry just to get deferred remapping
      ZBarrier::remap_young_relocated(p, ptr);
      return;
    }

    const zaddress_unsafe addr_unsafe = ZPointer::uncolor_unsafe(ptr);
    ZForwarding* const forwarding = ZGeneration::young()->forwarding(addr_unsafe);

    if (forwarding == nullptr) {
      // Object isn't being relocated
      const zaddress addr = safe(addr_unsafe);
      if (!add_remset_if_young(p, addr)) {
        // Not young - eagerly remap to skip adding a remset entry just to get deferred remapping
        ZBarrier::remap_young_relocated(p, ptr);
      }
      return;
    }

    const zaddress addr = forwarding->find(addr_unsafe);

    if (!is_null(addr)) {
      // Object has already been relocated
      if (!add_remset_if_young(p, addr)) {
        // Not young - eagerly remap to skip adding a remset entry just to get deferred remapping
        ZBarrier::remap_young_relocated(p, ptr);
      }
      return;
    }

    // Object has not been relocated yet
    // Don't want to eagerly relocate objects, so just add a remset entry
    ZRelocate::add_remset(p);
    return;
  }

  void update_remset_promoted(zaddress to_addr) const {
    ZIterator::basic_oop_iterate(to_oop(to_addr), update_remset_promoted_filter_and_remap_per_field);
  }

  void update_remset_for_fields(zaddress from_addr, zaddress to_addr) const {
    if (_forwarding->to_age() != ZPageAge::old) {
      // No remembered set in young pages
      return;
    }

    // Need to deal with remset when moving objects to the old generation
    if (_forwarding->from_age() == ZPageAge::old) {
      update_remset_old_to_old(from_addr, to_addr);
      return;
    }

    // Normal promotion
    update_remset_promoted(to_addr);
  }

  bool try_relocate_object(zaddress from_addr) {
    const zaddress to_addr = try_relocate_object_inner(from_addr);

    if (is_null(to_addr)) {
      return false;
    }

    update_remset_for_fields(from_addr, to_addr);

    return true;
  }

  void start_in_place_relocation_prepare_remset(ZPage* from_page) {
    if (_forwarding->from_age() != ZPageAge::old) {
      // Only old pages use remset bits
      return;
    }

    if (ZGeneration::old()->active_remset_is_current()) {
      // We want to iterate over and clear the remset bits of the from-space page,
      // and insert current bits in the to-space page. However, with in-place
      // relocation, the from-space and to-space pages are the same. Clearing
      // is destructive, and is difficult to perform before or during the iteration.
      // However, clearing of the current bits has to be done before exposing the
      // to-space objects in the forwarding table.
      //
      // To solve this tricky dependency problem, we start by stashing away the
      // current bits in the previous bits, and clearing the current bits
      // (implemented by swapping the bits). This way, the current bits are
      // cleared before copying the objects (like a normal to-space page),
      // and the previous bits are representing a copy of the current bits
      // of the from-space page, and are used for iteration.
      from_page->swap_remset_bitmaps();
    }
  }

  ZPage* start_in_place_relocation(zoffset relocated_watermark) {
    _forwarding->in_place_relocation_claim_page();
    _forwarding->in_place_relocation_start(relocated_watermark);

    ZPage* const from_page = _forwarding->page();

    const ZPageAge to_age = _forwarding->to_age();
    const bool promotion = _forwarding->is_promotion();

    // Promotions happen through a new cloned page
    ZPage* const to_page = promotion ? from_page->clone_limited() : from_page;
    to_page->reset(to_age, ZPageResetType::InPlaceRelocation);

    // Clear remset bits for all objects that were relocated
    // before this page became an in-place relocated page.
    start_in_place_relocation_prepare_remset(from_page);

    if (promotion) {
      // Register the promotion
      ZGeneration::young()->in_place_relocate_promote(from_page, to_page);
      ZGeneration::young()->register_in_place_relocate_promoted(from_page);
    }

    return to_page;
  }

  void relocate_object(oop obj) {
    const zaddress addr = to_zaddress(obj);
    assert(ZHeap::heap()->is_object_live(addr), "Should be live");

    while (!try_relocate_object(addr)) {
      // Allocate a new target page, or if that fails, use the page being
      // relocated as the new target, which will cause it to be relocated
      // in-place.
      const ZPageAge to_age = _forwarding->to_age();
      ZPage* to_page = _allocator->alloc_and_retire_target_page(_forwarding, target(to_age));
      set_target(to_age, to_page);
      if (to_page != nullptr) {
        continue;
      }

      // Start in-place relocation to block other threads from accessing
      // the page, or its forwarding table, until it has been released
      // (relocation completed).
      to_page = start_in_place_relocation(ZAddress::offset(addr));
      set_target(to_age, to_page);
    }
  }

public:
  ZRelocateWork(Allocator* allocator, ZGeneration* generation)
    : _allocator(allocator),
      _forwarding(nullptr),
      _target(),
      _generation(generation),
      _other_promoted(0),
      _other_compacted(0) {}

  ~ZRelocateWork() {
    for (uint i = 0; i < ZAllocator::_relocation_allocators; ++i) {
      _allocator->free_target_page(_target[i]);
    }
    // Report statistics on behalf of non-worker threads
    _generation->increase_promoted(_other_promoted);
    _generation->increase_compacted(_other_compacted);
  }

  bool active_remset_is_current() const {
    // Normal old-to-old relocation can treat the from-page remset as a
    // read-only copy, and then copy over the appropriate remset bits to the
    // cleared to-page's 'current' remset bitmap.
    //
    // In-place relocation is more complicated. Since the same page is both
    // a from-page and a to-page, we need to remove the old remset bits, and
    // add remset bits that correspond to the new locations of the relocated
    // objects.
    //
    // Depending on how long ago the page was allocated (in terms of the number
    // of young GCs and the current young GC's phase), the active remembered
    // set will be in either the 'current' or 'previous' bitmap.
    //
    // If the active bits are in the 'previous' bitmap, we know that the
    // 'current' bitmap was cleared at some earlier point in time, and we can
    // simply set new bits in the 'current' bitmap. Later, when relocation has
    // read all the old remset bits, we can just clear the 'previous' remset
    // bitmap.
    //
    // If, on the other hand, the active bits are in the 'current' bitmap, then
    // that bitmap is used both to read the old remset bits and as the
    // destination for the remset bits that we copy when an object is copied
    // to its new location within the page. We need to *carefully* remove all
    // old remset bits, without clearing out the newly set bits.
    return ZGeneration::old()->active_remset_is_current();
  }

  void clear_remset_before_reuse(ZPage* page, bool in_place) {
    if (_forwarding->from_age() != ZPageAge::old) {
      // No remset bits
      return;
    }

    if (in_place) {
      // Clear 'previous' remset bits. For in-place relocated pages, the previous
      // remset bits are always used, even when active_remset_is_current().
      page->clear_remset_previous();

      return;
    }

    // Normal relocate

    // Clear active remset bits
    if (active_remset_is_current()) {
      page->clear_remset_current();
    } else {
      page->clear_remset_previous();
    }

    // Verify that inactive remset bits are all cleared
    if (active_remset_is_current()) {
      page->verify_remset_cleared_previous();
    } else {
      page->verify_remset_cleared_current();
    }
  }

  void finish_in_place_relocation() {
    // We are done with the from_space copy of the page
    _forwarding->in_place_relocation_finish();
  }

  void do_forwarding(ZForwarding* forwarding) {
    _forwarding = forwarding;

    _forwarding->page()->log_msg(" (relocate page)");

    ZVerify::before_relocation(_forwarding);

    // Relocate objects
    _forwarding->object_iterate([&](oop obj) { relocate_object(obj); });

    ZVerify::after_relocation(_forwarding);

    // Verify
    if (ZVerifyForwarding) {
      _forwarding->verify();
    }

    _generation->increase_freed(_forwarding->page()->size());

    // Deal with in-place relocation
    const bool in_place = _forwarding->in_place_relocation();
    if (in_place) {
      finish_in_place_relocation();
    }

    // Old from-space pages need to deal with remset bits
    if (_forwarding->from_age() == ZPageAge::old) {
      _forwarding->relocated_remembered_fields_after_relocate();
    }

    // Release relocated page
    _forwarding->release_page();

    if (in_place) {
      // Wait for all other threads to call release_page
      ZPage* const page = _forwarding->detach_page();

      // Ensure that previous remset bits are cleared
      clear_remset_before_reuse(page, true /* in_place */);

      page->log_msg(" (relocate page done in-place)");

      // Different pages when promoting
      ZPage* const target_page = target(_forwarding->to_age());
      _allocator->share_target_page(target_page);

    } else {
      // Wait for all other threads to call release_page
      ZPage* const page = _forwarding->detach_page();

      // Ensure that all remset bits are cleared
      // Note: cleared after detach_page, when we know that
      // the young generation isn't scanning the remset.
      clear_remset_before_reuse(page, false /* in_place */);

      page->log_msg(" (relocate page done normal)");

      // Free page
      ZHeap::heap()->free_page(page);
    }
  }
};

class ZRelocateStoreBufferInstallBasePointersThreadClosure : public ThreadClosure {
public:
  virtual void do_thread(Thread* thread) {
    JavaThread* const jt = JavaThread::cast(thread);
    ZStoreBarrierBuffer* buffer = ZThreadLocalData::store_barrier_buffer(jt);
    buffer->install_base_pointers();
  }
};

// Installs the object base pointers (object starts) for the fields written
// in the store buffer. The code that searches for the object start uses the
// liveness information stored in the pages. That information is lost when
// the pages have been relocated and then destroyed.
class ZRelocateStoreBufferInstallBasePointersTask : public ZTask {
private:
  ZJavaThreadsIterator _threads_iter;

public:
  ZRelocateStoreBufferInstallBasePointersTask(ZGeneration* generation)
    : ZTask("ZRelocateStoreBufferInstallBasePointersTask"),
      _threads_iter(generation->id_optional()) {}

  virtual void work() {
    ZRelocateStoreBufferInstallBasePointersThreadClosure fix_store_buffer_cl;
    _threads_iter.apply(&fix_store_buffer_cl);
  }
};

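// The main relocation task. Workers drain the relocation queue first, since
// it holds pages that other threads are stalling on, and then claim and
// relocate the remaining forwardings from the relocation set.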
class ZRelocateTask : public ZRestartableTask {
private:
  ZRelocationSetParallelIterator _iter;
  ZGeneration* const _generation;
  ZRelocateQueue* const _queue;
  ZRelocateSmallAllocator _small_allocator;
  ZRelocateMediumAllocator _medium_allocator;

public:
  ZRelocateTask(ZRelocationSet* relocation_set, ZRelocateQueue* queue)
    : ZRestartableTask("ZRelocateTask"),
      _iter(relocation_set),
      _generation(relocation_set->generation()),
      _queue(queue),
      _small_allocator(_generation),
      _medium_allocator(_generation) {}

  ~ZRelocateTask() {
    _generation->stat_relocation()->at_relocate_end(_small_allocator.in_place_count(), _medium_allocator.in_place_count());

    // Signal that we're not using the queue anymore. Used mostly for asserts.
    _queue->deactivate();
  }

  virtual void work() {
    ZRelocateWork<ZRelocateSmallAllocator> small(&_small_allocator, _generation);
    ZRelocateWork<ZRelocateMediumAllocator> medium(&_medium_allocator, _generation);

    const auto do_forwarding = [&](ZForwarding* forwarding) {
      ZPage* const page = forwarding->page();
      if (page->is_small()) {
        small.do_forwarding(forwarding);
      } else {
        medium.do_forwarding(forwarding);
      }

      // Absolute last thing done while relocating a page.
      //
      // We don't use the SuspendibleThreadSet when relocating pages.
      // Instead, the ZRelocateQueue is used as a pseudo STS joiner/leaver.
      //
      // After the mark_done call, a safepoint could be reached and a
      // new GC phase could be entered.
      forwarding->mark_done();
    };

    const auto claim_and_do_forwarding = [&](ZForwarding* forwarding) {
      if (forwarding->claim()) {
        do_forwarding(forwarding);
      }
    };

    const auto do_forwarding_one_from_iter = [&]() {
      ZForwarding* forwarding;

      if (_iter.next(&forwarding)) {
        claim_and_do_forwarding(forwarding);
        return true;
      }

      return false;
    };

    for (;;) {
      // As long as there are requests in the relocate queue, there are threads
      // waiting in a VM state that does not allow them to be blocked. The
      // worker thread needs to finish relocating these pages, and allow the
      // other threads to continue and proceed to a blocking state. After that,
      // the worker threads are allowed to safepoint synchronize.
      for (ZForwarding* forwarding; (forwarding = _queue->synchronize_poll()) != nullptr;) {
        do_forwarding(forwarding);
      }

      if (!do_forwarding_one_from_iter()) {
        // No more work
        break;
      }

      if (_generation->should_worker_resize()) {
        break;
      }
    }

    _queue->leave();
  }

  virtual void resize_workers(uint nworkers) {
    _queue->resize_workers(nworkers);
  }
};

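// Processes one oop field of a flip-promoted object: remaps the pointer if
// needed and adds a remembered set entry if the field still refers to the
// young generation.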
static void remap_and_maybe_add_remset(volatile zpointer* p) {
  const zpointer ptr = Atomic::load(p);

  if (ZPointer::is_store_good(ptr)) {
    // Already has a remset entry
    return;
  }

  // Remset entries are used for two reasons:
  // 1) Young marking old-to-young pointer roots
  // 2) Deferred remapping of stale old-to-young pointers
  //
  // This load barrier will up-front perform the remapping of (2),
  // and the code below only has to make sure we register up-to-date
  // old-to-young pointers for (1).
  const zaddress addr = ZBarrier::load_barrier_on_oop_field_preloaded(p, ptr);

  if (is_null(addr)) {
    // No need for remset entries for null pointers
    return;
  }

  if (ZHeap::heap()->is_old(addr)) {
    // No need for remset entries for pointers to old gen
    return;
  }

  ZRelocate::add_remset(p);
}

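// Task that builds the remembered set for pages that were flip promoted to
// the old generation, by visiting every oop field of every live object on
// those pages.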
class ZRelocateAddRemsetForFlipPromoted : public ZRestartableTask {
private:
  ZStatTimerYoung _timer;
  ZArrayParallelIterator<ZPage*> _iter;

public:
  ZRelocateAddRemsetForFlipPromoted(ZArray<ZPage*>* pages)
    : ZRestartableTask("ZRelocateAddRemsetForFlipPromoted"),
      _timer(ZSubPhaseConcurrentRelocateRememberedSetFlipPromotedYoung),
      _iter(pages) {}

  virtual void work() {
    SuspendibleThreadSetJoiner sts_joiner;

    for (ZPage* page; _iter.next(&page);) {
      page->object_iterate([&](oop obj) {
        ZIterator::basic_oop_iterate_safe(obj, remap_and_maybe_add_remset);
      });

      SuspendibleThreadSet::yield();
      if (ZGeneration::young()->should_worker_resize()) {
        return;
      }
    }
  }
};

void ZRelocate::relocate(ZRelocationSet* relocation_set) {
  {
    // Install the store buffer's base pointers before the
    // relocate task destroys the liveness information in
    // the relocated pages.
    ZRelocateStoreBufferInstallBasePointersTask buffer_task(_generation);
    workers()->run(&buffer_task);
  }

  {
    ZRelocateTask relocate_task(relocation_set, &_queue);
    workers()->run(&relocate_task);
  }

  if (relocation_set->generation()->is_young()) {
    ZRelocateAddRemsetForFlipPromoted task(relocation_set->flip_promoted_pages());
    workers()->run(&task);
  }
}

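// Computes the age objects are relocated to: ages below the tenuring
// threshold advance one step, anything at or above the threshold is
// promoted to the old generation.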
ZPageAge ZRelocate::compute_to_age(ZPageAge from_age) {
  if (from_age == ZPageAge::old) {
    return ZPageAge::old;
  }

  const uint age = static_cast<uint>(from_age);
  if (age >= ZGeneration::young()->tenuring_threshold()) {
    return ZPageAge::old;
  }

  return static_cast<ZPageAge>(age + 1);
}

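// Task that ages pages without relocating their objects ("flip" aging).
// Pages that reach the old generation are promoted in place; the remaining
// pages simply have their age bumped.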
class ZFlipAgePagesTask : public ZTask {
private:
  ZArrayParallelIterator<ZPage*> _iter;

public:
  ZFlipAgePagesTask(const ZArray<ZPage*>* pages)
    : ZTask("ZPromotePagesTask"),
      _iter(pages) {}

  virtual void work() {
    SuspendibleThreadSetJoiner sts_joiner;
    ZArray<ZPage*> promoted_pages;

    for (ZPage* prev_page; _iter.next(&prev_page);) {
      const ZPageAge from_age = prev_page->age();
      const ZPageAge to_age = ZRelocate::compute_to_age(from_age);
      assert(from_age != ZPageAge::old, "invalid age for a young collection");

      // Figure out if this is proper promotion
      const bool promotion = to_age == ZPageAge::old;

      if (promotion) {
        // Before promoting an object (and before relocate start), we must ensure that all
        // contained zpointers are store good. The marking code ensures that for non-null
        // pointers, but null pointers are ignored. This code ensures that even null pointers
        // are made store good, for the promoted objects.
        prev_page->object_iterate([&](oop obj) {
          ZIterator::basic_oop_iterate_safe(obj, ZBarrier::promote_barrier_on_young_oop_field);
        });
      }

      // Logging
      prev_page->log_msg(promotion ? " (flip promoted)" : " (flip survived)");

      // Setup to-space page
      ZPage* const new_page = promotion ? prev_page->clone_limited_promote_flipped() : prev_page;
      new_page->reset(to_age, ZPageResetType::FlipAging);

      if (promotion) {
        ZGeneration::young()->flip_promote(prev_page, new_page);
        // Defer promoted page registration to limit the number of times the lock is taken
        promoted_pages.push(prev_page);
      }

      SuspendibleThreadSet::yield();
    }

    ZGeneration::young()->register_flip_promoted(promoted_pages);
  }
};

void ZRelocate::flip_age_pages(const ZArray<ZPage*>* pages) {
  ZFlipAgePagesTask flip_age_task(pages);
  workers()->run(&flip_age_task);
}

void ZRelocate::synchronize() {
  _queue.synchronize();
}

void ZRelocate::desynchronize() {
  _queue.desynchronize();
}

ZRelocateQueue* ZRelocate::queue() {
  return &_queue;
}

bool ZRelocate::is_queue_active() const {
  return _queue.is_active();
}