1 /*
2 * Copyright (c) 2014, 2021, Red Hat, Inc. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26
27 #include "compiler/oopMap.hpp"
28 #include "gc/shared/continuationGCSupport.hpp"
29 #include "gc/shared/gcTraceTime.inline.hpp"
30 #include "gc/shared/preservedMarks.inline.hpp"
31 #include "gc/shared/tlab_globals.hpp"
32 #include "gc/shared/workerThread.hpp"
33 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
34 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
35 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
36 #include "gc/shenandoah/shenandoahFreeSet.hpp"
37 #include "gc/shenandoah/shenandoahFullGC.hpp"
38 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
39 #include "gc/shenandoah/shenandoahMark.inline.hpp"
40 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
41 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
42 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
43 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
44 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
45 #include "gc/shenandoah/shenandoahMetrics.hpp"
46 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
47 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
48 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
49 #include "gc/shenandoah/shenandoahSTWMark.hpp"
50 #include "gc/shenandoah/shenandoahUtils.hpp"
51 #include "gc/shenandoah/shenandoahVerifier.hpp"
52 #include "gc/shenandoah/shenandoahVMOperations.hpp"
53 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
54 #include "memory/metaspaceUtils.hpp"
55 #include "memory/universe.hpp"
56 #include "oops/compressedOops.inline.hpp"
57 #include "oops/oop.inline.hpp"
58 #include "runtime/javaThread.hpp"
59 #include "runtime/orderAccess.hpp"
60 #include "runtime/vmThread.hpp"
61 #include "utilities/copy.hpp"
62 #include "utilities/events.hpp"
63 #include "utilities/growableArray.hpp"
64
65 ShenandoahFullGC::ShenandoahFullGC() :
66 _gc_timer(ShenandoahHeap::heap()->gc_timer()),
67 _preserved_marks(new PreservedMarksSet(true)) {}
68
69 ShenandoahFullGC::~ShenandoahFullGC() {
70 delete _preserved_marks;
71 }
72
73 bool ShenandoahFullGC::collect(GCCause::Cause cause) {
74 vmop_entry_full(cause);
75 // Always succeeds
76 return true;
77 }
78
88
89 void ShenandoahFullGC::entry_full(GCCause::Cause cause) {
90 static const char* msg = "Pause Full";
91 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
92 EventMark em("%s", msg);
93
94 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
95 ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
96 "full gc");
97
98 op_full(cause);
99 }
100
101 void ShenandoahFullGC::op_full(GCCause::Cause cause) {
102 ShenandoahMetricsSnapshot metrics;
103 metrics.snap_before();
104
105 // Perform full GC
106 do_it(cause);
107
108 metrics.snap_after();
109
110 if (metrics.is_good_progress()) {
111 ShenandoahHeap::heap()->notify_gc_progress();
112 } else {
113 // Nothing to do. Tell the allocation path that we have failed to make
114 // progress, and it can finally fail.
115 ShenandoahHeap::heap()->notify_gc_no_progress();
116 }
117 }
118
119 void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
120 ShenandoahHeap* heap = ShenandoahHeap::heap();
121
122 if (ShenandoahVerify) {
123 heap->verifier()->verify_before_fullgc();
124 }
125
126 if (VerifyBeforeGC) {
127 Universe::verify();
128 }
129
130 // Degenerated GC may carry concurrent root flags when upgrading to
131 // full GC. We need to reset them before mutators resume.
132 heap->set_concurrent_strong_root_in_progress(false);
133 heap->set_concurrent_weak_root_in_progress(false);
134
135 heap->set_full_gc_in_progress(true);
136
137 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
138 assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");
139
140 {
141 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_pre);
144
145 {
146 ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
147 // Full GC is supposed to recover from any GC state:
148
149 // a0. Remember if we have forwarded objects
150 bool has_forwarded_objects = heap->has_forwarded_objects();
151
152 // a1. Cancel evacuation, if in progress
153 if (heap->is_evacuation_in_progress()) {
154 heap->set_evacuation_in_progress(false);
155 }
156 assert(!heap->is_evacuation_in_progress(), "sanity");
157
158 // a2. Cancel update-refs, if in progress
159 if (heap->is_update_refs_in_progress()) {
160 heap->set_update_refs_in_progress(false);
161 }
162 assert(!heap->is_update_refs_in_progress(), "sanity");
163
164 // b. Cancel concurrent mark, if in progress
165 if (heap->is_concurrent_mark_in_progress()) {
166 ShenandoahConcurrentGC::cancel();
167 heap->set_concurrent_mark_in_progress(false);
168 }
169 assert(!heap->is_concurrent_mark_in_progress(), "sanity");
170
171 // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
172 if (has_forwarded_objects) {
173 update_roots(true /*full_gc*/);
174 }
175
176 // d. Reset the bitmaps for new marking
177 heap->reset_mark_bitmap();
178 assert(heap->marking_context()->is_bitmap_clear(), "sanity");
179 assert(!heap->marking_context()->is_complete(), "sanity");
180
181 // e. Abandon reference discovery and clear all discovered references.
182 ShenandoahReferenceProcessor* rp = heap->ref_processor();
183 rp->abandon_partial_discovery();
184
185 // f. Sync pinned region status from the CP marks
186 heap->sync_pinned_region_status();
187
188 // The rest of prologue:
189 _preserved_marks->init(heap->workers()->active_workers());
190
191 assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
192 }
193
194 if (UseTLAB) {
195 heap->gclabs_retire(ResizeTLAB);
196 heap->tlabs_retire(ResizeTLAB);
197 }
198
199 OrderAccess::fence();
200
201 phase1_mark_heap();
202
203 // Once marking is done, which may have fixed up forwarded objects, we can drop the forwarded-objects flag.
204 // Coming out of Full GC, we would not have any forwarded objects.
205 // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
206 heap->set_has_forwarded_objects(false);
207
208 heap->set_full_gc_move_in_progress(true);
209
210 // Setup workers for the rest
211 OrderAccess::fence();
212
213 // Initialize worker slices
214 ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
215 for (uint i = 0; i < heap->max_workers(); i++) {
216 worker_slices[i] = new ShenandoahHeapRegionSet();
217 }
218
219 {
220 // The rest of the code performs region moves, where region status is undefined
221 // until all phases run together.
222 ShenandoahHeapLocker lock(heap->lock());
223
224 phase2_calculate_target_addresses(worker_slices);
225
226 OrderAccess::fence();
227
228 phase3_update_references();
229
230 phase4_compact_objects(worker_slices);
231 }
232
233 {
234 // Epilogue
235 _preserved_marks->restore(heap->workers());
236 _preserved_marks->reclaim();
237 }
238
239 // Resize metaspace
240 MetaspaceGC::compute_new_size();
241
242 // Free worker slices
243 for (uint i = 0; i < heap->max_workers(); i++) {
244 delete worker_slices[i];
245 }
246 FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);
247
248 heap->set_full_gc_move_in_progress(false);
249 heap->set_full_gc_in_progress(false);
250
251 if (ShenandoahVerify) {
252 heap->verifier()->verify_after_fullgc();
253 }
254
255 if (VerifyAfterGC) {
256 Universe::verify();
257 }
258
259 {
260 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_post);
261 heap->post_full_gc_dump(_gc_timer);
262 }
263 }
264
265 class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
266 private:
267 ShenandoahMarkingContext* const _ctx;
268
269 public:
270 ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
271
272 void heap_region_do(ShenandoahHeapRegion *r) {
273 _ctx->capture_top_at_mark_start(r);
274 r->clear_live_data();
275 }
276 };
277
278 void ShenandoahFullGC::phase1_mark_heap() {
279 GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
280 ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);
281
282 ShenandoahHeap* heap = ShenandoahHeap::heap();
283
284 ShenandoahPrepareForMarkClosure cl;
285 heap->heap_region_iterate(&cl);
286
287 heap->set_unload_classes(heap->heuristics()->can_unload_classes());
288
289 ShenandoahReferenceProcessor* rp = heap->ref_processor();
290 // enable ("weak") refs discovery
291 rp->set_soft_reference_policy(true); // forcefully purge all soft references
292
293 ShenandoahSTWMark mark(true /*full_gc*/);
294 mark.mark();
295 heap->parallel_cleaning(true /* full_gc */);
296 }
297
298 class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
299 private:
300 PreservedMarks* const _preserved_marks;
301 ShenandoahHeap* const _heap;
302 GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
303 int _empty_regions_pos;
304 ShenandoahHeapRegion* _to_region;
305 ShenandoahHeapRegion* _from_region;
306 HeapWord* _compact_point;
307
308 public:
309 ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
310 GrowableArray<ShenandoahHeapRegion*>& empty_regions,
311 ShenandoahHeapRegion* to_region) :
312 _preserved_marks(preserved_marks),
313 _heap(ShenandoahHeap::heap()),
314 _empty_regions(empty_regions),
315 _empty_regions_pos(0),
316 _to_region(to_region),
317 _from_region(nullptr),
318 _compact_point(to_region->bottom()) {}
319
320 void set_from_region(ShenandoahHeapRegion* from_region) {
321 _from_region = from_region;
322 }
323
324 void finish_region() {
325 assert(_to_region != nullptr, "should not happen");
326 _to_region->set_new_top(_compact_point);
327 }
328
329 bool is_compact_same_region() {
330 return _from_region == _to_region;
331 }
332
333 int empty_regions_pos() {
334 return _empty_regions_pos;
335 }
336
337 void do_object(oop p) {
338 assert(_from_region != nullptr, "must set before work");
339 assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
340 assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");
341
342 size_t obj_size = p->size();
343 if (_compact_point + obj_size > _to_region->end()) {
344 finish_region();
345
346 // Object doesn't fit. Pick next empty region and start compacting there.
347 ShenandoahHeapRegion* new_to_region;
348 if (_empty_regions_pos < _empty_regions.length()) {
349 new_to_region = _empty_regions.at(_empty_regions_pos);
350 _empty_regions_pos++;
351 } else {
352 // Out of empty regions? Compact within the same region.
353 new_to_region = _from_region;
354 }
355
356 assert(new_to_region != _to_region, "must not reuse same to-region");
357 assert(new_to_region != nullptr, "must not be null");
358 _to_region = new_to_region;
359 _compact_point = _to_region->bottom();
360 }
361
362 // Object fits into current region, record new location:
363 assert(_compact_point + obj_size <= _to_region->end(), "must fit");
364 shenandoah_assert_not_forwarded(nullptr, p);
365 _preserved_marks->push_if_necessary(p, p->mark());
366 p->forward_to(cast_to_oop(_compact_point));
367 _compact_point += obj_size;
368 }
369 };
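// For intuition: the closure above is essentially a bump-pointer pass over the
// marked objects. Each live object gets the next compact point, spilling into
// the next empty region when it does not fit. A minimal standalone sketch of
// that arithmetic, with simplified stand-in types (not the Shenandoah API):
#if 0 // Illustrative only; not compiled as part of this file.
#include <cstddef>
#include <vector>

struct Region {
  size_t bottom;   // address of the first word
  size_t end;      // one past the last word
  size_t new_top;  // filled in by the planning pass
};

// Plans forwarding addresses for a sequence of live object sizes, mirroring
// the spill-to-next-region logic in do_object() above. Returns the new
// address assigned to each object. (Assumes enough regions are available;
// the real code falls back to compacting within the from-region instead.)
std::vector<size_t> plan_compaction(std::vector<Region>& regions,
                                    const std::vector<size_t>& live_sizes) {
  std::vector<size_t> forwardings;
  size_t to = 0;                                 // current to-region
  size_t compact_point = regions[to].bottom;
  for (size_t size : live_sizes) {
    if (compact_point + size > regions[to].end) {
      regions[to].new_top = compact_point;       // finish_region() equivalent
      compact_point = regions[++to].bottom;      // pick the next empty region
    }
    forwardings.push_back(compact_point);        // forward_to() equivalent
    compact_point += size;
  }
  regions[to].new_top = compact_point;
  return forwardings;
}
#endif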
370
371 class ShenandoahPrepareForCompactionTask : public WorkerTask {
372 private:
373 PreservedMarksSet* const _preserved_marks;
374 ShenandoahHeap* const _heap;
375 ShenandoahHeapRegionSet** const _worker_slices;
376
377 public:
378 ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices) :
379 WorkerTask("Shenandoah Prepare For Compaction"),
380 _preserved_marks(preserved_marks),
381 _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
382 }
383
384 static bool is_candidate_region(ShenandoahHeapRegion* r) {
385 // Empty region: get it into the slice to defragment the slice itself.
386 // We could have skipped this without violating correctness, but we really
387 // want to compact all live regions to the start of the heap, which sometimes
388 // means moving them into the fully empty regions.
389 if (r->is_empty()) return true;
390
391 // The region can be moved, and it is not a humongous region. Humongous
392 // regions are skipped here, because their moves are handled separately.
393 return r->is_stw_move_allowed() && !r->is_humongous();
394 }
395
396 void work(uint worker_id) {
397 ShenandoahParallelWorkerSession worker_session(worker_id);
398 ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
399 ShenandoahHeapRegionSetIterator it(slice);
400 ShenandoahHeapRegion* from_region = it.next();
401 // No work?
402 if (from_region == nullptr) {
403 return;
404 }
405
406 // Sliding compaction. Walk all regions in the slice, and compact them.
407 // Remember empty regions and reuse them as needed.
408 ResourceMark rm;
409
410 GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());
411
412 ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);
413
414 while (from_region != nullptr) {
415 assert(is_candidate_region(from_region), "Sanity");
416
417 cl.set_from_region(from_region);
418 if (from_region->has_live()) {
419 _heap->marked_object_iterate(from_region, &cl);
420 }
421
422 // Compacted the region to somewhere else? From-region is empty then.
423 if (!cl.is_compact_same_region()) {
424 empty_regions.append(from_region);
425 }
426 from_region = it.next();
427 }
428 cl.finish_region();
429
430 // Mark all remaining regions as empty
431 for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
432 ShenandoahHeapRegion* r = empty_regions.at(pos);
433 r->set_new_top(r->bottom());
434 }
435 }
436 };
437
438 void ShenandoahFullGC::calculate_target_humongous_objects() {
439 ShenandoahHeap* heap = ShenandoahHeap::heap();
440
441 // Compute the new addresses for humongous objects. We need to do this after addresses
442 // for regular objects are calculated, and we know which regions in the heap suffix are
443 // available for humongous moves.
444 //
445 // Scan the heap backwards, because we are compacting humongous regions towards the end.
446 // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
447 // humongous start there.
448 //
449 // The complication is potential non-movable regions during the scan. If such a region is
450 // detected, then sliding restarts towards that non-movable region.
451
452 size_t to_begin = heap->num_regions();
453 size_t to_end = heap->num_regions();
454
455 for (size_t c = heap->num_regions(); c > 0; c--) {
456 ShenandoahHeapRegion *r = heap->get_region(c - 1);
457 if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
458 // To-region candidate: record this, and continue scan
459 to_begin = r->index();
460 continue;
461 }
462
463 if (r->is_humongous_start() && r->is_stw_move_allowed()) {
464 // From-region candidate: movable humongous region
465 oop old_obj = cast_to_oop(r->bottom());
466 size_t words_size = old_obj->size();
467 size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
468
469 size_t start = to_end - num_regions;
470
471 if (start >= to_begin && start != r->index()) {
472 // Fits into the current window, and the move is non-trivial. Record the move, then continue the scan.
473 _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
474 old_obj->forward_to(cast_to_oop(heap->get_region(start)->bottom()));
477 }
478 }
479
480 // Failed to fit. Scan starting from current region.
481 to_begin = r->index();
482 to_end = r->index();
483 }
484 }
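// A worked example of the window scan above, assuming a 10-region heap where
// region 0 has live regular data, regions 1-2 hold a movable two-region
// humongous object, and regions 3-9 end up empty (new_top == bottom):
//   - Scanning backwards, regions 9..3 are to-region candidates, so to_begin
//     becomes 3 while to_end stays 10.
//   - Region 2 is a humongous continuation: to_begin becomes 2.
//   - Region 1 is a movable humongous start: start = to_end - 2 = 8, which is
//     >= to_begin and != 1, so the object is forwarded to region 8's bottom,
//     and the compaction window shrinks below the newly placed object
//     (handled in the lines elided above).
//   - Region 0 fails to fit and resets the window; the scan ends.
// Any live, non-empty region resets the window, so humongous objects only
// slide into a contiguous empty suffix of the scan.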
485
486 class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
487 private:
488 ShenandoahHeap* const _heap;
489
490 public:
491 ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
492 void heap_region_do(ShenandoahHeapRegion* r) {
493 if (r->is_trash()) {
494 r->recycle();
495 }
496 if (r->is_cset()) {
497 r->make_regular_bypass();
498 }
499 if (r->is_empty_uncommitted()) {
500 r->make_committed_bypass();
501 }
502 assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index());
503
504 // Record current region occupancy: this communicates to the rest of the
505 // Full GC code that empty regions are free.
506 r->set_new_top(r->top());
507 }
508 };
509
510 class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
511 private:
512 ShenandoahHeap* const _heap;
513 ShenandoahMarkingContext* const _ctx;
514
515 public:
516 ShenandoahTrashImmediateGarbageClosure() :
517 _heap(ShenandoahHeap::heap()),
518 _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
519
520 void heap_region_do(ShenandoahHeapRegion* r) {
521 if (r->is_humongous_start()) {
522 oop humongous_obj = cast_to_oop(r->bottom());
523 if (!_ctx->is_marked(humongous_obj)) {
524 assert(!r->has_live(),
525 "Region " SIZE_FORMAT " is not marked, should not have live", r->index());
526 _heap->trash_humongous_region_at(r);
527 } else {
528 assert(r->has_live(),
529 "Region " SIZE_FORMAT " should have live", r->index());
530 }
531 } else if (r->is_humongous_continuation()) {
532 // If we hit a continuation, the non-live humongous start should have been trashed already
533 assert(r->humongous_start_region()->has_live(),
534 "Region " SIZE_FORMAT " should have live", r->index());
535 } else if (r->is_regular()) {
536 if (!r->has_live()) {
537 r->make_trash_immediate();
538 }
539 }
540 }
665 for (size_t wid = 0; wid < n_workers; wid++) {
666 ShenandoahHeapRegionSetIterator it(worker_slices[wid]);
667 ShenandoahHeapRegion* r = it.next();
668 while (r != nullptr) {
669 size_t idx = r->index();
670 assert(ShenandoahPrepareForCompactionTask::is_candidate_region(r), "Sanity: " SIZE_FORMAT, idx);
671 assert(!map.at(idx), "No region distributed twice: " SIZE_FORMAT, idx);
672 map.at_put(idx, true);
673 r = it.next();
674 }
675 }
676
677 for (size_t rid = 0; rid < n_regions; rid++) {
678 bool is_candidate = ShenandoahPrepareForCompactionTask::is_candidate_region(heap->get_region(rid));
679 bool is_distributed = map.at(rid);
680 assert(is_distributed || !is_candidate, "All candidates are distributed: " SIZE_FORMAT, rid);
681 }
682 #endif
683 }
684
685 void ShenandoahFullGC::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
686 GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
687 ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);
688
689 ShenandoahHeap* heap = ShenandoahHeap::heap();
690
691 // We are about to figure out which regions can be compacted; make sure the
692 // pinning status has been updated in the GC prologue.
693 heap->assert_pinned_region_status();
694
695 {
696 // Trash the immediately collectible regions before computing addresses
697 ShenandoahTrashImmediateGarbageClosure tigcl;
698 heap->heap_region_iterate(&tigcl);
699
700 // Make sure regions are in good state: committed, active, clean.
701 // This is needed because we are potentially sliding the data through them.
702 ShenandoahEnsureHeapActiveClosure ecl;
703 heap->heap_region_iterate(&ecl);
704 }
705
706 // Compute the new addresses for regular objects
707 {
708 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);
709
710 distribute_slices(worker_slices);
711
712 ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
713 heap->workers()->run_task(&task);
714 }
715
716 // Compute the new addresses for humongous objects
717 {
718 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
719 calculate_target_humongous_objects();
720 }
721 }
722
723 class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
724 private:
725 ShenandoahHeap* const _heap;
726 ShenandoahMarkingContext* const _ctx;
727
728 template <class T>
729 inline void do_oop_work(T* p) {
730 T o = RawAccess<>::oop_load(p);
731 if (!CompressedOops::is_null(o)) {
766
767 class ShenandoahAdjustPointersTask : public WorkerTask {
768 private:
769 ShenandoahHeap* const _heap;
770 ShenandoahRegionIterator _regions;
771
772 public:
773 ShenandoahAdjustPointersTask() :
774 WorkerTask("Shenandoah Adjust Pointers"),
775 _heap(ShenandoahHeap::heap()) {
776 }
777
778 void work(uint worker_id) {
779 ShenandoahParallelWorkerSession worker_session(worker_id);
780 ShenandoahAdjustPointersObjectClosure obj_cl;
781 ShenandoahHeapRegion* r = _regions.next();
782 while (r != nullptr) {
783 if (!r->is_humongous_continuation() && r->has_live()) {
784 _heap->marked_object_iterate(r, &obj_cl);
785 }
786 r = _regions.next();
787 }
788 }
789 };
790
791 class ShenandoahAdjustRootPointersTask : public WorkerTask {
792 private:
793 ShenandoahRootAdjuster* _rp;
794 PreservedMarksSet* _preserved_marks;
795 public:
796 ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
797 WorkerTask("Shenandoah Adjust Root Pointers"),
798 _rp(rp),
799 _preserved_marks(preserved_marks) {}
800
801 void work(uint worker_id) {
802 ShenandoahParallelWorkerSession worker_session(worker_id);
803 ShenandoahAdjustPointersClosure cl;
804 _rp->roots_do(worker_id, &cl);
805 _preserved_marks->get(worker_id)->adjust_during_full_gc();
828
829 ShenandoahAdjustPointersTask adjust_pointers_task;
830 workers->run_task(&adjust_pointers_task);
831 }
832
833 class ShenandoahCompactObjectsClosure : public ObjectClosure {
834 private:
835 ShenandoahHeap* const _heap;
836 uint const _worker_id;
837
838 public:
839 ShenandoahCompactObjectsClosure(uint worker_id) :
840 _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}
841
842 void do_object(oop p) {
843 assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
844 size_t size = p->size();
845 if (p->is_forwarded()) {
846 HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
847 HeapWord* compact_to = cast_from_oop<HeapWord*>(p->forwardee());
848 Copy::aligned_conjoint_words(compact_from, compact_to, size);
849 oop new_obj = cast_to_oop(compact_to);
850
851 ContinuationGCSupport::relativize_stack_chunk(new_obj);
852 new_obj->init_mark();
853 }
854 }
855 };
856
857 class ShenandoahCompactObjectsTask : public WorkerTask {
858 private:
859 ShenandoahHeap* const _heap;
860 ShenandoahHeapRegionSet** const _worker_slices;
861
862 public:
863 ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
864 WorkerTask("Shenandoah Compact Objects"),
865 _heap(ShenandoahHeap::heap()),
866 _worker_slices(worker_slices) {
867 }
869 void work(uint worker_id) {
870 ShenandoahParallelWorkerSession worker_session(worker_id);
871 ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);
872
873 ShenandoahCompactObjectsClosure cl(worker_id);
874 ShenandoahHeapRegion* r = slice.next();
875 while (r != nullptr) {
876 assert(!r->is_humongous(), "must not get humongous regions here");
877 if (r->has_live()) {
878 _heap->marked_object_iterate(r, &cl);
879 }
880 r->set_top(r->new_top());
881 r = slice.next();
882 }
883 }
884 };
885
886 class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
887 private:
888 ShenandoahHeap* const _heap;
889 size_t _live;
890
891 public:
892 ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) {
893 _heap->free_set()->clear();
894 }
895
896 void heap_region_do(ShenandoahHeapRegion* r) {
897 assert (!r->is_cset(), "cset regions should have been demoted already");
898
899 // Need to reset the complete-top-at-mark-start pointer here because
900 // the complete marking bitmap is no longer valid. This ensures
901 // size-based iteration in marked_object_iterate().
902 // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
903 // pinned regions.
904 if (!r->is_pinned()) {
905 _heap->complete_marking_context()->reset_top_at_mark_start(r);
906 }
907
908 size_t live = r->used();
909
910 // Make empty regions that have been allocated into regular regions
911 if (r->is_empty() && live > 0) {
912 r->make_regular_bypass();
913 if (ZapUnusedHeapArea) {
914 SpaceMangler::mangle_region(MemRegion(r->top(), r->end()));
915 }
916 }
917
918 // Reclaim regular regions that became empty
919 if (r->is_regular() && live == 0) {
920 r->make_trash();
921 }
922
923 // Recycle all trash regions
924 if (r->is_trash()) {
925 live = 0;
926 r->recycle();
927 }
928
929 r->set_live_data(live);
930 r->reset_alloc_metadata();
931 _live += live;
932 }
933
934 size_t get_live() {
935 return _live;
936 }
937 };
938
939 void ShenandoahFullGC::compact_humongous_objects() {
940 // Compact humongous regions, based on their fwdptr objects.
941 //
942 // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
943 // humongous regions are already compacted, and do not require further moves, which alleviates
944 // sliding costs. We may consider doing this in parallel in the future.
945
946 ShenandoahHeap* heap = ShenandoahHeap::heap();
947
948 for (size_t c = heap->num_regions(); c > 0; c--) {
949 ShenandoahHeapRegion* r = heap->get_region(c - 1);
950 if (r->is_humongous_start()) {
951 oop old_obj = cast_to_oop(r->bottom());
952 if (!old_obj->is_forwarded()) {
953 // No need to move the object, it stays at the same slot
954 continue;
955 }
956 size_t words_size = old_obj->size();
957 size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
958
959 size_t old_start = r->index();
960 size_t old_end = old_start + num_regions - 1;
961 size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
962 size_t new_end = new_start + num_regions - 1;
963 assert(old_start != new_start, "must be real move");
964 assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());
965
966 Copy::aligned_conjoint_words(r->bottom(), heap->get_region(new_start)->bottom(), words_size);
967 ContinuationGCSupport::relativize_stack_chunk(cast_to_oop<HeapWord*>(r->bottom()));
968
969 oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
970 new_obj->init_mark();
971
972 {
973 for (size_t c = old_start; c <= old_end; c++) {
974 ShenandoahHeapRegion* r = heap->get_region(c);
975 r->make_regular_bypass();
976 r->set_top(r->bottom());
977 }
978
979 for (size_t c = new_start; c <= new_end; c++) {
980 ShenandoahHeapRegion* r = heap->get_region(c);
981 if (c == new_start) {
982 r->make_humongous_start_bypass();
983 } else {
984 r->make_humongous_cont_bypass();
985 }
986
987 // Trailing region may be non-full, record the remainder there
988 size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
989 if ((c == new_end) && (remainder != 0)) {
990 r->set_top(r->bottom() + remainder);
991 } else {
992 r->set_top(r->end());
993 }
994
995 r->reset_alloc_metadata();
996 }
997 }
998 }
999 }
1000 }
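// The trailing-region arithmetic above relies on the region size in words
// being a power of two, so the bitmask extracts (words_size mod region size).
// For example, assuming a region size of 2^18 = 262144 words: an object of
// 614400 words needs 3 regions, and the remainder is
// 614400 & (262144 - 1) = 90112, so the last region's top is set to
// bottom + 90112 while the preceding regions are filled to their end.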
1001
1002 // This is slightly different from ShHeap::reset_next_mark_bitmap:
1003 // we need to remain able to walk pinned regions.
1004 // Since pinned regions do not move and don't get compacted, we will get holes with
1030 };
1031
1032 void ShenandoahFullGC::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
1033 GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
1034 ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);
1035
1036 ShenandoahHeap* heap = ShenandoahHeap::heap();
1037
1038 // Compact regular objects first
1039 {
1040 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
1041 ShenandoahCompactObjectsTask compact_task(worker_slices);
1042 heap->workers()->run_task(&compact_task);
1043 }
1044
1045 // Compact humongous objects after regular object moves
1046 {
1047 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
1048 compact_humongous_objects();
1049 }
1050
1051 // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
1052 // and must ensure the bitmap is in sync.
1053 {
1054 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
1055 ShenandoahMCResetCompleteBitmapTask task;
1056 heap->workers()->run_task(&task);
1057 }
1058
1059 // Bring regions in proper states after the collection, and set heap properties.
1060 {
1061 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);
1062
1063 ShenandoahPostCompactClosure post_compact;
1064 heap->heap_region_iterate(&post_compact);
1065 heap->set_used(post_compact.get_live());
1066
1067 heap->collection_set()->clear();
1068 heap->free_set()->rebuild();
1069 }
1070
1071 heap->clear_cancelled_gc();
1072 }
1 /*
2 * Copyright (c) 2014, 2021, Red Hat, Inc. All rights reserved.
3 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27
28 #include "compiler/oopMap.hpp"
29 #include "gc/shared/continuationGCSupport.hpp"
30 #include "gc/shared/gcTraceTime.inline.hpp"
31 #include "gc/shared/preservedMarks.inline.hpp"
32 #include "gc/shared/tlab_globals.hpp"
33 #include "gc/shared/workerThread.hpp"
34 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
35 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
36 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
37 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
39 #include "gc/shenandoah/shenandoahFreeSet.hpp"
40 #include "gc/shenandoah/shenandoahFullGC.hpp"
41 #include "gc/shenandoah/shenandoahGenerationalFullGC.hpp"
42 #include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
43 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
44 #include "gc/shenandoah/shenandoahMark.inline.hpp"
45 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
46 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
47 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
48 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
49 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
50 #include "gc/shenandoah/shenandoahMetrics.hpp"
51 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
52 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
53 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
54 #include "gc/shenandoah/shenandoahSTWMark.hpp"
55 #include "gc/shenandoah/shenandoahUtils.hpp"
56 #include "gc/shenandoah/shenandoahVerifier.hpp"
57 #include "gc/shenandoah/shenandoahVMOperations.hpp"
58 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
59 #include "memory/metaspaceUtils.hpp"
60 #include "memory/universe.hpp"
61 #include "oops/compressedOops.inline.hpp"
62 #include "oops/oop.inline.hpp"
63 #include "runtime/orderAccess.hpp"
64 #include "runtime/vmThread.hpp"
65 #include "utilities/copy.hpp"
66 #include "utilities/events.hpp"
67 #include "utilities/growableArray.hpp"
68
69 ShenandoahFullGC::ShenandoahFullGC() :
70 _gc_timer(ShenandoahHeap::heap()->gc_timer()),
71 _preserved_marks(new PreservedMarksSet(true)) {}
72
73 ShenandoahFullGC::~ShenandoahFullGC() {
74 delete _preserved_marks;
75 }
76
77 bool ShenandoahFullGC::collect(GCCause::Cause cause) {
78 vmop_entry_full(cause);
79 // Always succeeds
80 return true;
81 }
82
92
93 void ShenandoahFullGC::entry_full(GCCause::Cause cause) {
94 static const char* msg = "Pause Full";
95 ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
96 EventMark em("%s", msg);
97
98 ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
99 ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
100 "full gc");
101
102 op_full(cause);
103 }
104
105 void ShenandoahFullGC::op_full(GCCause::Cause cause) {
106 ShenandoahMetricsSnapshot metrics;
107 metrics.snap_before();
108
109 // Perform full GC
110 do_it(cause);
111
112 ShenandoahHeap* const heap = ShenandoahHeap::heap();
113
114 if (heap->mode()->is_generational()) {
115 ShenandoahGenerationalFullGC::handle_completion(heap);
116 }
117
118 metrics.snap_after();
119
120 if (metrics.is_good_progress()) {
121 heap->notify_gc_progress();
122 } else {
123 // Nothing to do. Tell the allocation path that we have failed to make
124 // progress, and it can finally fail.
125 heap->notify_gc_no_progress();
126 }
127
128 // Regardless of whether progress was made, we record that we completed a "successful" full GC.
129 heap->global_generation()->heuristics()->record_success_full();
130 heap->shenandoah_policy()->record_success_full();
131 }
132
133 void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
134 ShenandoahHeap* heap = ShenandoahHeap::heap();
135
136 if (heap->mode()->is_generational()) {
137 ShenandoahGenerationalFullGC::prepare();
138 }
139
140 if (ShenandoahVerify) {
141 heap->verifier()->verify_before_fullgc();
142 }
143
144 if (VerifyBeforeGC) {
145 Universe::verify();
146 }
147
148 // Degenerated GC may carry concurrent root flags when upgrading to
149 // full GC. We need to reset them before mutators resume.
150 heap->set_concurrent_strong_root_in_progress(false);
151 heap->set_concurrent_weak_root_in_progress(false);
152
153 heap->set_full_gc_in_progress(true);
154
155 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
156 assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");
157
158 {
159 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_pre);
162
163 {
164 ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
165 // Full GC is supposed to recover from any GC state:
166
167 // a0. Remember if we have forwarded objects
168 bool has_forwarded_objects = heap->has_forwarded_objects();
169
170 // a1. Cancel evacuation, if in progress
171 if (heap->is_evacuation_in_progress()) {
172 heap->set_evacuation_in_progress(false);
173 }
174 assert(!heap->is_evacuation_in_progress(), "sanity");
175
176 // a2. Cancel update-refs, if in progress
177 if (heap->is_update_refs_in_progress()) {
178 heap->set_update_refs_in_progress(false);
179 }
180 assert(!heap->is_update_refs_in_progress(), "sanity");
181
182 // b. Cancel all concurrent marks, if in progress
183 if (heap->is_concurrent_mark_in_progress()) {
184 // TODO: Send cancel_concurrent_mark upstream? Does it really not have it already?
185 heap->cancel_concurrent_mark();
186 }
187 assert(!heap->is_concurrent_mark_in_progress(), "sanity");
188
189 // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
190 if (has_forwarded_objects) {
191 update_roots(true /*full_gc*/);
192 }
193
194 // d. Reset the bitmaps for new marking
195 heap->global_generation()->reset_mark_bitmap();
196 assert(heap->marking_context()->is_bitmap_clear(), "sanity");
197 assert(!heap->global_generation()->is_mark_complete(), "sanity");
198
199 // e. Abandon reference discovery and clear all discovered references.
200 ShenandoahReferenceProcessor* rp = heap->global_generation()->ref_processor();
201 rp->abandon_partial_discovery();
202
203 // f. Sync pinned region status from the CP marks
204 heap->sync_pinned_region_status();
205
206 if (heap->mode()->is_generational()) {
207 ShenandoahGenerationalFullGC::restore_top_before_promote(heap);
208 }
209
210 // The rest of prologue:
211 _preserved_marks->init(heap->workers()->active_workers());
212
213 assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
214 }
215
216 if (UseTLAB) {
217 // TODO: Do we need to explicitly retire PLABs?
218 heap->gclabs_retire(ResizeTLAB);
219 heap->tlabs_retire(ResizeTLAB);
220 }
221
222 OrderAccess::fence();
223
224 phase1_mark_heap();
225
226 // Once marking is done, which may have fixed up forwarded objects, we can drop the forwarded-objects flag.
227 // Coming out of Full GC, we would not have any forwarded objects.
228 // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
229 heap->set_has_forwarded_objects(false);
230
231 heap->set_full_gc_move_in_progress(true);
232
233 // Setup workers for the rest
234 OrderAccess::fence();
235
236 // Initialize worker slices
237 ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
238 for (uint i = 0; i < heap->max_workers(); i++) {
239 worker_slices[i] = new ShenandoahHeapRegionSet();
240 }
241
242 {
243 // The rest of the code performs region moves, where region status is undefined
244 // until all phases run together.
245 ShenandoahHeapLocker lock(heap->lock());
246
247 phase2_calculate_target_addresses(worker_slices);
248
249 OrderAccess::fence();
250
251 phase3_update_references();
252
253 phase4_compact_objects(worker_slices);
254
255 phase5_epilog();
256 }
257
258 // Resize metaspace
259 MetaspaceGC::compute_new_size();
260
261 // Free worker slices
262 for (uint i = 0; i < heap->max_workers(); i++) {
263 delete worker_slices[i];
264 }
265 FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);
266
267 heap->set_full_gc_move_in_progress(false);
268 heap->set_full_gc_in_progress(false);
269
270 if (ShenandoahVerify) {
271 heap->verifier()->verify_after_fullgc();
272 }
273
274 // Humongous regions are promoted on demand and are accounted for by normal Full GC mechanisms.
275 if (VerifyAfterGC) {
276 Universe::verify();
277 }
278
279 {
280 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_post);
281 heap->post_full_gc_dump(_gc_timer);
282 }
283 }
284
285 class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
286 private:
287 ShenandoahMarkingContext* const _ctx;
288
289 public:
290 ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
291
292 void heap_region_do(ShenandoahHeapRegion *r) {
293 // TODO: Add API to heap to skip free regions
294 if (r->is_affiliated()) {
295 _ctx->capture_top_at_mark_start(r);
296 r->clear_live_data();
297 }
298 }
299
300 bool is_thread_safe() { return true; }
301 };
302
303 void ShenandoahFullGC::phase1_mark_heap() {
304 GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
305 ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);
306
307 ShenandoahHeap* heap = ShenandoahHeap::heap();
308
309 ShenandoahPrepareForMarkClosure cl;
310 heap->parallel_heap_region_iterate(&cl);
311
312 heap->set_unload_classes(heap->global_generation()->heuristics()->can_unload_classes());
313
314 ShenandoahReferenceProcessor* rp = heap->global_generation()->ref_processor();
315 // enable ("weak") refs discovery
316 rp->set_soft_reference_policy(true); // forcefully purge all soft references
317
318 ShenandoahSTWMark mark(heap->global_generation(), true /*full_gc*/);
319 mark.mark();
320 heap->parallel_cleaning(true /* full_gc */);
321
322 if (ShenandoahHeap::heap()->mode()->is_generational()) {
323 ShenandoahGenerationalFullGC::log_live_in_old(heap);
324 }
325 }
326
327 class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
328 private:
329 PreservedMarks* const _preserved_marks;
330 ShenandoahHeap* const _heap;
331 GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
332 int _empty_regions_pos;
333 ShenandoahHeapRegion* _to_region;
334 ShenandoahHeapRegion* _from_region;
335 HeapWord* _compact_point;
336
337 public:
338 ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
339 GrowableArray<ShenandoahHeapRegion*>& empty_regions,
340 ShenandoahHeapRegion* to_region) :
341 _preserved_marks(preserved_marks),
342 _heap(ShenandoahHeap::heap()),
343 _empty_regions(empty_regions),
344 _empty_regions_pos(0),
345 _to_region(to_region),
346 _from_region(nullptr),
347 _compact_point(to_region->bottom()) {}
348
349 void set_from_region(ShenandoahHeapRegion* from_region) {
350 _from_region = from_region;
351 }
352
353 void finish() {
354 assert(_to_region != nullptr, "should not happen");
355 _to_region->set_new_top(_compact_point);
356 }
357
358 bool is_compact_same_region() {
359 return _from_region == _to_region;
360 }
361
362 int empty_regions_pos() {
363 return _empty_regions_pos;
364 }
365
366 void do_object(oop p) {
367 assert(_from_region != nullptr, "must set before work");
368 assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
369 assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");
370
371 size_t obj_size = p->size();
372 if (_compact_point + obj_size > _to_region->end()) {
373 finish();
374
375 // Object doesn't fit. Pick next empty region and start compacting there.
376 ShenandoahHeapRegion* new_to_region;
377 if (_empty_regions_pos < _empty_regions.length()) {
378 new_to_region = _empty_regions.at(_empty_regions_pos);
379 _empty_regions_pos++;
380 } else {
381 // Out of empty regions? Compact within the same region.
382 new_to_region = _from_region;
383 }
384
385 assert(new_to_region != _to_region, "must not reuse same to-region");
386 assert(new_to_region != nullptr, "must not be null");
387 _to_region = new_to_region;
388 _compact_point = _to_region->bottom();
389 }
390
391 // Object fits into the current region: record the new location only if the object actually moves:
392 assert(_compact_point + obj_size <= _to_region->end(), "must fit");
393 shenandoah_assert_not_forwarded(nullptr, p);
394 if (_compact_point != cast_from_oop<HeapWord*>(p)) {
395 _preserved_marks->push_if_necessary(p, p->mark());
396 p->forward_to(cast_to_oop(_compact_point));
397 }
398 _compact_point += obj_size;
399 }
400 };
401
402 class ShenandoahPrepareForCompactionTask : public WorkerTask {
403 private:
404 PreservedMarksSet* const _preserved_marks;
405 ShenandoahHeap* const _heap;
406 ShenandoahHeapRegionSet** const _worker_slices;
407
408 public:
409 ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices) :
410 WorkerTask("Shenandoah Prepare For Compaction"),
411 _preserved_marks(preserved_marks),
412 _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
413 }
414
415 static bool is_candidate_region(ShenandoahHeapRegion* r) {
416 // Empty region: get it into the slice to defragment the slice itself.
417 // We could have skipped this without violating correctness, but we really
418 // want to compact all live regions to the start of the heap, which sometimes
419 // means moving them into the fully empty regions.
420 if (r->is_empty()) return true;
421
422 // The region can be moved, and it is not a humongous region. Humongous
423 // regions are skipped here, because their moves are handled separately.
424 return r->is_stw_move_allowed() && !r->is_humongous();
425 }
426
427 void work(uint worker_id) override;
428 private:
429 template<typename ClosureType>
430 void prepare_for_compaction(ClosureType& cl,
431 GrowableArray<ShenandoahHeapRegion*>& empty_regions,
432 ShenandoahHeapRegionSetIterator& it,
433 ShenandoahHeapRegion* from_region);
434 };
435
436 void ShenandoahPrepareForCompactionTask::work(uint worker_id) {
437 ShenandoahParallelWorkerSession worker_session(worker_id);
438 ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
439 ShenandoahHeapRegionSetIterator it(slice);
440 ShenandoahHeapRegion* from_region = it.next();
441 // No work?
442 if (from_region == nullptr) {
443 return;
444 }
445
446 // Sliding compaction. Walk all regions in the slice, and compact them.
447 // Remember empty regions and reuse them as needed.
448 ResourceMark rm;
449
450 GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());
451
452 if (_heap->mode()->is_generational()) {
453 ShenandoahPrepareForGenerationalCompactionObjectClosure cl(_preserved_marks->get(worker_id),
454 empty_regions, from_region, worker_id);
455 prepare_for_compaction(cl, empty_regions, it, from_region);
456 } else {
457 ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);
458 prepare_for_compaction(cl, empty_regions, it, from_region);
459 }
460 }
461
462 template<typename ClosureType>
463 void ShenandoahPrepareForCompactionTask::prepare_for_compaction(ClosureType& cl,
464 GrowableArray<ShenandoahHeapRegion*>& empty_regions,
465 ShenandoahHeapRegionSetIterator& it,
466 ShenandoahHeapRegion* from_region) {
467 while (from_region != nullptr) {
468 assert(is_candidate_region(from_region), "Sanity");
469 cl.set_from_region(from_region);
470 if (from_region->has_live()) {
471 _heap->marked_object_iterate(from_region, &cl);
472 }
473
474 // Compacted the region to somewhere else? From-region is empty then.
475 if (!cl.is_compact_same_region()) {
476 empty_regions.append(from_region);
477 }
478 from_region = it.next();
479 }
480 cl.finish();
481
482 // Mark all remaining regions as empty
483 for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
484 ShenandoahHeapRegion* r = empty_regions.at(pos);
485 r->set_new_top(r->bottom());
486 }
487 }
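// The mode check in work() above selects the concrete closure type once, and
// the templated loop is instantiated per closure type, so the per-object
// do_object() calls bind statically rather than virtually in the hot loop.
// A minimal sketch of the same pattern, with hypothetical types (not the
// Shenandoah API):
#if 0 // Illustrative only; not compiled as part of this file.
#include <cstddef>

struct PlainClosure { void do_object(long obj) { (void)obj; /* single-generation handling */ } };
struct GenClosure   { void do_object(long obj) { (void)obj; /* generational handling */ } };

template <typename ClosureType>
void iterate_all(ClosureType& cl, const long* objs, size_t n) {
  for (size_t i = 0; i < n; i++) {
    cl.do_object(objs[i]);  // statically bound; candidate for inlining
  }
}

void dispatch(bool is_generational, const long* objs, size_t n) {
  if (is_generational) {
    GenClosure cl;
    iterate_all(cl, objs, n);  // one template instantiation per closure type
  } else {
    PlainClosure cl;
    iterate_all(cl, objs, n);
  }
}
#endif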
488
489 void ShenandoahFullGC::calculate_target_humongous_objects() {
490 ShenandoahHeap* heap = ShenandoahHeap::heap();
491
492 // Compute the new addresses for humongous objects. We need to do this after addresses
493 // for regular objects are calculated, and we know which regions in the heap suffix are
494 // available for humongous moves.
495 //
496 // Scan the heap backwards, because we are compacting humongous regions towards the end.
497 // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
498 // humongous start there.
499 //
500 // The complication is potential non-movable regions during the scan. If such a region is
501 // detected, then sliding restarts towards that non-movable region.
502
503 size_t to_begin = heap->num_regions();
504 size_t to_end = heap->num_regions();
505
506 log_debug(gc)("Full GC calculating target humongous objects from end " SIZE_FORMAT, to_end);
507 for (size_t c = heap->num_regions(); c > 0; c--) {
508 ShenandoahHeapRegion *r = heap->get_region(c - 1);
509 if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
510 // To-region candidate: record this, and continue scan
511 to_begin = r->index();
512 continue;
513 }
514
515 if (r->is_humongous_start() && r->is_stw_move_allowed()) {
516 // From-region candidate: movable humongous region
517 oop old_obj = cast_to_oop(r->bottom());
518 size_t words_size = old_obj->size();
519 size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
520
521 size_t start = to_end - num_regions;
522
523 if (start >= to_begin && start != r->index()) {
524 // Fits into the current window, and the move is non-trivial. Record the move, then continue the scan.
525 _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
526 old_obj->forward_to(cast_to_oop(heap->get_region(start)->bottom()));
529 }
530 }
531
532 // Failed to fit. Scan starting from current region.
533 to_begin = r->index();
534 to_end = r->index();
535 }
536 }
537
538 class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
539 private:
540 ShenandoahHeap* const _heap;
541
542 public:
543 ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
544 void heap_region_do(ShenandoahHeapRegion* r) {
545 if (r->is_trash()) {
546 r->recycle();
547 }
548 if (r->is_cset()) {
549 // Leave affiliation unchanged
550 r->make_regular_bypass();
551 }
552 if (r->is_empty_uncommitted()) {
553 r->make_committed_bypass();
554 }
555 assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index());
556
557 // Record current region occupancy: this communicates to the rest of the
558 // Full GC code that empty regions are free.
559 r->set_new_top(r->top());
560 }
561 };
562
563 class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
564 private:
565 ShenandoahHeap* const _heap;
566 ShenandoahMarkingContext* const _ctx;
567
568 public:
569 ShenandoahTrashImmediateGarbageClosure() :
570 _heap(ShenandoahHeap::heap()),
571 _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
572
573 void heap_region_do(ShenandoahHeapRegion* r) {
574 if (!r->is_affiliated()) {
575 // Ignore free regions
576 // TODO: change iterators so they do not process FREE regions.
577 return;
578 }
579
580 if (r->is_humongous_start()) {
581 oop humongous_obj = cast_to_oop(r->bottom());
582 if (!_ctx->is_marked(humongous_obj)) {
583 assert(!r->has_live(),
584 "Region " SIZE_FORMAT " is not marked, should not have live", r->index());
585 _heap->trash_humongous_region_at(r);
586 } else {
587 assert(r->has_live(),
588 "Region " SIZE_FORMAT " should have live", r->index());
589 }
590 } else if (r->is_humongous_continuation()) {
591 // If we hit a continuation, the non-live humongous start should have been trashed already
592 assert(r->humongous_start_region()->has_live(),
593 "Region " SIZE_FORMAT " should have live", r->index());
594 } else if (r->is_regular()) {
595 if (!r->has_live()) {
596 r->make_trash_immediate();
597 }
598 }
599 }
724 for (size_t wid = 0; wid < n_workers; wid++) {
725 ShenandoahHeapRegionSetIterator it(worker_slices[wid]);
726 ShenandoahHeapRegion* r = it.next();
727 while (r != nullptr) {
728 size_t idx = r->index();
729 assert(ShenandoahPrepareForCompactionTask::is_candidate_region(r), "Sanity: " SIZE_FORMAT, idx);
730 assert(!map.at(idx), "No region distributed twice: " SIZE_FORMAT, idx);
731 map.at_put(idx, true);
732 r = it.next();
733 }
734 }
735
736 for (size_t rid = 0; rid < n_regions; rid++) {
737 bool is_candidate = ShenandoahPrepareForCompactionTask::is_candidate_region(heap->get_region(rid));
738 bool is_distributed = map.at(rid);
739 assert(is_distributed || !is_candidate, "All candidates are distributed: " SIZE_FORMAT, rid);
740 }
741 #endif
742 }
743
744 // TODO:
745 // Consider compacting old-gen objects toward the high end of memory and young-gen objects toward the low end
746 // of memory. As currently implemented, all regions are compacted toward the low end of memory. This creates more
747 // fragmentation of the heap, because old-gen regions get scattered among low-address regions such that it becomes
748 // more difficult to find contiguous regions for humongous objects.
749 void ShenandoahFullGC::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
750 GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
751 ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);
752
753 ShenandoahHeap* heap = ShenandoahHeap::heap();
754
755 // We are about to figure out which regions can be compacted; make sure the
756 // pinning status has been updated in the GC prologue.
757 heap->assert_pinned_region_status();
758
759 {
760 // Trash the immediately collectible regions before computing addresses
761 ShenandoahTrashImmediateGarbageClosure tigcl;
762 heap->heap_region_iterate(&tigcl);
763
764 // Make sure regions are in good state: committed, active, clean.
765 // This is needed because we are potentially sliding the data through them.
766 ShenandoahEnsureHeapActiveClosure ecl;
767 heap->heap_region_iterate(&ecl);
768 }
769
770 // Compute the new addresses for regular objects
771 {
772 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);
773
774 distribute_slices(worker_slices);
775
776 // TODO: This ResourceMark is missing upstream.
777 ResourceMark rm;
778 ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
779 heap->workers()->run_task(&task);
780 }
781
782 // Compute the new addresses for humongous objects
783 {
784 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
785 calculate_target_humongous_objects();
786 }
787 }
788
789 class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
790 private:
791 ShenandoahHeap* const _heap;
792 ShenandoahMarkingContext* const _ctx;
793
794 template <class T>
795 inline void do_oop_work(T* p) {
796 T o = RawAccess<>::oop_load(p);
797 if (!CompressedOops::is_null(o)) {
832
833 class ShenandoahAdjustPointersTask : public WorkerTask {
834 private:
835 ShenandoahHeap* const _heap;
836 ShenandoahRegionIterator _regions;
837
838 public:
839 ShenandoahAdjustPointersTask() :
840 WorkerTask("Shenandoah Adjust Pointers"),
841 _heap(ShenandoahHeap::heap()) {
842 }
843
844 void work(uint worker_id) {
845 ShenandoahParallelWorkerSession worker_session(worker_id);
846 ShenandoahAdjustPointersObjectClosure obj_cl;
847 ShenandoahHeapRegion* r = _regions.next();
848 while (r != nullptr) {
849 if (!r->is_humongous_continuation() && r->has_live()) {
850 _heap->marked_object_iterate(r, &obj_cl);
851 }
852 if (_heap->mode()->is_generational()) {
853 ShenandoahGenerationalFullGC::maybe_coalesce_and_fill_region(r);
854 }
855 r = _regions.next();
856 }
857 }
858 };
859
860 class ShenandoahAdjustRootPointersTask : public WorkerTask {
861 private:
862 ShenandoahRootAdjuster* _rp;
863 PreservedMarksSet* _preserved_marks;
864 public:
865 ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
866 WorkerTask("Shenandoah Adjust Root Pointers"),
867 _rp(rp),
868 _preserved_marks(preserved_marks) {}
869
870 void work(uint worker_id) {
871 ShenandoahParallelWorkerSession worker_session(worker_id);
872 ShenandoahAdjustPointersClosure cl;
873 _rp->roots_do(worker_id, &cl);
874 _preserved_marks->get(worker_id)->adjust_during_full_gc();
897
898 ShenandoahAdjustPointersTask adjust_pointers_task;
899 workers->run_task(&adjust_pointers_task);
900 }
901
902 class ShenandoahCompactObjectsClosure : public ObjectClosure {
903 private:
904 ShenandoahHeap* const _heap;
905 uint const _worker_id;
906
907 public:
908 ShenandoahCompactObjectsClosure(uint worker_id) :
909 _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}
910
911 void do_object(oop p) {
912 assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
913 size_t size = p->size();
914 if (p->is_forwarded()) {
915 HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
916 HeapWord* compact_to = cast_from_oop<HeapWord*>(p->forwardee());
917 assert(compact_from != compact_to, "Forwarded object should move");
918 Copy::aligned_conjoint_words(compact_from, compact_to, size);
919 oop new_obj = cast_to_oop(compact_to);
920
921 ContinuationGCSupport::relativize_stack_chunk(new_obj);
922 new_obj->init_mark();
923 }
924 }
925 };
926
927 class ShenandoahCompactObjectsTask : public WorkerTask {
928 private:
929 ShenandoahHeap* const _heap;
930 ShenandoahHeapRegionSet** const _worker_slices;
931
932 public:
933 ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
934 WorkerTask("Shenandoah Compact Objects"),
935 _heap(ShenandoahHeap::heap()),
936 _worker_slices(worker_slices) {
937 }
939 void work(uint worker_id) {
940 ShenandoahParallelWorkerSession worker_session(worker_id);
941 ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);
942
943 ShenandoahCompactObjectsClosure cl(worker_id);
944 ShenandoahHeapRegion* r = slice.next();
945 while (r != nullptr) {
946 assert(!r->is_humongous(), "must not get humongous regions here");
947 if (r->has_live()) {
948 _heap->marked_object_iterate(r, &cl);
949 }
950 r->set_top(r->new_top());
951 r = slice.next();
952 }
953 }
954 };
955
956 class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
957 private:
958 ShenandoahHeap* const _heap;
959 bool _is_generational;
960 size_t _young_regions, _young_usage, _young_humongous_waste;
961 size_t _old_regions, _old_usage, _old_humongous_waste;
962
963 public:
964 ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()),
965 _is_generational(_heap->mode()->is_generational()),
966 _young_regions(0),
967 _young_usage(0),
968 _young_humongous_waste(0),
969 _old_regions(0),
970 _old_usage(0),
971 _old_humongous_waste(0)
972 {
973 _heap->free_set()->clear();
974 }
975
976 void heap_region_do(ShenandoahHeapRegion* r) {
977 assert (!r->is_cset(), "cset regions should have been demoted already");
978
979 // Need to reset the complete-top-at-mark-start pointer here because
980 // the complete marking bitmap is no longer valid. This ensures
981 // size-based iteration in marked_object_iterate().
982 // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
983 // pinned regions.
984 if (!r->is_pinned()) {
985 _heap->complete_marking_context()->reset_top_at_mark_start(r);
986 }
987
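// Region state transitions below, in order: empty regions that have seen
// allocation become regular; regular regions with no live data become
// trash; trash regions are recycled immediately. Everything else is
// accounted to its generation.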
988 size_t live = r->used();
989
990 // Turn empty regions that have seen allocation into regular regions
991 if (r->is_empty() && live > 0) {
992 if (!_is_generational) {
993 r->make_young_maybe();
994 }
995 // else, generational mode compaction has already established affiliation.
996 r->make_regular_bypass();
997 if (ZapUnusedHeapArea) {
998 SpaceMangler::mangle_region(MemRegion(r->top(), r->end()));
999 }
1000 }
1001
1002 // Reclaim regular regions that became empty
1003 if (r->is_regular() && live == 0) {
1004 r->make_trash();
1005 }
1006
1007 // Recycle all trash regions
1008 if (r->is_trash()) {
1009 live = 0;
1010 r->recycle();
1011 } else {
1012 if (r->is_old()) {
1013 ShenandoahGenerationalFullGC::account_for_region(r, _old_regions, _old_usage, _old_humongous_waste);
1014 } else if (r->is_young()) {
1015 ShenandoahGenerationalFullGC::account_for_region(r, _young_regions, _young_usage, _young_humongous_waste);
1016 }
1017 }
1018 r->set_live_data(live);
1019 r->reset_alloc_metadata();
1020 }
1021
1022 void update_generation_usage() {
1023 if (_is_generational) {
1024 _heap->old_generation()->establish_usage(_old_regions, _old_usage, _old_humongous_waste);
1025 _heap->young_generation()->establish_usage(_young_regions, _young_usage, _young_humongous_waste);
1026 } else {
1027 assert(_old_regions == 0, "Old regions only expected in generational mode");
1028 assert(_old_usage == 0, "Old usage only expected in generational mode");
1029 assert(_old_humongous_waste == 0, "Old humongous waste only expected in generational mode");
1030 }
1031
1032 // In generational mode, global usage should be the sum of young and old. This is also true
1033 // for non-generational modes except that there are no old regions.
1034 _heap->global_generation()->establish_usage(_old_regions + _young_regions,
1035 _old_usage + _young_usage,
1036 _old_humongous_waste + _young_humongous_waste);
1037 }
1038 };
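// Typical driver shape, as used in phase5_epilog() below: iterate all
// regions with this closure, then publish the accumulated per-generation
// tallies:
//
//   ShenandoahPostCompactClosure post_compact;
//   heap->heap_region_iterate(&post_compact);
//   post_compact.update_generation_usage();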
1039
1040 void ShenandoahFullGC::compact_humongous_objects() {
1041 // Compact humongous regions, based on the forwarding pointer installed in their start objects.
1042 //
1043 // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
1044 // humongous regions are already compacted, and do not require further moves, which alleviates
1045 // sliding costs. We may consider doing this in parallel in the future.
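// Illustration (hypothetical indices): an object occupying regions 10..12
// whose forwardee lies at the bottom of region 3 is copied into regions
// 3..5; regions 10..12 are then downgraded to regular regions, region 3
// becomes the new humongous start, and regions 4..5 become continuations.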
1046
1047 ShenandoahHeap* heap = ShenandoahHeap::heap();
1048
1049 for (size_t c = heap->num_regions(); c > 0; c--) {
1050 ShenandoahHeapRegion* r = heap->get_region(c - 1);
1051 if (r->is_humongous_start()) {
1052 oop old_obj = cast_to_oop(r->bottom());
1053 if (!old_obj->is_forwarded()) {
1054 // No need to move the object, it stays at the same slot
1055 continue;
1056 }
1057 size_t words_size = old_obj->size();
1058 size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
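// required_regions() expects a size in bytes (hence the HeapWordSize
// scaling) and rounds up to whole regions.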
1059
1060 size_t old_start = r->index();
1061 size_t old_end = old_start + num_regions - 1;
1062 size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
1063 size_t new_end = new_start + num_regions - 1;
1064 assert(old_start != new_start, "must be real move");
1065 assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());
1066
1067 log_debug(gc)("Full GC compaction moves humongous object from region " SIZE_FORMAT " to region " SIZE_FORMAT, old_start, new_start);
1068 Copy::aligned_conjoint_words(r->bottom(), heap->get_region(new_start)->bottom(), words_size);
1069 ContinuationGCSupport::relativize_stack_chunk(cast_to_oop<HeapWord*>(r->bottom()));
1070
1071 oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
1072 new_obj->init_mark();
1073
1074 {
1075 ShenandoahAffiliation original_affiliation = r->affiliation();
1076 for (size_t c = old_start; c <= old_end; c++) {
1077 ShenandoahHeapRegion* r = heap->get_region(c);
1078 // Leave humongous region affiliation unchanged.
1079 r->make_regular_bypass();
1080 r->set_top(r->bottom());
1081 }
1082
1083 for (size_t c = new_start; c <= new_end; c++) {
1084 ShenandoahHeapRegion* r = heap->get_region(c);
1085 if (c == new_start) {
1086 r->make_humongous_start_bypass(original_affiliation);
1087 } else {
1088 r->make_humongous_cont_bypass(original_affiliation);
1089 }
1090
1091 // Trailing region may be non-full, record the remainder there
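// Worked example (illustrative numbers): with 1 MiB regions (131072 words
// on 64-bit), a 300000-word object needs three regions; the remainder is
// 300000 & (131072 - 1) = 37856 words, so the last region's top ends up
// 37856 words above its bottom instead of at region end.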
1092 size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
1093 if ((c == new_end) && (remainder != 0)) {
1094 r->set_top(r->bottom() + remainder);
1095 } else {
1096 r->set_top(r->end());
1097 }
1098
1099 r->reset_alloc_metadata();
1100 }
1101 }
1102 }
1103 }
1104 }
1105
1106 // This is slightly different from ShHeap::reset_next_mark_bitmap:
1107 // we need to remain able to walk pinned regions.
1108 // Since pinned regions do not move and don't get compacted, we will get holes with
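// (Comment truncated by elision; the upstream text continues: such holes
// contain unreachable objects that cannot be iterated by size alone, so
// only unpinned regions get their marking bitmaps reset, and TAMS is
// likewise only reset for unpinned regions.)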
1134 };
1135
1136 void ShenandoahFullGC::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
1137 GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
1138 ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);
1139
1140 ShenandoahHeap* heap = ShenandoahHeap::heap();
1141
1142 // Compact regular objects first
1143 {
1144 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
1145 ShenandoahCompactObjectsTask compact_task(worker_slices);
1146 heap->workers()->run_task(&compact_task);
1147 }
1148
1149 // Compact humongous objects after regular object moves
1150 {
1151 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
1152 compact_humongous_objects();
1153 }
1154 }
1155
1156 void ShenandoahFullGC::phase5_epilog() {
1157 GCTraceTime(Info, gc, phases) time("Phase 5: Full GC epilog", _gc_timer);
1158 ShenandoahHeap* heap = ShenandoahHeap::heap();
1159
1160 // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
1161 // and must ensure the bitmap is in sync.
1162 {
1163 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
1164 ShenandoahMCResetCompleteBitmapTask task;
1165 heap->workers()->run_task(&task);
1166 }
1167
1168 // Bring regions into proper states after the collection, and set heap properties.
1169 {
1170 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);
1171 ShenandoahPostCompactClosure post_compact;
1172 heap->heap_region_iterate(&post_compact);
1173 post_compact.update_generation_usage();
1174
1175 if (heap->mode()->is_generational()) {
1176 ShenandoahGenerationalFullGC::balance_generations_after_gc(heap);
1177 }
1178
1179 heap->collection_set()->clear();
1180 size_t young_cset_regions, old_cset_regions;
1181 size_t first_old, last_old, num_old;
1182 heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
1183
1184 // We also do not expand old generation size following Full GC because we have scrambled age populations and
1185 // no longer have objects separated by age into distinct regions.
1186
1187 // TODO: Do we need to fix FullGC so that it maintains aged segregation of objects into distinct regions?
1188 // A partial solution would be to remember how many objects are of tenure age following Full GC, but
1189 // this is probably suboptimal, because most of these objects will not reside in a region that will be
1190 // selected for the next evacuation phase.
1191
1192
1193 if (heap->mode()->is_generational()) {
1194 ShenandoahGenerationalFullGC::compute_balances();
1195 }
1196
1197 heap->free_set()->rebuild(young_cset_regions, old_cset_regions);
1198
1199 heap->clear_cancelled_gc(true /* clear oom handler */);
1200 }
1201
1202 _preserved_marks->restore(heap->workers());
1203 _preserved_marks->reclaim();
1204
1205 // We defer generation resizing actions until after cset regions have been recycled. We do this even following an
1206 // abbreviated cycle.
1207 if (heap->mode()->is_generational()) {
1208 ShenandoahGenerationalFullGC::balance_generations_after_rebuilding_free_set();
1209 ShenandoahGenerationalFullGC::rebuild_remembered_set(heap);
1210 }
1211 }