/*
 * Copyright (c) 2014, 2021, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "compiler/oopMap.hpp"
#include "gc/shared/continuationGCSupport.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahGenerationalFullGC.hpp"
#include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"

ShenandoahFullGC::ShenandoahFullGC() :
  _gc_timer(ShenandoahHeap::heap()->gc_timer()),
  _preserved_marks(new PreservedMarksSet(true)) {}

ShenandoahFullGC::~ShenandoahFullGC() {
  delete _preserved_marks;
}

bool ShenandoahFullGC::collect(GCCause::Cause cause) {
  vmop_entry_full(cause);
  // Always succeeds
  return true;
}

// ...

void ShenandoahFullGC::entry_full(GCCause::Cause cause) {
  static const char* msg = "Pause Full";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
                              "full gc");

  op_full(cause);
}

void ShenandoahFullGC::op_full(GCCause::Cause cause) {
  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  // Perform full GC
  do_it(cause);

  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  if (heap->mode()->is_generational()) {
    ShenandoahGenerationalFullGC::handle_completion(heap);
  }

  metrics.snap_after();

  if (metrics.is_good_progress()) {
    heap->notify_gc_progress();
  } else {
    // Nothing to do. Tell the allocation path that we have failed to make
    // progress, and it can finally fail.
    heap->notify_gc_no_progress();
  }

  // Regardless of whether progress was made, we record that we completed a "successful" full GC.
  heap->global_generation()->heuristics()->record_success_full();
  heap->shenandoah_policy()->record_success_full();
}

void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (heap->mode()->is_generational()) {
    ShenandoahGenerationalFullGC::prepare();
  }

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_fullgc();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  // Degenerated GC may carry concurrent root flags when upgrading to
  // full GC. We need to reset them before mutators resume.
  heap->set_concurrent_strong_root_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);

  heap->set_full_gc_in_progress(true);

  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_pre);
    heap->pre_full_gc_dump(_gc_timer);
  }

  {
    ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
    // Full GC is supposed to recover from any GC state:

    // a0. Remember if we have forwarded objects
    bool has_forwarded_objects = heap->has_forwarded_objects();

    // a1. Cancel evacuation, if in progress
    if (heap->is_evacuation_in_progress()) {
      heap->set_evacuation_in_progress(false);
    }
    assert(!heap->is_evacuation_in_progress(), "sanity");

    // a2. Cancel update-refs, if in progress
    if (heap->is_update_refs_in_progress()) {
      heap->set_update_refs_in_progress(false);
    }
    assert(!heap->is_update_refs_in_progress(), "sanity");

    // b. Cancel all concurrent marks, if in progress
    if (heap->is_concurrent_mark_in_progress()) {
      heap->cancel_concurrent_mark();
    }
    assert(!heap->is_concurrent_mark_in_progress(), "sanity");

    // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
    if (has_forwarded_objects) {
      update_roots(true /*full_gc*/);
    }

    // d. Reset the bitmaps for new marking
    heap->global_generation()->reset_mark_bitmap();
    assert(heap->marking_context()->is_bitmap_clear(), "sanity");
    assert(!heap->global_generation()->is_mark_complete(), "sanity");

    // e. Abandon reference discovery and clear all discovered references.
    ShenandoahReferenceProcessor* rp = heap->global_generation()->ref_processor();
    rp->abandon_partial_discovery();

    // f. Sync pinned region status from the CP marks
    heap->sync_pinned_region_status();

    if (heap->mode()->is_generational()) {
      ShenandoahGenerationalFullGC::restore_top_before_promote(heap);
    }

    // The rest of the prologue:
    _preserved_marks->init(heap->workers()->active_workers());

    assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
  }

  if (UseTLAB) {
    // Note: PLABs are also retired with GCLABs in generational mode.
    heap->gclabs_retire(ResizeTLAB);
    heap->tlabs_retire(ResizeTLAB);
  }

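  // Make sure the state changes above are visible to all threads before the
  // parallel phases below start.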
  OrderAccess::fence();

  phase1_mark_heap();

  // Once marking is done, which may have fixed up forwarded objects, we can drop the flag.
  // Coming out of Full GC, we would not have any forwarded objects.
  // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
  heap->set_has_forwarded_objects(false);

  heap->set_full_gc_move_in_progress(true);

  // Set up workers for the rest
  OrderAccess::fence();

  // Initialize worker slices
  ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
  for (uint i = 0; i < heap->max_workers(); i++) {
    worker_slices[i] = new ShenandoahHeapRegionSet();
  }

  {
    // The rest of the code performs region moves, where region status is undefined
    // until all phases run together.
    ShenandoahHeapLocker lock(heap->lock());

    phase2_calculate_target_addresses(worker_slices);

    OrderAccess::fence();

    phase3_update_references();

    phase4_compact_objects(worker_slices);

    phase5_epilog();
  }

  // Resize metaspace
  MetaspaceGC::compute_new_size();

  // Free worker slices
  for (uint i = 0; i < heap->max_workers(); i++) {
    delete worker_slices[i];
  }
  FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);

  heap->set_full_gc_move_in_progress(false);
  heap->set_full_gc_in_progress(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_fullgc();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_post);
    heap->post_full_gc_dump(_gc_timer);
  }
}

class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) override {
    _ctx->capture_top_at_mark_start(r);
    r->clear_live_data();
  }

  bool is_thread_safe() override { return true; }
};

void ShenandoahFullGC::phase1_mark_heap() {
  GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

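  // Capture top-at-mark-start and clear per-region live data for all affiliated
  // regions; FREE regions have nothing to mark, so the exclude-closure wrapper
  // skips them.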
  ShenandoahPrepareForMarkClosure prepare_for_mark;
  ShenandoahExcludeRegionClosure<FREE> cl(&prepare_for_mark);
  heap->parallel_heap_region_iterate(&cl);

  heap->set_unload_classes(heap->global_generation()->heuristics()->can_unload_classes());

  ShenandoahReferenceProcessor* rp = heap->global_generation()->ref_processor();
  // enable ("weak") refs discovery
  rp->set_soft_reference_policy(true); // forcefully purge all soft references

  ShenandoahSTWMark mark(heap->global_generation(), true /*full_gc*/);
  mark.mark();
  heap->parallel_cleaning(true /* full_gc */);

  if (heap->mode()->is_generational()) {
    ShenandoahGenerationalFullGC::log_live_in_old(heap);
  }
}

class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  PreservedMarks* const _preserved_marks;
  ShenandoahHeap* const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion* _to_region;
  ShenandoahHeapRegion* _from_region;
  HeapWord* _compact_point;

public:
  ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
                                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                              ShenandoahHeapRegion* to_region) :
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(nullptr),
    _compact_point(to_region->bottom()) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  void finish() {
    assert(_to_region != nullptr, "should not happen");
    _to_region->set_new_top(_compact_point);
  }

  bool is_compact_same_region() {
    return _from_region == _to_region;
  }

  int empty_regions_pos() {
    return _empty_regions_pos;
  }

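  // Sliding compaction: visit marked objects in address order, and assign each
  // one a forwarding address at the current compaction point, spilling over
  // into the next empty region whenever the current to-region fills up.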
  void do_object(oop p) {
    assert(_from_region != nullptr, "must set before work");
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");

    size_t obj_size = p->size();
    if (_compact_point + obj_size > _to_region->end()) {
      finish();

      // Object doesn't fit. Pick next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty regions? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != nullptr, "must not be null");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // Object fits into the current region; record its new location:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(nullptr, p);
    _preserved_marks->push_if_necessary(p, p->mark());
    p->forward_to(cast_to_oop(_compact_point));
    _compact_point += obj_size;
  }
};

class ShenandoahPrepareForCompactionTask : public WorkerTask {
private:
  PreservedMarksSet* const _preserved_marks;
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahPrepareForCompactionTask(PreservedMarksSet* preserved_marks, ShenandoahHeapRegionSet** worker_slices) :
    WorkerTask("Shenandoah Prepare For Compaction"),
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
  }

  static bool is_candidate_region(ShenandoahHeapRegion* r) {
    // Empty region: get it into the slice to defragment the slice itself.
    // We could have skipped this without violating correctness, but we really
    // want to compact all live regions to the start of the heap, which sometimes
    // means moving them into the fully empty regions.
    if (r->is_empty()) return true;

    // The region can be moved, and it is not humongous. Humongous moves are
    // special-cased, because they are handled separately.
    return r->is_stw_move_allowed() && !r->is_humongous();
  }

  void work(uint worker_id) override;
private:
  template<typename ClosureType>
  void prepare_for_compaction(ClosureType& cl,
                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                              ShenandoahHeapRegionSetIterator& it,
                              ShenandoahHeapRegion* from_region);
};

void ShenandoahPrepareForCompactionTask::work(uint worker_id) {
  ShenandoahParallelWorkerSession worker_session(worker_id);
  ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
  ShenandoahHeapRegionSetIterator it(slice);
  ShenandoahHeapRegion* from_region = it.next();
  // No work?
  if (from_region == nullptr) {
    return;
  }

  // Sliding compaction. Walk all regions in the slice, and compact them.
  // Remember empty regions and reuse them as needed.
  ResourceMark rm;

  GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());

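  // The generational closure additionally tracks region affiliation while it
  // computes target addresses; both closures share the same driver loop below.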
  if (_heap->mode()->is_generational()) {
    ShenandoahPrepareForGenerationalCompactionObjectClosure cl(_preserved_marks->get(worker_id),
                                                               empty_regions, from_region, worker_id);
    prepare_for_compaction(cl, empty_regions, it, from_region);
  } else {
    ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);
    prepare_for_compaction(cl, empty_regions, it, from_region);
  }
}

template<typename ClosureType>
void ShenandoahPrepareForCompactionTask::prepare_for_compaction(ClosureType& cl,
                                                                GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                                                ShenandoahHeapRegionSetIterator& it,
                                                                ShenandoahHeapRegion* from_region) {
  while (from_region != nullptr) {
    assert(is_candidate_region(from_region), "Sanity");
    cl.set_from_region(from_region);
    if (from_region->has_live()) {
      _heap->marked_object_iterate(from_region, &cl);
    }

    // Compacted the region to somewhere else? From-region is empty then.
    if (!cl.is_compact_same_region()) {
      empty_regions.append(from_region);
    }
    from_region = it.next();
  }
  cl.finish();

  // Mark all remaining regions as empty
  for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
    ShenandoahHeapRegion* r = empty_regions.at(pos);
    r->set_new_top(r->bottom());
  }
}

void ShenandoahFullGC::calculate_target_humongous_objects() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compute the new addresses for humongous objects. We need to do this after addresses
  // for regular objects are calculated, and we know what regions in the heap suffix are
  // available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // a humongous start there.
  //
  // The complication is potential non-movable regions during the scan. If such a region is
  // detected, then sliding restarts towards that non-movable region.

  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();
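
  // Example: if the heap tail is [..., H, H, E, E] where the E regions are
  // empty and H-H is a movable two-region humongous object, the window grows
  // over both E regions, start = to_end - 2 lands on the first E region, and
  // the object is forwarded there; to_end then shrinks to that slot. Hitting a
  // non-movable region instead collapses the window to that region's index.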

  log_debug(gc)("Full GC calculating target humongous objects from end " SIZE_FORMAT, to_end);
  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->index();
      continue;
    }

    if (r->is_humongous_start() && r->is_stw_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = cast_to_oop(r->bottom());
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->index()) {
        // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
        _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
        old_obj->forward_to(cast_to_oop(heap->get_region(start)->bottom()));
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Scan starting from current region.
    to_begin = r->index();
    to_end = r->index();
  }
}

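// Put every region into a state the sliding compaction can safely handle:
// trash regions are recycled, leftover cset regions are demoted back to
// regular, and uncommitted regions are committed, since data may be slid
// through them.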
class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_trash()) {
      r->recycle();
    }
    if (r->is_cset()) {
      // Leave affiliation unchanged
      r->make_regular_bypass();
    }
    if (r->is_empty_uncommitted()) {
      r->make_committed_bypass();
    }
    assert(r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index());

    // Record current region occupancy: this communicates to the rest of the
    // Full GC code that empty regions are free.
    r->set_new_top(r->top());
  }
};

class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahTrashImmediateGarbageClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) override {
    if (r->is_humongous_start()) {
      oop humongous_obj = cast_to_oop(r->bottom());
      if (!_ctx->is_marked(humongous_obj)) {
        assert(!r->has_live(), "Region " SIZE_FORMAT " is not marked, should not have live", r->index());
        _heap->trash_humongous_region_at(r);
      } else {
        assert(r->has_live(), "Region " SIZE_FORMAT " should have live", r->index());
      }
    } else if (r->is_humongous_continuation()) {
      // If we hit a continuation, the non-live humongous starts should have been trashed already
      assert(r->humongous_start_region()->has_live(), "Region " SIZE_FORMAT " should have live", r->index());
    } else if (r->is_regular()) {
      if (!r->has_live()) {
        r->make_trash_immediate();
      }
    }
  }
};

void ShenandoahFullGC::distribute_slices(ShenandoahHeapRegionSet** worker_slices) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  uint n_workers = heap->workers()->active_workers();
  size_t n_regions = heap->num_regions();

  // What we want to accomplish: have the dense prefix of data, while still balancing
  // out the parallel work.
  //
  // Assuming the amount of work is driven by the live data that needs moving, we can slice
  // the entire heap into equal-live-sized prefix slices, and compact into them. So, each
  // thread takes all regions in its prefix subset, and then it takes some regions from
  // ...
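
  // Debug-only verification: every compaction candidate region must have been
  // assigned to some worker slice (the slice bitmap 'map' is built by the
  // elided distribution code above).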
  for (size_t rid = 0; rid < n_regions; rid++) {
    bool is_candidate = ShenandoahPrepareForCompactionTask::is_candidate_region(heap->get_region(rid));
    bool is_distributed = map.at(rid);
    assert(is_distributed || !is_candidate, "All candidates are distributed: " SIZE_FORMAT, rid);
  }
#endif
}

void ShenandoahFullGC::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
  ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // We are about to figure out which regions can be compacted; make sure pinning
  // status has been updated in the GC prologue.
  heap->assert_pinned_region_status();

  {
    // Trash the immediately collectible regions before computing addresses
    ShenandoahTrashImmediateGarbageClosure trash_immediate_garbage;
    ShenandoahExcludeRegionClosure<FREE> cl(&trash_immediate_garbage);
    heap->heap_region_iterate(&cl);

    // Make sure regions are in good state: committed, active, clean.
    // This is needed because we are potentially sliding the data through them.
    ShenandoahEnsureHeapActiveClosure ecl;
    heap->heap_region_iterate(&ecl);
  }

  // Compute the new addresses for regular objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);

    distribute_slices(worker_slices);

    ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
    heap->workers()->run_task(&task);
  }

  // Compute the new addresses for humongous objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
    calculate_target_humongous_objects();
  }
}

// ...

class ShenandoahAdjustPointersTask : public WorkerTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahRegionIterator _regions;

public:
  ShenandoahAdjustPointersTask() :
    WorkerTask("Shenandoah Adjust Pointers"),
    _heap(ShenandoahHeap::heap()) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersObjectClosure obj_cl;
    ShenandoahHeapRegion* r = _regions.next();
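    // Humongous continuations are walked as part of their start region, and
    // regions without live data contain no references to adjust.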
    while (r != nullptr) {
      if (!r->is_humongous_continuation() && r->has_live()) {
        _heap->marked_object_iterate(r, &obj_cl);
      }
      if (_heap->mode()->is_generational()) {
        ShenandoahGenerationalFullGC::maybe_coalesce_and_fill_region(r);
      }
      r = _regions.next();
    }
  }
};

class ShenandoahAdjustRootPointersTask : public WorkerTask {
private:
  ShenandoahRootAdjuster* _rp;
  PreservedMarksSet* _preserved_marks;
public:
  ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
    WorkerTask("Shenandoah Adjust Root Pointers"),
    _rp(rp),
    _preserved_marks(preserved_marks) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersClosure cl;
    _rp->roots_do(worker_id, &cl);
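    // The preserved-marks table pairs objects with their saved headers; its
    // object pointers must be re-pointed to the new locations as well.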
    _preserved_marks->get(worker_id)->adjust_during_full_gc();

// ...

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);
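    // Worker slices are disjoint, so each worker can move the objects in its
    // own slice without synchronizing with other workers.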

    ShenandoahCompactObjectsClosure cl(worker_id);
    ShenandoahHeapRegion* r = slice.next();
    while (r != nullptr) {
      assert(!r->is_humongous(), "must not get humongous regions here");
      if (r->has_live()) {
        _heap->marked_object_iterate(r, &cl);
      }
      r->set_top(r->new_top());
      r = slice.next();
    }
  }
};

class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  bool _is_generational;
  size_t _young_regions, _young_usage, _young_humongous_waste;
  size_t _old_regions, _old_usage, _old_humongous_waste;

public:
  ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()),
                                   _is_generational(_heap->mode()->is_generational()),
                                   _young_regions(0),
                                   _young_usage(0),
                                   _young_humongous_waste(0),
                                   _old_regions(0),
                                   _old_usage(0),
                                   _old_humongous_waste(0) {
    _heap->free_set()->clear();
  }

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->is_cset(), "cset regions should have been demoted already");

    // Need to reset the complete-top-at-mark-start pointer here because
    // the complete marking bitmap is no longer valid. This ensures
    // size-based iteration in marked_object_iterate().
    // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
    // pinned regions.
    if (!r->is_pinned()) {
      _heap->complete_marking_context()->reset_top_at_mark_start(r);
    }

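    // After sliding compaction, everything below top() in a region is
    // effectively live, so the region usage doubles as the live-data estimate.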
    size_t live = r->used();

    // Promote empty regions that had data compacted into them to regular regions
    if (r->is_empty() && live > 0) {
      if (!_is_generational) {
        r->make_affiliated_maybe();
      }
      // else, generational mode compaction has already established affiliation.
      r->make_regular_bypass();
      if (ZapUnusedHeapArea) {
        SpaceMangler::mangle_region(MemRegion(r->top(), r->end()));
      }
    }

    // Reclaim regular regions that became empty
    if (r->is_regular() && live == 0) {
      r->make_trash();
    }

    // Recycle all trash regions
    if (r->is_trash()) {
      live = 0;
      r->recycle();
    } else {
      if (r->is_old()) {
        ShenandoahGenerationalFullGC::account_for_region(r, _old_regions, _old_usage, _old_humongous_waste);
      } else if (r->is_young()) {
        ShenandoahGenerationalFullGC::account_for_region(r, _young_regions, _young_usage, _young_humongous_waste);
      }
    }
    r->set_live_data(live);
    r->reset_alloc_metadata();
  }

  void update_generation_usage() {
    if (_is_generational) {
      _heap->old_generation()->establish_usage(_old_regions, _old_usage, _old_humongous_waste);
      _heap->young_generation()->establish_usage(_young_regions, _young_usage, _young_humongous_waste);
    } else {
      assert(_old_regions == 0, "Old regions only expected in generational mode");
      assert(_old_usage == 0, "Old usage only expected in generational mode");
      assert(_old_humongous_waste == 0, "Old humongous waste only expected in generational mode");
    }

    // In generational mode, global usage should be the sum of young and old. This is also true
    // for non-generational modes, except that there are no old regions.
    _heap->global_generation()->establish_usage(_old_regions + _young_regions,
                                                _old_usage + _young_usage,
                                                _old_humongous_waste + _young_humongous_waste);
  }
};

void ShenandoahFullGC::compact_humongous_objects() {
  // Compact humongous regions, based on their fwdptr objects.
  //
  // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
  // humongous regions are already compacted, and do not require further moves, which alleviates
  // sliding costs. We may consider doing this in parallel in the future.

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_start()) {
      oop old_obj = cast_to_oop(r->bottom());
      if (!old_obj->is_forwarded()) {
        // No need to move the object; it stays in the same slot
        continue;
      }
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t old_start = r->index();
      size_t old_end = old_start + num_regions - 1;
      size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
      size_t new_end = new_start + num_regions - 1;
      assert(old_start != new_start, "must be real move");
      assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());

      log_debug(gc)("Full GC compaction moves humongous object from region " SIZE_FORMAT " to region " SIZE_FORMAT, old_start, new_start);
      Copy::aligned_conjoint_words(r->bottom(), heap->get_region(new_start)->bottom(), words_size);
      ContinuationGCSupport::relativize_stack_chunk(cast_to_oop<HeapWord*>(r->bottom()));

      oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
      new_obj->init_mark();

      {
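        // Rewire region metadata: demote the old range to empty regular
        // regions, then stamp the new range as a humongous start plus
        // continuations, preserving the original affiliation.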
        ShenandoahAffiliation original_affiliation = r->affiliation();
        for (size_t c = old_start; c <= old_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          // Leave humongous region affiliation unchanged.
          r->make_regular_bypass();
          r->set_top(r->bottom());
        }

        for (size_t c = new_start; c <= new_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          if (c == new_start) {
            r->make_humongous_start_bypass(original_affiliation);
          } else {
            r->make_humongous_cont_bypass(original_affiliation);
          }

          // Trailing region may be non-full, record the remainder there
          size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
          if ((c == new_end) && (remainder != 0)) {
            r->set_top(r->bottom() + remainder);
          } else {
            r->set_top(r->end());
          }

          r->reset_alloc_metadata();
        }
      }
    }
  }
}

// This is slightly different from ShHeap::reset_next_mark_bitmap:
// we need to remain able to walk pinned regions.
// Since pinned regions do not move and don't get compacted, we will get holes with
// ...
};

void ShenandoahFullGC::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
  ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compact regular objects first
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
    ShenandoahCompactObjectsTask compact_task(worker_slices);
    heap->workers()->run_task(&compact_task);
  }

  // Compact humongous objects after regular object moves
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
    compact_humongous_objects();
  }
}

void ShenandoahFullGC::phase5_epilog() {
  GCTraceTime(Info, gc, phases) time("Phase 5: Full GC epilog", _gc_timer);
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
  // and must ensure the bitmap is in sync.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
    ShenandoahMCResetCompleteBitmapTask task;
    heap->workers()->run_task(&task);
  }

  // Bring regions into proper states after the collection, and set heap properties.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);
    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);
    post_compact.update_generation_usage();

    if (heap->mode()->is_generational()) {
      ShenandoahGenerationalFullGC::balance_generations_after_gc(heap);
    }

    heap->collection_set()->clear();
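    // The free set rebuild is split in two: prepare_to_rebuild() tallies the
    // reclaimable cset and old regions, and finish_rebuild() re-registers the
    // free regions once the generation balances have been computed.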
    size_t young_cset_regions, old_cset_regions;
    size_t first_old, last_old, num_old;
    heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);

    // We also do not expand the old generation size following Full GC, because we have scrambled
    // the age populations and no longer have objects separated by age into distinct regions.
    if (heap->mode()->is_generational()) {
      ShenandoahGenerationalFullGC::compute_balances();
    }

    heap->free_set()->finish_rebuild(young_cset_regions, old_cset_regions, num_old);

    heap->clear_cancelled_gc(true /* clear oom handler */);
  }

  _preserved_marks->restore(heap->workers());
  _preserved_marks->reclaim();

  // We defer generation resizing actions until after cset regions have been recycled. We do this
  // even following an abbreviated cycle.
  if (heap->mode()->is_generational()) {
    ShenandoahGenerationalFullGC::balance_generations_after_rebuilding_free_set();
    ShenandoahGenerationalFullGC::rebuild_remembered_set(heap);
  }
}