/*
 * Copyright (c) 2014, 2021, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "compiler/oopMap.hpp"
#include "gc/shared/continuationGCSupport.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahGenerationalFullGC.hpp"
#include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"

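// Full GC installs forwarding pointers directly in object mark words during
// compaction, clobbering whatever was there before. Mark words that carry
// more than the default state (e.g. lock bits or an installed identity hash)
// are saved in this PreservedMarksSet and reinstated in the epilog. A minimal
// sketch of the protocol, using the calls that appear later in this file:
//
//   _preserved_marks->push_if_necessary(p, p->mark()); // save non-default mark
//   p->forward_to(cast_to_oop(new_addr));              // mark word now holds fwdptr
//   ...
//   _preserved_marks->restore(heap->workers());        // put saved marks back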
ShenandoahFullGC::ShenandoahFullGC() :
  _gc_timer(ShenandoahHeap::heap()->gc_timer()),
  _preserved_marks(new PreservedMarksSet(true)) {}

ShenandoahFullGC::~ShenandoahFullGC() {
  delete _preserved_marks;
}

bool ShenandoahFullGC::collect(GCCause::Cause cause) {
  vmop_entry_full(cause);
  // Always succeeds
  return true;
}

void ShenandoahFullGC::vmop_entry_full(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->full_stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFullGC op(cause, this);
  VMThread::execute(&op);
}

void ShenandoahFullGC::entry_full(GCCause::Cause cause) {
  static const char* msg = "Pause Full";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
                              "full gc");

  op_full(cause);
}

void ShenandoahFullGC::op_full(GCCause::Cause cause) {
  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  // Perform full GC
  do_it(cause);

  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  if (heap->mode()->is_generational()) {
    ShenandoahGenerationalFullGC::handle_completion(heap);
  }

  metrics.snap_after();

  if (metrics.is_good_progress(heap->global_generation())) {
    heap->notify_gc_progress();
  } else {
    // Nothing to do. Tell the allocation path that we have failed to make
    // progress, and it can finally fail.
    heap->notify_gc_no_progress();
  }

  // Regardless of whether progress was made, we record that we completed a "successful" full GC.
  heap->global_generation()->heuristics()->record_success_full();
  heap->shenandoah_policy()->record_success_full();

  {
    ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_propagate_gc_state);
    heap->propagate_gc_state_to_all_threads();
  }
}

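// The Full GC driver. At a high level, a single safepoint operation runs:
//   1. phase1_mark_heap():                  STW marking of the whole heap
//   2. phase2_calculate_target_addresses(): plan sliding compaction per worker slice
//   3. phase3_update_references():          rewrite all refs to the planned locations
//   4. phase4_compact_objects():            physically move objects
//   5. phase5_epilog():                     reset bitmaps, rebuild the free set, restore marks
// Everything before phase 1 is a prologue that unwinds whatever concurrent GC
// state the heap happened to be in when the Full GC was requested.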
void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (heap->mode()->is_generational()) {
    ShenandoahGenerationalFullGC::prepare();
  }

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_fullgc();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  // Degenerated GC may carry concurrent root flags when upgrading to
  // full GC. We need to reset them before mutators resume.
  heap->set_concurrent_strong_root_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);

  heap->set_full_gc_in_progress(true);

  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_pre);
    heap->pre_full_gc_dump(_gc_timer);
  }

  {
    ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
    // Full GC is supposed to recover from any GC state:

    // a0. Remember if we have forwarded objects
    bool has_forwarded_objects = heap->has_forwarded_objects();

    // a1. Cancel evacuation, if in progress
    if (heap->is_evacuation_in_progress()) {
      heap->set_evacuation_in_progress(false);
    }
    assert(!heap->is_evacuation_in_progress(), "sanity");

    // a2. Cancel update-refs, if in progress
    if (heap->is_update_refs_in_progress()) {
      heap->set_update_refs_in_progress(false);
    }
    assert(!heap->is_update_refs_in_progress(), "sanity");

    // b. Cancel all concurrent marks, if in progress
    if (heap->is_concurrent_mark_in_progress()) {
      heap->cancel_concurrent_mark();
    }
    assert(!heap->is_concurrent_mark_in_progress(), "sanity");

    // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
    if (has_forwarded_objects) {
      update_roots(true /*full_gc*/);
    }

    // d. Abandon reference discovery and clear all discovered references.
    ShenandoahReferenceProcessor* rp = heap->global_generation()->ref_processor();
    rp->abandon_partial_discovery();

    // e. Sync pinned region status from the CP marks
    heap->sync_pinned_region_status();

    if (heap->mode()->is_generational()) {
      ShenandoahGenerationalFullGC::restore_top_before_promote(heap);
    }

    // The rest of the prologue:
    _preserved_marks->init(heap->workers()->active_workers());

    assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
  }

  if (UseTLAB) {
    // Note: PLABs are also retired with GCLABs in generational mode.
    heap->gclabs_retire(ResizeTLAB);
    heap->tlabs_retire(ResizeTLAB);
  }

  OrderAccess::fence();

  phase1_mark_heap();

  // Once marking is done, which may have fixed up forwarded objects, we can drop the flag.
  // Coming out of Full GC, we would not have any forwarded objects.
  // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
  heap->set_has_forwarded_objects(false);

  heap->set_full_gc_move_in_progress(true);

  // Setup workers for the rest
  OrderAccess::fence();

  // Initialize worker slices
  ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
  for (uint i = 0; i < heap->max_workers(); i++) {
    worker_slices[i] = new ShenandoahHeapRegionSet();
  }

  {
    // The rest of the code performs region moves, where region status is undefined
    // until all phases have run.
    ShenandoahHeapLocker lock(heap->lock());

    phase2_calculate_target_addresses(worker_slices);

    OrderAccess::fence();

    phase3_update_references();

    phase4_compact_objects(worker_slices);

    phase5_epilog();
  }

  // Resize metaspace
  MetaspaceGC::compute_new_size();

  // Free worker slices
  for (uint i = 0; i < heap->max_workers(); i++) {
    delete worker_slices[i];
  }
  FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);

  heap->set_full_gc_move_in_progress(false);
  heap->set_full_gc_in_progress(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_fullgc();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_post);
    heap->post_full_gc_dump(_gc_timer);
  }
}

void ShenandoahFullGC::phase1_mark_heap() {
  GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  heap->global_generation()->reset_mark_bitmap<true, true>();
  assert(heap->marking_context()->is_bitmap_clear(), "sanity");
  assert(!heap->global_generation()->is_mark_complete(), "sanity");

  heap->set_unload_classes(heap->global_generation()->heuristics()->can_unload_classes());

  ShenandoahReferenceProcessor* rp = heap->global_generation()->ref_processor();
  // enable ("weak") refs discovery
  rp->set_soft_reference_policy(true); // forcefully purge all soft references

  ShenandoahSTWMark mark(heap->global_generation(), true /*full_gc*/);
  mark.mark();
  heap->parallel_cleaning(true /* full_gc */);

  if (ShenandoahHeap::heap()->mode()->is_generational()) {
    ShenandoahGenerationalFullGC::log_live_in_old(heap);
  }
}

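// Plans sliding compaction within one worker slice: objects are visited in
// address order, and each is assigned the next free location (_compact_point)
// in the current to-region. A hypothetical walk, with made-up sizes: if the
// to-region has 100 words left and the next live object needs 120 words, we
// finish() the current to-region, switch to the next remembered empty region
// (or fall back to compacting the from-region into itself), and continue
// placing objects from that region's bottom().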
class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  PreservedMarks* const _preserved_marks;
  ShenandoahHeap* const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion* _to_region;
  ShenandoahHeapRegion* _from_region;
  HeapWord* _compact_point;

public:
  ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
                                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                              ShenandoahHeapRegion* to_region) :
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(nullptr),
    _compact_point(to_region->bottom()) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  void finish() {
    assert(_to_region != nullptr, "should not happen");
    _to_region->set_new_top(_compact_point);
  }

  bool is_compact_same_region() {
    return _from_region == _to_region;
  }

  int empty_regions_pos() {
    return _empty_regions_pos;
  }

  void do_object(oop p) {
    assert(_from_region != nullptr, "must set before work");
    assert(_heap->gc_generation()->complete_marking_context()->is_marked(p), "must be marked");
    assert(!_heap->gc_generation()->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");

    size_t obj_size = p->size();
    if (_compact_point + obj_size > _to_region->end()) {
      finish();

      // Object doesn't fit. Pick next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty regions? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != nullptr, "must not be null");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // Object fits into the current region. Record the new location, unless the object stays in place:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(nullptr, p);
    if (_compact_point != cast_from_oop<HeapWord*>(p)) {
      _preserved_marks->push_if_necessary(p, p->mark());
      p->forward_to(cast_to_oop(_compact_point));
    }
    _compact_point += obj_size;
  }
};

class ShenandoahPrepareForCompactionTask : public WorkerTask {
private:
  PreservedMarksSet* const _preserved_marks;
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahPrepareForCompactionTask(PreservedMarksSet* preserved_marks, ShenandoahHeapRegionSet** worker_slices) :
    WorkerTask("Shenandoah Prepare For Compaction"),
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
  }

  static bool is_candidate_region(ShenandoahHeapRegion* r) {
    // Empty region: get it into the slice to defragment the slice itself.
    // We could have skipped this without violating correctness, but we really
    // want to compact all live regions to the start of the heap, which sometimes
    // means moving them into the fully empty regions.
    if (r->is_empty()) return true;

    // The region can be moved, and it is not humongous. Humongous regions are
    // skipped here, because their moves are handled separately.
    return r->is_stw_move_allowed() && !r->is_humongous();
  }

  void work(uint worker_id) override;

private:
  template<typename ClosureType>
  void prepare_for_compaction(ClosureType& cl,
                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                              ShenandoahHeapRegionSetIterator& it,
                              ShenandoahHeapRegion* from_region);
};
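// A note on the template above: the generational and non-generational prepare
// closures are driven by the same loop, so prepare_for_compaction() is a
// function template rather than a virtual-dispatch hierarchy. This appears to
// let both closure types share the driver without a common base class, and it
// statically binds the per-region calls (set_from_region(), finish(), etc.);
// do_object() itself is still dispatched through marked_object_iterate().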

void ShenandoahPrepareForCompactionTask::work(uint worker_id) {
  ShenandoahParallelWorkerSession worker_session(worker_id);
  ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
  ShenandoahHeapRegionSetIterator it(slice);
  ShenandoahHeapRegion* from_region = it.next();
  // No work?
  if (from_region == nullptr) {
    return;
  }

  // Sliding compaction. Walk all regions in the slice, and compact them.
  // Remember empty regions and reuse them as needed.
  ResourceMark rm;

  GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());

  if (_heap->mode()->is_generational()) {
    ShenandoahPrepareForGenerationalCompactionObjectClosure cl(_preserved_marks->get(worker_id),
                                                               empty_regions, from_region, worker_id);
    prepare_for_compaction(cl, empty_regions, it, from_region);
  } else {
    ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);
    prepare_for_compaction(cl, empty_regions, it, from_region);
  }
}

template<typename ClosureType>
void ShenandoahPrepareForCompactionTask::prepare_for_compaction(ClosureType& cl,
                                                                GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                                                ShenandoahHeapRegionSetIterator& it,
                                                                ShenandoahHeapRegion* from_region) {
  while (from_region != nullptr) {
    assert(is_candidate_region(from_region), "Sanity");
    cl.set_from_region(from_region);
    if (from_region->has_live()) {
      _heap->marked_object_iterate(from_region, &cl);
    }

    // Compacted the region to somewhere else? From-region is empty then.
    if (!cl.is_compact_same_region()) {
      empty_regions.append(from_region);
    }
    from_region = it.next();
  }
  cl.finish();

  // Mark all remaining regions as empty
  for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
    ShenandoahHeapRegion* r = empty_regions.at(pos);
    r->set_new_top(r->bottom());
  }
}

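// A worked example of the backward humongous scan below, with hypothetical
// numbers: take a 10-region heap where regions 7..9 are empty and regions
// 5..6 hold a 2-region humongous object. Scanning down from the top, the
// window [to_begin, to_end) grows to [7, 10); region 6 (the humongous
// continuation) extends it to [6, 10). At region 5 (the humongous start),
// start = to_end - num_regions = 10 - 2 = 8 >= to_begin, so the object is
// forwarded to region 8's bottom and to_end shrinks to 8.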
void ShenandoahFullGC::calculate_target_humongous_objects() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compute the new addresses for humongous objects. We need to do this after addresses
  // for regular objects are calculated, when we know which regions in the heap suffix
  // are available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // a humongous start there.
  //
  // The complication is potential non-movable regions during the scan. If such a region is
  // detected, then sliding restarts towards that non-movable region.

  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();

  log_debug(gc)("Full GC calculating target humongous objects from end " SIZE_FORMAT, to_end);
  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->index();
      continue;
    }

    if (r->is_humongous_start() && r->is_stw_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = cast_to_oop(r->bottom());
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->index()) {
        // Fits into the current window, and the move is non-trivial. Record the move,
        // then continue the scan.
        _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
        old_obj->forward_to(cast_to_oop(heap->get_region(start)->bottom()));
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Scan starting from current region.
    to_begin = r->index();
    to_end = r->index();
  }
}

class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_trash()) {
      r->try_recycle_under_lock();
    }
    if (r->is_cset()) {
      // Leave affiliation unchanged
      r->make_regular_bypass();
    }
    if (r->is_empty_uncommitted()) {
      r->make_committed_bypass();
    }
    assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index());

    // Record current region occupancy: this communicates to the rest of the Full GC
    // code which regions are empty and therefore free.
    r->set_new_top(r->top());
  }
};

class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahTrashImmediateGarbageClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->global_generation()->complete_marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) override {
    if (r->is_humongous_start()) {
      oop humongous_obj = cast_to_oop(r->bottom());
      if (!_ctx->is_marked(humongous_obj)) {
        assert(!r->has_live(), "Region " SIZE_FORMAT " is not marked, should not have live", r->index());
        _heap->trash_humongous_region_at(r);
      } else {
        assert(r->has_live(), "Region " SIZE_FORMAT " should have live", r->index());
      }
    } else if (r->is_humongous_continuation()) {
      // If we hit a continuation, the non-live humongous starts should have been trashed already
      assert(r->humongous_start_region()->has_live(), "Region " SIZE_FORMAT " should have live", r->index());
    } else if (r->is_regular()) {
      if (!r->has_live()) {
        r->make_trash_immediate();
      }
    }
  }
};

void ShenandoahFullGC::distribute_slices(ShenandoahHeapRegionSet** worker_slices) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  uint n_workers = heap->workers()->active_workers();
  size_t n_regions = heap->num_regions();

  // What we want to accomplish: have the dense prefix of data, while still balancing
  // out the parallel work.
  //
  // Assuming the amount of work is driven by the live data that needs moving, we can slice
  // the entire heap into equal-live-sized prefix slices, and compact into them. So, each
  // thread takes all regions in its prefix subset, and then it takes some regions from
  // the tail.
  //
  // Tail region selection becomes interesting.
  //
  // First, we want to distribute the regions fairly between the workers, and those regions
  // might have different amounts of live data. So, until we are sure no workers need more
  // live data, we only take what each worker needs.
  //
  // Second, since we slide everything to the left in each slice, the most busy regions
  // would be the ones on the left. This means we want all workers to have their after-tail
  // regions as close to the left as possible.
  //
  // The easiest way to do this is to distribute after-tail regions in round-robin between
  // workers that still need live data.
  //
  // Consider parallel workers A, B, C, then the target slice layout would be:
  //
  //  AAAAAAAABBBBBBBBCCCCCCCC|ABCABCABCABCABCABCABCABABABABABABABABABABAAAAA
  //
  //  (.....dense-prefix.....) (.....................tail...................)
  //  [all regions fully live] [left-most regions are fuller than right-most]
  //
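  // For a concrete feel, with made-up numbers: 3 workers, 100-word regions,
  // and total_live = 3000 words gives live_per_worker = 1000 words and a
  // dense prefix of 10 regions per worker (30 in total). Every candidate
  // region past the prefix is then handed, round-robin, to the next worker
  // whose accumulated live words are still under the per-worker budget.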

  // Compute how much live data is there. This would approximate the size of the dense
  // prefix we target to create.
  size_t total_live = 0;
  for (size_t idx = 0; idx < n_regions; idx++) {
    ShenandoahHeapRegion* r = heap->get_region(idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      total_live += r->get_live_data_words();
    }
  }

  // Estimate the size for the dense prefix. Note that we specifically count only the
  // "full" regions, so there would be some non-full regions in the slice tail.
  size_t live_per_worker = total_live / n_workers;
  size_t prefix_regions_per_worker = live_per_worker / ShenandoahHeapRegion::region_size_words();
  size_t prefix_regions_total = prefix_regions_per_worker * n_workers;
  prefix_regions_total = MIN2(prefix_regions_total, n_regions);
  assert(prefix_regions_total <= n_regions, "Sanity");

  // There might be non-candidate regions in the prefix. To compute where the tail actually
  // ends, we need to account for those as well.
  size_t prefix_end = prefix_regions_total;
  for (size_t idx = 0; idx < prefix_regions_total; idx++) {
    ShenandoahHeapRegion* r = heap->get_region(idx);
    if (!ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      prefix_end++;
    }
  }
  prefix_end = MIN2(prefix_end, n_regions);
  assert(prefix_end <= n_regions, "Sanity");

  // Distribute prefix regions per worker: each thread definitely gets its own same-sized
  // subset of the dense prefix.
  size_t prefix_idx = 0;

  size_t* live = NEW_C_HEAP_ARRAY(size_t, n_workers, mtGC);

  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSet* slice = worker_slices[wid];

    live[wid] = 0;
    size_t regs = 0;

    // Add all prefix regions for this worker
    while (prefix_idx < prefix_end && regs < prefix_regions_per_worker) {
      ShenandoahHeapRegion* r = heap->get_region(prefix_idx);
      if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
        slice->add_region(r);
        live[wid] += r->get_live_data_words();
        regs++;
      }
      prefix_idx++;
    }
  }

  // Distribute the tail among workers in round-robin fashion.
  size_t wid = n_workers - 1;

  for (size_t tail_idx = prefix_end; tail_idx < n_regions; tail_idx++) {
    ShenandoahHeapRegion* r = heap->get_region(tail_idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      assert(wid < n_workers, "Sanity");

      size_t live_region = r->get_live_data_words();

      // Select next worker that still needs live data.
      size_t old_wid = wid;
      do {
        wid++;
        if (wid == n_workers) wid = 0;
      } while (live[wid] + live_region >= live_per_worker && old_wid != wid);

      if (old_wid == wid) {
        // Circled back to the same worker? This means liveness data was
        // miscalculated. Bump the live_per_worker limit so that
        // everyone gets a piece of the leftover work.
        live_per_worker += ShenandoahHeapRegion::region_size_words();
      }

      worker_slices[wid]->add_region(r);
      live[wid] += live_region;
    }
  }

  FREE_C_HEAP_ARRAY(size_t, live);

#ifdef ASSERT
  ResourceBitMap map(n_regions);
  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSetIterator it(worker_slices[wid]);
    ShenandoahHeapRegion* r = it.next();
    while (r != nullptr) {
      size_t idx = r->index();
      assert(ShenandoahPrepareForCompactionTask::is_candidate_region(r), "Sanity: " SIZE_FORMAT, idx);
      assert(!map.at(idx), "No region distributed twice: " SIZE_FORMAT, idx);
      map.at_put(idx, true);
      r = it.next();
    }
  }

  for (size_t rid = 0; rid < n_regions; rid++) {
    bool is_candidate = ShenandoahPrepareForCompactionTask::is_candidate_region(heap->get_region(rid));
    bool is_distributed = map.at(rid);
    assert(is_distributed || !is_candidate, "All candidates are distributed: " SIZE_FORMAT, rid);
  }
#endif
}

void ShenandoahFullGC::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
  ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // We are about to figure out which regions can be compacted; make sure the pinning
  // status has been updated in the GC prologue.
  heap->assert_pinned_region_status();

  {
    // Trash the immediately collectible regions before computing addresses
    ShenandoahTrashImmediateGarbageClosure trash_immediate_garbage;
    ShenandoahExcludeRegionClosure<FREE> cl(&trash_immediate_garbage);
    heap->heap_region_iterate(&cl);

    // Make sure regions are in good state: committed, active, clean.
    // This is needed because we are potentially sliding the data through them.
    ShenandoahEnsureHeapActiveClosure ecl;
    heap->heap_region_iterate(&ecl);
  }

  // Compute the new addresses for regular objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);

    distribute_slices(worker_slices);

    ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
    heap->workers()->run_task(&task);
  }

  // Compute the new addresses for humongous objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
    calculate_target_humongous_objects();
  }
}

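// Phase 3 closure: loads each reference and, if the referent was forwarded in
// phase 2, rewrites the slot to point at the planned new location. Methods and
// nmethods are visited because this is a MetadataVisitingOopIterateClosure,
// but they live outside the Java heap and do not move here, which is why the
// metadata callbacks below are no-ops.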
class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      assert(_ctx->is_marked(obj), "must be marked");
      if (obj->is_forwarded()) {
        oop forw = obj->forwardee();
        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
      }
    }
  }

public:
  ShenandoahAdjustPointersClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->gc_generation()->complete_marking_context()) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_method(Method* m) {}
  void do_nmethod(nmethod* nm) {}
};

class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahAdjustPointersClosure _cl;

public:
  ShenandoahAdjustPointersObjectClosure() :
    _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->gc_generation()->complete_marking_context()->is_marked(p), "must be marked");
    p->oop_iterate(&_cl);
  }
};

class ShenandoahAdjustPointersTask : public WorkerTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahRegionIterator _regions;

public:
  ShenandoahAdjustPointersTask() :
    WorkerTask("Shenandoah Adjust Pointers"),
    _heap(ShenandoahHeap::heap()) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersObjectClosure obj_cl;
    ShenandoahHeapRegion* r = _regions.next();
    while (r != nullptr) {
      if (!r->is_humongous_continuation() && r->has_live()) {
        _heap->marked_object_iterate(r, &obj_cl);
      }
      if (_heap->mode()->is_generational()) {
        ShenandoahGenerationalFullGC::maybe_coalesce_and_fill_region(r);
      }
      r = _regions.next();
    }
  }
};

class ShenandoahAdjustRootPointersTask : public WorkerTask {
private:
  ShenandoahRootAdjuster* _rp;
  PreservedMarksSet* _preserved_marks;

public:
  ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
    WorkerTask("Shenandoah Adjust Root Pointers"),
    _rp(rp),
    _preserved_marks(preserved_marks) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersClosure cl;
    _rp->roots_do(worker_id, &cl);
    _preserved_marks->get(worker_id)->adjust_during_full_gc();
  }
};

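// Phase 3 driver. Note the DerivedPointerTable bracketing: compiled code may
// hold interior ("derived") pointers that are only valid relative to a base
// oop, so they are captured before root adjustment and recomputed by
// update_pointers() once the base oops have been rewritten.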
void ShenandoahFullGC::phase3_update_references() {
  GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
  ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  WorkerThreads* workers = heap->workers();
  uint nworkers = workers->active_workers();
  {
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_adjust_roots);
    ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
    workers->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  ShenandoahAdjustPointersTask adjust_pointers_task;
  workers->run_task(&adjust_pointers_task);
}

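// Phase 4 closure: physically slides each forwarded object to its target.
// Copy::aligned_conjoint_words() tolerates overlapping source and destination,
// which matters because objects slide leftwards within the same slice.
// init_mark() then installs the default mark word, erasing the forwarding
// pointer; saved non-default marks come back from the PreservedMarksSet in
// phase 5.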
class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  uint const _worker_id;

public:
  ShenandoahCompactObjectsClosure(uint worker_id) :
    _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}

  void do_object(oop p) {
    assert(_heap->gc_generation()->complete_marking_context()->is_marked(p), "must be marked");
    size_t size = p->size();
    if (p->is_forwarded()) {
      HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
      HeapWord* compact_to = cast_from_oop<HeapWord*>(p->forwardee());
      assert(compact_from != compact_to, "Forwarded object should move");
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
      oop new_obj = cast_to_oop(compact_to);

      ContinuationGCSupport::relativize_stack_chunk(new_obj);
      new_obj->init_mark();
    }
  }
};

class ShenandoahCompactObjectsTask : public WorkerTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
    WorkerTask("Shenandoah Compact Objects"),
    _heap(ShenandoahHeap::heap()),
    _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);

    ShenandoahCompactObjectsClosure cl(worker_id);
    ShenandoahHeapRegion* r = slice.next();
    while (r != nullptr) {
      assert(!r->is_humongous(), "must not get humongous regions here");
      if (r->has_live()) {
        _heap->marked_object_iterate(r, &cl);
      }
      r->set_top(r->new_top());
      r = slice.next();
    }
  }
};

class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  bool _is_generational;
  size_t _young_regions, _young_usage, _young_humongous_waste;
  size_t _old_regions, _old_usage, _old_humongous_waste;

public:
  ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()),
                                   _is_generational(_heap->mode()->is_generational()),
                                   _young_regions(0),
                                   _young_usage(0),
                                   _young_humongous_waste(0),
                                   _old_regions(0),
                                   _old_usage(0),
                                   _old_humongous_waste(0)
  {
    _heap->free_set()->clear();
  }

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->is_cset(), "cset regions should have been demoted already");

    // Need to reset the complete-top-at-mark-start pointer here because
    // the complete marking bitmap is no longer valid. This ensures
    // size-based iteration in marked_object_iterate().
    // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
    // pinned regions.
    if (!r->is_pinned()) {
      _heap->gc_generation()->complete_marking_context()->reset_top_at_mark_start(r);
    }

    size_t live = r->used();

    // Turn empty regions that have had allocations into regular regions
    if (r->is_empty() && live > 0) {
      if (!_is_generational) {
        r->make_affiliated_maybe();
      }
      // else, generational mode compaction has already established affiliation.
      r->make_regular_bypass();
      if (ZapUnusedHeapArea) {
        SpaceMangler::mangle_region(MemRegion(r->top(), r->end()));
      }
    }

    // Reclaim regular regions that became empty
    if (r->is_regular() && live == 0) {
      r->make_trash();
    }

    // Recycle all trash regions
    if (r->is_trash()) {
      live = 0;
      r->try_recycle_under_lock();
    } else {
      if (r->is_old()) {
        ShenandoahGenerationalFullGC::account_for_region(r, _old_regions, _old_usage, _old_humongous_waste);
      } else if (r->is_young()) {
        ShenandoahGenerationalFullGC::account_for_region(r, _young_regions, _young_usage, _young_humongous_waste);
      }
    }
    r->set_live_data(live);
    r->reset_alloc_metadata();
  }

  void update_generation_usage() {
    if (_is_generational) {
      _heap->old_generation()->establish_usage(_old_regions, _old_usage, _old_humongous_waste);
      _heap->young_generation()->establish_usage(_young_regions, _young_usage, _young_humongous_waste);
    } else {
      assert(_old_regions == 0, "Old regions only expected in generational mode");
      assert(_old_usage == 0, "Old usage only expected in generational mode");
      assert(_old_humongous_waste == 0, "Old humongous waste only expected in generational mode");
    }

    // In generational mode, global usage should be the sum of young and old. This is also true
    // for non-generational modes, except that there are no old regions.
    _heap->global_generation()->establish_usage(_old_regions + _young_regions,
                                                _old_usage + _young_usage,
                                                _old_humongous_waste + _young_humongous_waste);
  }
};

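// For the trailing-region arithmetic below, a hypothetical example: with
// 1 MiB regions, a 2.5 MiB humongous object needs required_regions() = 3
// regions; words_size & region_size_words_mask() yields the 0.5 MiB worth of
// words that land in the last region, so its top is set to bottom() +
// remainder rather than end().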
void ShenandoahFullGC::compact_humongous_objects() {
  // Compact humongous regions, based on their fwdptr objects.
  //
  // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
  // humongous regions are already compacted, and do not require further moves, which alleviates
  // sliding costs. We may consider doing this in parallel in the future.

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_start()) {
      oop old_obj = cast_to_oop(r->bottom());
      if (!old_obj->is_forwarded()) {
        // No need to move the object, it stays at the same slot
        continue;
      }
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t old_start = r->index();
      size_t old_end   = old_start + num_regions - 1;
      size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
      size_t new_end   = new_start + num_regions - 1;
      assert(old_start != new_start, "must be real move");
      assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());

      log_debug(gc)("Full GC compaction moves humongous object from region " SIZE_FORMAT " to region " SIZE_FORMAT, old_start, new_start);
      Copy::aligned_conjoint_words(r->bottom(), heap->get_region(new_start)->bottom(), words_size);
      ContinuationGCSupport::relativize_stack_chunk(cast_to_oop<HeapWord*>(r->bottom()));

      oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
      new_obj->init_mark();

      {
        ShenandoahAffiliation original_affiliation = r->affiliation();
        for (size_t c = old_start; c <= old_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          // Leave humongous region affiliation unchanged.
          r->make_regular_bypass();
          r->set_top(r->bottom());
        }

        for (size_t c = new_start; c <= new_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          if (c == new_start) {
            r->make_humongous_start_bypass(original_affiliation);
          } else {
            r->make_humongous_cont_bypass(original_affiliation);
          }

          // Trailing region may be non-full, record the remainder there
          size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
          if ((c == new_end) && (remainder != 0)) {
            r->set_top(r->bottom() + remainder);
          } else {
            r->set_top(r->end());
          }

          r->reset_alloc_metadata();
        }
      }
    }
  }
}

// This is slightly different from ShenandoahHeap::reset_next_mark_bitmap:
// we need to remain able to walk pinned regions.
// Since pinned regions do not move and don't get compacted, we will get holes with
// unreachable objects in them; those objects may have pointers to unloaded Klasses and thus
// cannot be iterated over using oop->size(). The only way to safely iterate over those is with
// a valid marking bitmap and a valid TAMS pointer. This class only resets marking
// bitmaps for un-pinned regions, and later we only reset TAMS for unpinned regions.
class ShenandoahMCResetCompleteBitmapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahMCResetCompleteBitmapTask() :
    WorkerTask("Shenandoah Reset Bitmap") {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->gc_generation()->complete_marking_context();
    while (region != nullptr) {
      if (heap->is_bitmap_slice_committed(region) && !region->is_pinned() && region->has_live()) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahFullGC::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
  ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compact regular objects first
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
    ShenandoahCompactObjectsTask compact_task(worker_slices);
    heap->workers()->run_task(&compact_task);
  }

  // Compact humongous objects after regular object moves
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
    compact_humongous_objects();
  }
}

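// The epilog runs with compaction done but heap metadata still stale. The
// ordering below matters: marking bitmaps are reset before the TAMS pointers
// are moved, region states and the free set are rebuilt before mutators
// resume, and preserved marks are restored last, once nothing needs the
// forwarding information anymore.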
void ShenandoahFullGC::phase5_epilog() {
  GCTraceTime(Info, gc, phases) time("Phase 5: Full GC epilog", _gc_timer);
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
  // and must ensure the bitmap is in sync.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
    ShenandoahMCResetCompleteBitmapTask task;
    heap->workers()->run_task(&task);
  }

  // Bring regions into proper states after the collection, and set heap properties.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);
    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);
    post_compact.update_generation_usage();

    if (heap->mode()->is_generational()) {
      ShenandoahGenerationalFullGC::balance_generations_after_gc(heap);
    }

    heap->collection_set()->clear();
    size_t young_cset_regions, old_cset_regions;
    size_t first_old, last_old, num_old;
    heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);

    // We do not expand the old generation size following Full GC, because we have scrambled
    // the age populations and no longer have objects separated by age into distinct regions.
    if (heap->mode()->is_generational()) {
      ShenandoahGenerationalFullGC::compute_balances();
    }

    heap->free_set()->finish_rebuild(young_cset_regions, old_cset_regions, num_old);

    // Set mark incomplete because the marking bitmaps have been reset, except for pinned regions.
    heap->global_generation()->set_mark_incomplete();

    heap->clear_cancelled_gc(true /* clear oom handler */);
  }

  _preserved_marks->restore(heap->workers());
  _preserved_marks->reclaim();

  // We defer generation resizing actions until after cset regions have been recycled.
  // We do this even following an abbreviated cycle.
  if (heap->mode()->is_generational()) {
    ShenandoahGenerationalFullGC::balance_generations_after_rebuilding_free_set();
    ShenandoahGenerationalFullGC::rebuild_remembered_set(heap);
  }
}