 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "compiler/oopMap.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"

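// The PreservedMarksSet is allocated on the C-heap ('true' == in_c_heap),
// since it must survive across all full GC phases rather than a single
// resource scope.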
ShenandoahFullGC::ShenandoahFullGC() :
  _gc_timer(ShenandoahHeap::heap()->gc_timer()),
  _preserved_marks(new PreservedMarksSet(true)) {}

bool ShenandoahFullGC::collect(GCCause::Cause cause) {
  vmop_entry_full(cause);
  // Always succeeds
  return true;
}

void ShenandoahFullGC::vmop_entry_full(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->full_stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFullGC op(cause, this);
  VMThread::execute(&op);
}

void ShenandoahFullGC::entry_full(GCCause::Cause cause) {
  static const char* msg = "Pause Full";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
                              "full gc");

  op_full(cause);
}

void ShenandoahFullGC::op_full(GCCause::Cause cause) {
  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  // Perform full GC
  do_it(cause);

  metrics.snap_after();

  if (metrics.is_good_progress()) {
    ShenandoahHeap::heap()->notify_gc_progress();
  } else {
    // Nothing to do. Tell the allocation path that we have failed to make
    // progress, and it can finally fail.
    ShenandoahHeap::heap()->notify_gc_no_progress();
  }
}

void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_fullgc();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  // Degenerated GC may carry concurrent root flags when upgrading to
  // full GC. We need to reset them before mutators resume.
  heap->set_concurrent_strong_root_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);

  heap->set_full_gc_in_progress(true);

  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_pre);
    heap->pre_full_gc_dump(_gc_timer);
  }

  {
    ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
    // Full GC is supposed to recover from any GC state:

    // a0. Remember if we have forwarded objects
    bool has_forwarded_objects = heap->has_forwarded_objects();

    // a1. Cancel evacuation, if in progress
    if (heap->is_evacuation_in_progress()) {
      heap->set_evacuation_in_progress(false);
    }
    assert(!heap->is_evacuation_in_progress(), "sanity");

    // a2. Cancel update-refs, if in progress
    if (heap->is_update_refs_in_progress()) {
      heap->set_update_refs_in_progress(false);
    }
    assert(!heap->is_update_refs_in_progress(), "sanity");

    // b. Cancel concurrent mark, if in progress
    if (heap->is_concurrent_mark_in_progress()) {
      ShenandoahConcurrentGC::cancel();
      heap->set_concurrent_mark_in_progress(false);
    }
    assert(!heap->is_concurrent_mark_in_progress(), "sanity");

    // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
    if (has_forwarded_objects) {
      update_roots(true /*full_gc*/);
    }

    // d. Reset the bitmaps for new marking
    heap->reset_mark_bitmap();
    assert(heap->marking_context()->is_bitmap_clear(), "sanity");
    assert(!heap->marking_context()->is_complete(), "sanity");

    // e. Abandon reference discovery and clear all discovered references.
    ShenandoahReferenceProcessor* rp = heap->ref_processor();
    rp->abandon_partial_discovery();

    // f. Sync pinned region status from the CP marks
    heap->sync_pinned_region_status();

    // The rest of the prologue:
    _preserved_marks->init(heap->workers()->active_workers());

    assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
  }

  if (UseTLAB) {
    heap->gclabs_retire(ResizeTLAB);
    heap->tlabs_retire(ResizeTLAB);
  }

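  // Full fence: make the GC state changes and TLAB retirement above visible
  // to all threads before marking starts.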
  OrderAccess::fence();

  phase1_mark_heap();

  // Once marking is done, which may have fixed up forwarded objects, we can drop it.
  // Coming out of Full GC, we would not have any forwarded objects.
  // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
  heap->set_has_forwarded_objects(false);

  heap->set_full_gc_move_in_progress(true);

  // Set up workers for the rest
  OrderAccess::fence();

  // Initialize worker slices
  ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
  for (uint i = 0; i < heap->max_workers(); i++) {
    worker_slices[i] = new ShenandoahHeapRegionSet();
  }

  {
    // The rest of the code performs region moves, where region status is undefined
    // until all phases complete.
    ShenandoahHeapLocker lock(heap->lock());

    phase2_calculate_target_addresses(worker_slices);

    OrderAccess::fence();

    phase3_update_references();

    phase4_compact_objects(worker_slices);
  }

  {
    // Epilogue
    _preserved_marks->restore(heap->workers());
    _preserved_marks->reclaim();
  }

  // Resize metaspace
  MetaspaceGC::compute_new_size();

  // Free worker slices
  for (uint i = 0; i < heap->max_workers(); i++) {
    delete worker_slices[i];
  }
  FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);

  heap->set_full_gc_move_in_progress(false);
  heap->set_full_gc_in_progress(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_fullgc();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_post);
    heap->post_full_gc_dump(_gc_timer);
  }
}

class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

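  // Treat the current top as top-at-mark-start, so that every object already
  // allocated in the region is subject to marking, and reset per-region
  // liveness before the STW mark recomputes it.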
  void heap_region_do(ShenandoahHeapRegion *r) {
    _ctx->capture_top_at_mark_start(r);
    r->clear_live_data();
  }
};

void ShenandoahFullGC::phase1_mark_heap() {
  GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahPrepareForMarkClosure cl;
  heap->heap_region_iterate(&cl);

  heap->set_unload_classes(heap->heuristics()->can_unload_classes());

  ShenandoahReferenceProcessor* rp = heap->ref_processor();
  // enable ("weak") refs discovery
  rp->set_soft_reference_policy(true); // forcefully purge all soft references

  ShenandoahSTWMark mark(true /*full_gc*/);
  mark.mark();
  heap->parallel_cleaning(true /* full_gc */);
}

class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  PreservedMarks* const _preserved_marks;
  ShenandoahHeap* const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion* _to_region;
  ShenandoahHeapRegion* _from_region;
  HeapWord* _compact_point;

public:
  ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
                                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                              ShenandoahHeapRegion* to_region) :
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(NULL),
    _compact_point(to_region->bottom()) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  void finish_region() {
    assert(_to_region != NULL, "should not happen");
    _to_region->set_new_top(_compact_point);
  }

  bool is_compact_same_region() {
    return _from_region == _to_region;
  }

  int empty_regions_pos() {
    return _empty_regions_pos;
  }

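  // Sliding compaction: each live object is forwarded to the current compact
  // point. When the current to-region runs out of space, continue in the next
  // remembered empty region, or in the from-region itself once empty regions
  // are exhausted.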
  void do_object(oop p) {
    assert(_from_region != NULL, "must set before work");
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");

    size_t obj_size = p->size();
    if (_compact_point + obj_size > _to_region->end()) {
      finish_region();

      // Object doesn't fit. Pick the next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty regions? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != NULL, "must not be NULL");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // Object fits into current region, record new location:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(NULL, p);
    _preserved_marks->push_if_necessary(p, p->mark());
    p->forward_to(cast_to_oop(_compact_point));
    _compact_point += obj_size;
  }
};

class ShenandoahPrepareForCompactionTask : public WorkerTask {
private:
  PreservedMarksSet* const _preserved_marks;
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices) :
    WorkerTask("Shenandoah Prepare For Compaction"),
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
  }

  static bool is_candidate_region(ShenandoahHeapRegion* r) {
    // Empty region: get it into the slice to defragment the slice itself.
    // We could have skipped this without violating correctness, but we really
    // want to compact all live regions to the start of the heap, which sometimes
    // means moving them into the fully empty regions.
    if (r->is_empty()) return true;

    // The region can be moved, and it is not humongous. Humongous regions are
    // not handled here, because their moves are special-cased elsewhere.
    return r->is_stw_move_allowed() && !r->is_humongous();
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
    ShenandoahHeapRegionSetIterator it(slice);
    ShenandoahHeapRegion* from_region = it.next();
    // No work?
    if (from_region == NULL) {
      return;
    }

    // Sliding compaction. Walk all regions in the slice, and compact them.
    // Remember empty regions and reuse them as needed.
    ResourceMark rm;

    GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());

    ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);

    while (from_region != NULL) {
      assert(is_candidate_region(from_region), "Sanity");

      cl.set_from_region(from_region);
      if (from_region->has_live()) {
        _heap->marked_object_iterate(from_region, &cl);
      }

      // Compacted the region to somewhere else? From-region is empty then.
      if (!cl.is_compact_same_region()) {
        empty_regions.append(from_region);
      }
      from_region = it.next();
    }
    cl.finish_region();

    // Mark all remaining regions as empty
    for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
      ShenandoahHeapRegion* r = empty_regions.at(pos);
      r->set_new_top(r->bottom());
    }
  }
};

void ShenandoahFullGC::calculate_target_humongous_objects() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compute the new addresses for humongous objects. We need to do this after addresses
  // for regular objects are calculated, and we know what regions in the heap suffix are
  // available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // the humongous start there.
  //
  // The complication is potential non-movable regions during the scan. If such a region is
  // detected, then sliding restarts towards that non-movable region.

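  // Example: in a 10-region heap where regions 5..9 end up empty after regular
  // compaction (new_top == bottom) and a movable 2-region humongous object
  // occupies regions 3..4, the backward scan lowers to_begin to 4, then slides
  // the object to start = to_end - 2 = 8: it is forwarded to regions 8..9 and
  // the window shrinks to [4; 8).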
  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion *r = heap->get_region(c - 1);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->index();
      continue;
    }

    if (r->is_humongous_start() && r->is_stw_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = cast_to_oop(r->bottom());
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->index()) {
        // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
        _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
        old_obj->forward_to(cast_to_oop(heap->get_region(start)->bottom()));
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Scan starting from current region.
    to_begin = r->index();
    to_end = r->index();
  }
}

class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_trash()) {
      r->recycle();
    }
    if (r->is_cset()) {
      r->make_regular_bypass();
    }
    if (r->is_empty_uncommitted()) {
      r->make_committed_bypass();
    }
    assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index());

    // Record current region occupancy: this communicates empty regions are free
    // to the rest of Full GC code.
    r->set_new_top(r->top());
  }
};

class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahTrashImmediateGarbageClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_humongous_start()) {
      oop humongous_obj = cast_to_oop(r->bottom());
      if (!_ctx->is_marked(humongous_obj)) {
        assert(!r->has_live(),
               "Region " SIZE_FORMAT " is not marked, should not have live", r->index());
        _heap->trash_humongous_region_at(r);
      } else {
        assert(r->has_live(),
               "Region " SIZE_FORMAT " should have live", r->index());
      }
    } else if (r->is_humongous_continuation()) {
      // If we hit continuation, the non-live humongous starts should have been trashed already
      assert(r->humongous_start_region()->has_live(),
             "Region " SIZE_FORMAT " should have live", r->index());
    } else if (r->is_regular()) {
      if (!r->has_live()) {
        r->make_trash_immediate();
      }
    }
  }
};

void ShenandoahFullGC::distribute_slices(ShenandoahHeapRegionSet** worker_slices) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  uint n_workers = heap->workers()->active_workers();
  size_t n_regions = heap->num_regions();

  // What we want to accomplish: have the dense prefix of data, while still balancing
  // out the parallel work.
  //
  // Assuming the amount of work is driven by the live data that needs moving, we can slice
  // the entire heap into equal-live-sized prefix slices, and compact into them. So, each
  // thread takes all regions in its prefix subset, and then it takes some regions from
  // the tail.
  //
  // Tail region selection becomes interesting.
  //
  // First, we want to distribute the regions fairly between the workers, and those regions
  // ... (the rest of distribute_slices() elided)

void ShenandoahFullGC::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
  ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // About to figure out which regions can be compacted, make sure pinning status
  // has been updated in the GC prologue.
  heap->assert_pinned_region_status();

  {
    // Trash the immediately collectible regions before computing addresses
    ShenandoahTrashImmediateGarbageClosure tigcl;
    heap->heap_region_iterate(&tigcl);

    // Make sure regions are in good state: committed, active, clean.
    // This is needed because we are potentially sliding the data through them.
    ShenandoahEnsureHeapActiveClosure ecl;
    heap->heap_region_iterate(&ecl);
  }

  // Compute the new addresses for regular objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);

    distribute_slices(worker_slices);

    ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
    heap->workers()->run_task(&task);
  }

  // Compute the new addresses for humongous objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
    calculate_target_humongous_objects();
  }
}

class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      assert(_ctx->is_marked(obj), "must be marked");
      if (obj->is_forwarded()) {
        oop forw = obj->forwardee();
        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
      }
    }
  }

public:
  ShenandoahAdjustPointersClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahAdjustPointersClosure _cl;

public:
  ShenandoahAdjustPointersObjectClosure() :
    _heap(ShenandoahHeap::heap()) {}

  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    p->oop_iterate(&_cl);
  }
};

class ShenandoahAdjustPointersTask : public WorkerTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahRegionIterator _regions;

public:
  ShenandoahAdjustPointersTask() :
    WorkerTask("Shenandoah Adjust Pointers"),
    _heap(ShenandoahHeap::heap()) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersObjectClosure obj_cl;
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      if (!r->is_humongous_continuation() && r->has_live()) {
        _heap->marked_object_iterate(r, &obj_cl);
      }
      r = _regions.next();
    }
  }
};

class ShenandoahAdjustRootPointersTask : public WorkerTask {
private:
  ShenandoahRootAdjuster* _rp;
  PreservedMarksSet* _preserved_marks;
public:
  ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
    WorkerTask("Shenandoah Adjust Root Pointers"),
    _rp(rp),
    _preserved_marks(preserved_marks) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersClosure cl;
    _rp->roots_do(worker_id, &cl);
    _preserved_marks->get(worker_id)->adjust_during_full_gc();
  }
};

// ... (phase3_update_references() and the head of ShenandoahPostCompactClosure
// elided; its heap_region_do(ShenandoahHeapRegion* r) resumes below)
    }

    size_t live = r->used();

    // Turn empty regions that have been allocated into regular regions
    if (r->is_empty() && live > 0) {
      r->make_regular_bypass();
    }

    // Reclaim regular regions that became empty
    if (r->is_regular() && live == 0) {
      r->make_trash();
    }

    // Recycle all trash regions
    if (r->is_trash()) {
      live = 0;
      r->recycle();
    }

    r->set_live_data(live);
    r->reset_alloc_metadata();
    _live += live;
  }

  size_t get_live() {
    return _live;
  }
};

void ShenandoahFullGC::compact_humongous_objects() {
  // Compact humongous regions, based on their fwdptr objects.
  //
  // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
  // humongous regions are already compacted, and do not require further moves, which alleviates
  // sliding costs. We may consider doing this in parallel in the future.

  ShenandoahHeap* heap = ShenandoahHeap::heap();

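  // Walk backwards, mirroring the backward scan in calculate_target_humongous_objects():
  // objects nearer the heap end are moved first, so destination regions near the end
  // have already been vacated by the time lower-index objects are copied into them.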
  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_start()) {
      oop old_obj = cast_to_oop(r->bottom());
      if (!old_obj->is_forwarded()) {
        // No need to move the object, it stays at the same slot
        continue;
      }
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t old_start = r->index();
      size_t old_end   = old_start + num_regions - 1;
      size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
      size_t new_end   = new_start + num_regions - 1;
      assert(old_start != new_start, "must be real move");
      assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());

      Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
                                   heap->get_region(new_start)->bottom(),
                                   words_size);

      oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
      new_obj->init_mark();

      {
        for (size_t c = old_start; c <= old_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          r->make_regular_bypass();
          r->set_top(r->bottom());
        }

        for (size_t c = new_start; c <= new_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          if (c == new_start) {
            r->make_humongous_start_bypass();
          } else {
            r->make_humongous_cont_bypass();
          }

          // Trailing region may be non-full, record the remainder there
          size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
          if ((c == new_end) && (remainder != 0)) {
            r->set_top(r->bottom() + remainder);
          } else {
            r->set_top(r->end());
          }

          r->reset_alloc_metadata();
        }
      }
    }
  }
}

// This is slightly different from ShHeap::reset_next_mark_bitmap:
// we need to remain able to walk pinned regions.
// Since pinned regions do not move and don't get compacted, we will get holes with
// ... (the rest of this comment, ShenandoahMCResetCompleteBitmapTask, and the
// start of phase4_compact_objects(worker_slices) elided)
  }

  // Compact humongous objects after regular object moves
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
    compact_humongous_objects();
  }

  // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
  // and must ensure the bitmap is in sync.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
    ShenandoahMCResetCompleteBitmapTask task;
    heap->workers()->run_task(&task);
  }

  // Bring regions into proper states after the collection, and set heap properties.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);

    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);
    heap->set_used(post_compact.get_live());

    heap->collection_set()->clear();
    heap->free_set()->rebuild();
  }

  heap->clear_cancelled_gc();
}

// ============================================================================
// Generational Shenandoah variant of shenandoahFullGC.cpp
// ============================================================================

 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "compiler/oopMap.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"

// After Full GC is done, reconstruct the remembered set by iterating over OLD regions,
// registering all objects between bottom() and top(), and setting remembered set cards to
// DIRTY if they hold interesting pointers.
class ShenandoahReconstructRememberedSetTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahReconstructRememberedSetTask() :
    WorkerTask("Shenandoah Reconstruct Remembered Set") { }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegion* r = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    RememberedScanner* scanner = heap->card_scan();
    ShenandoahSetRememberedCardsToDirtyClosure dirty_cards_for_interesting_pointers;

    while (r != NULL) {
      if (r->is_old() && r->is_active()) {
        HeapWord* obj_addr = r->bottom();
        if (r->is_humongous_start()) {
          // The humongous object spans from bottom() for size words
          oop obj = cast_to_oop(obj_addr);
          size_t size = obj->size();
          HeapWord* end_object = r->bottom() + size;

          // First, clear the remembered set for all spanned humongous regions
          size_t num_regions = (size + ShenandoahHeapRegion::region_size_words() - 1) / ShenandoahHeapRegion::region_size_words();
          size_t region_span = num_regions * ShenandoahHeapRegion::region_size_words();
          scanner->reset_remset(r->bottom(), region_span);
          size_t region_index = r->index();
          ShenandoahHeapRegion* humongous_region = heap->get_region(region_index);
          while (num_regions-- != 0) {
            scanner->reset_object_range(humongous_region->bottom(), humongous_region->end());
            region_index++;
            humongous_region = heap->get_region(region_index);
          }

          // Then register the humongous object and DIRTY the relevant remembered set cards
          scanner->register_object_wo_lock(obj_addr);
          obj->oop_iterate(&dirty_cards_for_interesting_pointers);
        } else if (!r->is_humongous()) {
          // First, clear the remembered set
          scanner->reset_remset(r->bottom(), ShenandoahHeapRegion::region_size_words());
          scanner->reset_object_range(r->bottom(), r->end());

          // Then iterate over all objects, registering each object and DIRTYing the relevant remembered set cards
          HeapWord* t = r->top();
          while (obj_addr < t) {
            oop obj = cast_to_oop(obj_addr);
            scanner->register_object_wo_lock(obj_addr);
            obj_addr += obj->oop_iterate_size(&dirty_cards_for_interesting_pointers);
          }
        } // else, ignore humongous continuation region
      }
      // else, this region is FREE or YOUNG or inactive and we can ignore it.
      r = _regions.next();
    }
  }
};

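// The PreservedMarksSet is allocated on the C-heap ('true' == in_c_heap),
// since it must survive across all full GC phases rather than a single
// resource scope.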
ShenandoahFullGC::ShenandoahFullGC() :
  _gc_timer(ShenandoahHeap::heap()->gc_timer()),
  _preserved_marks(new PreservedMarksSet(true)) {}

bool ShenandoahFullGC::collect(GCCause::Cause cause) {
  vmop_entry_full(cause);
  // Always succeeds
  return true;
}

void ShenandoahFullGC::vmop_entry_full(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  TraceCollectorStats tcs(heap->monitoring_support()->full_stw_collection_counters());
  ShenandoahTimingsTracker timing(ShenandoahPhaseTimings::full_gc_gross);

  heap->try_inject_alloc_failure();
  VM_ShenandoahFullGC op(cause, this);
  VMThread::execute(&op);
}

void ShenandoahFullGC::entry_full(GCCause::Cause cause) {
  static const char* msg = "Pause Full";
  ShenandoahPausePhase gc_phase(msg, ShenandoahPhaseTimings::full_gc, true /* log_heap_usage */);
  EventMark em("%s", msg);

  ShenandoahWorkerScope scope(ShenandoahHeap::heap()->workers(),
                              ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
                              "full gc");

  op_full(cause);
}

void ShenandoahFullGC::op_full(GCCause::Cause cause) {
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahMetricsSnapshot metrics;
  metrics.snap_before();

  // Perform full GC
  do_it(cause);

  metrics.snap_after();

  if (metrics.is_good_progress()) {
    heap->notify_gc_progress();
  } else {
    // Nothing to do. Tell the allocation path that we have failed to make
    // progress, and it can finally fail.
    heap->notify_gc_no_progress();
  }
}

void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  // Since we may arrive here from a degenerated GC failure of either young or old, establish the generation as GLOBAL.
  heap->set_gc_generation(heap->global_generation());

  // There will be no concurrent allocations during full GC, so reset these coordination variables.
  heap->young_generation()->unadjust_available();
  heap->old_generation()->unadjust_available();
  // No need to old_gen->increase_used(). That was done when plabs were allocated,
  // accounting for both old evacs and promotions.

  heap->set_alloc_supplement_reserve(0);
  heap->set_young_evac_reserve(0);
  heap->set_old_evac_reserve(0);
  heap->reset_old_evac_expended();
  heap->set_promotion_reserve(0);

  if (heap->mode()->is_generational()) {
    // Full GC supersedes any marking or coalescing in old generation.
    heap->cancel_old_gc();
  }

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_fullgc();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  // Degenerated GC may carry concurrent root flags when upgrading to
  // full GC. We need to reset them before mutators resume.
  heap->set_concurrent_strong_root_in_progress(false);
  heap->set_concurrent_weak_root_in_progress(false);

  heap->set_full_gc_in_progress(true);

  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_pre);
    heap->pre_full_gc_dump(_gc_timer);
  }

  {
    ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
    // Full GC is supposed to recover from any GC state:

    // a0. Remember if we have forwarded objects
    bool has_forwarded_objects = heap->has_forwarded_objects();

    // a1. Cancel evacuation, if in progress
    if (heap->is_evacuation_in_progress()) {
      heap->set_evacuation_in_progress(false);
    }
    assert(!heap->is_evacuation_in_progress(), "sanity");

    // a2. Cancel update-refs, if in progress
    if (heap->is_update_refs_in_progress()) {
      heap->set_update_refs_in_progress(false);
    }
    assert(!heap->is_update_refs_in_progress(), "sanity");

    // b. Cancel all concurrent marks, if in progress
    if (heap->is_concurrent_mark_in_progress()) {
      heap->cancel_concurrent_mark();
    }
    assert(!heap->is_concurrent_mark_in_progress(), "sanity");

    // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
    if (has_forwarded_objects) {
      update_roots(true /*full_gc*/);
    }

    // d. Reset the bitmaps for new marking
    heap->global_generation()->reset_mark_bitmap();
    assert(heap->marking_context()->is_bitmap_clear(), "sanity");
    assert(!heap->global_generation()->is_mark_complete(), "sanity");

    // e. Abandon reference discovery and clear all discovered references.
    ShenandoahReferenceProcessor* rp = heap->global_generation()->ref_processor();
    rp->abandon_partial_discovery();

    // f. Sync pinned region status from the CP marks
    heap->sync_pinned_region_status();

    // The rest of the prologue:
    _preserved_marks->init(heap->workers()->active_workers());

    assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
  }

  if (UseTLAB) {
    // TODO: Do we need to explicitly retire PLABs?
    heap->gclabs_retire(ResizeTLAB);
    heap->tlabs_retire(ResizeTLAB);
  }

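  // Full fence: make the GC state changes and TLAB retirement above visible
  // to all threads before marking starts.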
  OrderAccess::fence();

  phase1_mark_heap();

  // Once marking is done, which may have fixed up forwarded objects, we can drop it.
  // Coming out of Full GC, we would not have any forwarded objects.
  // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
  heap->set_has_forwarded_objects(false);

  heap->set_full_gc_move_in_progress(true);

  // Set up workers for the rest
  OrderAccess::fence();

  // Initialize worker slices
  ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
  for (uint i = 0; i < heap->max_workers(); i++) {
    worker_slices[i] = new ShenandoahHeapRegionSet();
  }

  {
    // The rest of the code performs region moves, where region status is undefined
    // until all phases complete.
    ShenandoahHeapLocker lock(heap->lock());

    phase2_calculate_target_addresses(worker_slices);

    OrderAccess::fence();

    phase3_update_references();

    phase4_compact_objects(worker_slices);
  }

  {
    // Epilogue
    _preserved_marks->restore(heap->workers());
    _preserved_marks->reclaim();

    if (heap->mode()->is_generational()) {
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_reconstruct_remembered_set);
      ShenandoahReconstructRememberedSetTask task;
      heap->workers()->run_task(&task);
    }
  }

  // Resize metaspace
  MetaspaceGC::compute_new_size();

  // Free worker slices
  for (uint i = 0; i < heap->max_workers(); i++) {
    delete worker_slices[i];
  }
  FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);

  heap->set_full_gc_move_in_progress(false);
  heap->set_full_gc_in_progress(false);

  if (ShenandoahVerify) {
    if (heap->mode()->is_generational()) {
      heap->verifier()->verify_after_generational_fullgc();
    } else {
      heap->verifier()->verify_after_fullgc();
    }
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_post);
    heap->post_full_gc_dump(_gc_timer);
  }
}

class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

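  // For affiliated (non-FREE) regions, treat the current top as top-at-mark-start,
  // so that every object already allocated in the region is subject to marking,
  // and reset per-region liveness before the STW mark recomputes it.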
  void heap_region_do(ShenandoahHeapRegion *r) {
    if (r->affiliation() != FREE) {
      _ctx->capture_top_at_mark_start(r);
      r->clear_live_data();
    }
  }

  bool is_thread_safe() { return true; }
};

void ShenandoahFullGC::phase1_mark_heap() {
  GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahPrepareForMarkClosure cl;
  heap->parallel_heap_region_iterate(&cl);

  heap->set_unload_classes(heap->global_generation()->heuristics()->can_unload_classes());

  ShenandoahReferenceProcessor* rp = heap->global_generation()->ref_processor();
  // enable ("weak") refs discovery
  rp->set_soft_reference_policy(true); // forcefully purge all soft references

  ShenandoahSTWMark mark(heap->global_generation(), true /*full_gc*/);
  mark.mark();
  heap->parallel_cleaning(true /* full_gc */);
}

class ShenandoahPrepareForCompactionTask : public WorkerTask {
private:
  PreservedMarksSet* const _preserved_marks;
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;
  size_t const _num_workers;

public:
  ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices,
                                     size_t num_workers);

  static bool is_candidate_region(ShenandoahHeapRegion* r) {
    // Empty region: get it into the slice to defragment the slice itself.
    // We could have skipped this without violating correctness, but we really
    // want to compact all live regions to the start of the heap, which sometimes
    // means moving them into the fully empty regions.
    if (r->is_empty()) return true;

    // The region can be moved, and it is not humongous. Humongous regions are
    // not handled here, because their moves are special-cased elsewhere.
    return r->is_stw_move_allowed() && !r->is_humongous();
  }

  void work(uint worker_id);
};

class ShenandoahPrepareForGenerationalCompactionObjectClosure : public ObjectClosure {
private:
  ShenandoahPrepareForCompactionTask* _compactor;
  PreservedMarks* const _preserved_marks;
  ShenandoahHeap* const _heap;

  // _empty_regions is a thread-local list of heap regions that have been completely emptied by this worker thread's
  // compaction efforts. The worker thread that drives these efforts adds compacted regions to this list if the
  // region has not been compacted onto itself.
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion* _old_to_region;
  ShenandoahHeapRegion* _young_to_region;
  ShenandoahHeapRegion* _from_region;
  ShenandoahRegionAffiliation _from_affiliation;
  HeapWord* _old_compact_point;
  HeapWord* _young_compact_point;
  uint _worker_id;

public:
  ShenandoahPrepareForGenerationalCompactionObjectClosure(ShenandoahPrepareForCompactionTask* compactor,
                                                          PreservedMarks* preserved_marks,
                                                          GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                                          ShenandoahHeapRegion* old_to_region,
                                                          ShenandoahHeapRegion* young_to_region, uint worker_id) :
    _compactor(compactor),
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _old_to_region(old_to_region),
    _young_to_region(young_to_region),
    _from_region(NULL),
    _old_compact_point((old_to_region != nullptr)? old_to_region->bottom(): nullptr),
    _young_compact_point((young_to_region != nullptr)? young_to_region->bottom(): nullptr),
    _worker_id(worker_id) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
    _from_affiliation = from_region->affiliation();
    if (_from_region->has_live()) {
      if (_from_affiliation == ShenandoahRegionAffiliation::OLD_GENERATION) {
        if (_old_to_region == nullptr) {
          _old_to_region = from_region;
          _old_compact_point = from_region->bottom();
        }
      } else {
        assert(_from_affiliation == ShenandoahRegionAffiliation::YOUNG_GENERATION, "from_region must be OLD or YOUNG");
        if (_young_to_region == nullptr) {
          _young_to_region = from_region;
          _young_compact_point = from_region->bottom();
        }
      }
    } // else, we won't iterate over this _from_region, so we don't need to set up a to-region to hold copies
  }

  void finish() {
    finish_old_region();
    finish_young_region();
  }

  void finish_old_region() {
    if (_old_to_region != nullptr) {
      log_debug(gc)("Planned compaction into Old Region " SIZE_FORMAT ", used: " SIZE_FORMAT " tabulated by worker %u",
                    _old_to_region->index(), _old_compact_point - _old_to_region->bottom(), _worker_id);
      _old_to_region->set_new_top(_old_compact_point);
      _old_to_region = nullptr;
    }
  }

  void finish_young_region() {
    if (_young_to_region != nullptr) {
      log_debug(gc)("Worker %u planned compaction into Young Region " SIZE_FORMAT ", used: " SIZE_FORMAT,
                    _worker_id, _young_to_region->index(), _young_compact_point - _young_to_region->bottom());
      _young_to_region->set_new_top(_young_compact_point);
      _young_to_region = nullptr;
    }
  }

  bool is_compact_same_region() {
    return (_from_region == _old_to_region) || (_from_region == _young_to_region);
  }

  int empty_regions_pos() {
    return _empty_regions_pos;
  }

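  // For YOUNG from-regions, an object whose accumulated age (region age + object
  // age) exceeds InitialTenuringThreshold is promoted: it is forwarded into an
  // OLD to-region when one is available; otherwise promotion is deferred and the
  // object stays YOUNG for this cycle.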
  void do_object(oop p) {
    assert(_from_region != NULL, "must set before work");
    assert((_from_region->bottom() <= cast_from_oop<HeapWord*>(p)) && (cast_from_oop<HeapWord*>(p) < _from_region->top()),
           "Object must reside in _from_region");
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");

    size_t obj_size = p->size();
    uint from_region_age = _from_region->age();
    uint object_age = p->age();

    bool promote_object = false;
    if ((_from_affiliation == ShenandoahRegionAffiliation::YOUNG_GENERATION) &&
        (from_region_age + object_age > InitialTenuringThreshold)) {
      if ((_old_to_region != nullptr) && (_old_compact_point + obj_size > _old_to_region->end())) {
        finish_old_region();
        _old_to_region = nullptr;
      }
      if (_old_to_region == nullptr) {
        if (_empty_regions_pos < _empty_regions.length()) {
          ShenandoahHeapRegion* new_to_region = _empty_regions.at(_empty_regions_pos);
          _empty_regions_pos++;
          new_to_region->set_affiliation(OLD_GENERATION);
          _old_to_region = new_to_region;
          _old_compact_point = _old_to_region->bottom();
          promote_object = true;
        }
        // Else this worker thread does not yet have any empty regions into which this aged object can be
        // promoted, so we leave promote_object as false, deferring the promotion.
      } else {
        promote_object = true;
      }
    }

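    // Forward the object within its target generation, using the same sliding
    // scheme as the single-generation closure, but with separate OLD and YOUNG
    // compact points.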
    if (promote_object || (_from_affiliation == ShenandoahRegionAffiliation::OLD_GENERATION)) {
      assert(_old_to_region != nullptr, "_old_to_region should not be NULL when evacuating to OLD region");
      if (_old_compact_point + obj_size > _old_to_region->end()) {
        ShenandoahHeapRegion* new_to_region;

        log_debug(gc)("Worker %u finishing old region " SIZE_FORMAT ", compact_point: " PTR_FORMAT ", obj_size: " SIZE_FORMAT
                      ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT, _worker_id, _old_to_region->index(),
                      p2i(_old_compact_point), obj_size, p2i(_old_compact_point + obj_size), p2i(_old_to_region->end()));

        // Object does not fit. Get a new _old_to_region.
        finish_old_region();
        if (_empty_regions_pos < _empty_regions.length()) {
          new_to_region = _empty_regions.at(_empty_regions_pos);
          _empty_regions_pos++;
          new_to_region->set_affiliation(OLD_GENERATION);
        } else {
          // If we've exhausted the previously selected _old_to_region, we know that the _old_to_region is distinct
          // from _from_region. That's because there is always room for _from_region to be compacted into itself.
          // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction.
          new_to_region = _from_region;
        }

        assert(new_to_region != _old_to_region, "must not reuse same OLD to-region");
        assert(new_to_region != NULL, "must not be NULL");
        _old_to_region = new_to_region;
        _old_compact_point = _old_to_region->bottom();
      }

      // Object fits into current region, record new location:
      assert(_old_compact_point + obj_size <= _old_to_region->end(), "must fit");
      shenandoah_assert_not_forwarded(NULL, p);
      _preserved_marks->push_if_necessary(p, p->mark());
      p->forward_to(cast_to_oop(_old_compact_point));
      _old_compact_point += obj_size;
    } else {
      assert(_from_affiliation == ShenandoahRegionAffiliation::YOUNG_GENERATION,
             "_from_region must be OLD_GENERATION or YOUNG_GENERATION");
      assert(_young_to_region != nullptr, "_young_to_region should not be NULL when compacting YOUNG _from_region");

      // After full gc compaction, all regions have age 0. Embed the region's age into the object's age in order
      // to preserve tenuring progress.
      _heap->increase_object_age(p, from_region_age + 1);

      if (_young_compact_point + obj_size > _young_to_region->end()) {
        ShenandoahHeapRegion* new_to_region;

        log_debug(gc)("Worker %u finishing young region " SIZE_FORMAT ", compact_point: " PTR_FORMAT ", obj_size: " SIZE_FORMAT
                      ", &compact_point[obj_size]: " PTR_FORMAT ", region end: " PTR_FORMAT, _worker_id, _young_to_region->index(),
                      p2i(_young_compact_point), obj_size, p2i(_young_compact_point + obj_size), p2i(_young_to_region->end()));

        // Object does not fit. Get a new _young_to_region.
        finish_young_region();
        if (_empty_regions_pos < _empty_regions.length()) {
          new_to_region = _empty_regions.at(_empty_regions_pos);
          _empty_regions_pos++;
          new_to_region->set_affiliation(YOUNG_GENERATION);
        } else {
          // If we've exhausted the previously selected _young_to_region, we know that the _young_to_region is distinct
          // from _from_region. That's because there is always room for _from_region to be compacted into itself.
          // Since we're out of empty regions, let's use _from_region to hold the results of its own compaction.
          new_to_region = _from_region;
        }

        assert(new_to_region != _young_to_region, "must not reuse same YOUNG to-region");
        assert(new_to_region != NULL, "must not be NULL");
        _young_to_region = new_to_region;
        _young_compact_point = _young_to_region->bottom();
      }

      // Object fits into current region, record new location:
      assert(_young_compact_point + obj_size <= _young_to_region->end(), "must fit");
      shenandoah_assert_not_forwarded(NULL, p);
      _preserved_marks->push_if_necessary(p, p->mark());
      p->forward_to(cast_to_oop(_young_compact_point));
      _young_compact_point += obj_size;
    }
  }
};

class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  PreservedMarks* const _preserved_marks;
  ShenandoahHeap* const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion* _to_region;
  ShenandoahHeapRegion* _from_region;
  HeapWord* _compact_point;

public:
  ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
                                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                              ShenandoahHeapRegion* to_region) :
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(NULL),
    _compact_point(to_region->bottom()) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  void finish_region() {
    assert(_to_region != NULL, "should not happen");
    assert(!_heap->mode()->is_generational(), "Generational GC should use a different closure");
    _to_region->set_new_top(_compact_point);
  }

  bool is_compact_same_region() {
    return _from_region == _to_region;
  }

  int empty_regions_pos() {
    return _empty_regions_pos;
  }

  void do_object(oop p) {
    assert(_from_region != NULL, "must set before work");
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");

    size_t obj_size = p->size();
    if (_compact_point + obj_size > _to_region->end()) {
      finish_region();

      // Object doesn't fit. Pick the next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty regions? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != NULL, "must not be NULL");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // Object fits into current region, record new location:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(NULL, p);
    _preserved_marks->push_if_necessary(p, p->mark());
    p->forward_to(cast_to_oop(_compact_point));
    _compact_point += obj_size;
  }
};

ShenandoahPrepareForCompactionTask::ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks,
                                                                       ShenandoahHeapRegionSet **worker_slices,
                                                                       size_t num_workers) :
  WorkerTask("Shenandoah Prepare For Compaction"),
  _preserved_marks(preserved_marks), _heap(ShenandoahHeap::heap()),
  _worker_slices(worker_slices), _num_workers(num_workers) { }

void ShenandoahPrepareForCompactionTask::work(uint worker_id) {
  ShenandoahParallelWorkerSession worker_session(worker_id);
  ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
  ShenandoahHeapRegionSetIterator it(slice);
  ShenandoahHeapRegion* from_region = it.next();
  // No work?
  if (from_region == NULL) {
    return;
  }

  // Sliding compaction. Walk all regions in the slice, and compact them.
  // Remember empty regions and reuse them as needed.
  ResourceMark rm;

  GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());

  if (_heap->mode()->is_generational()) {
    ShenandoahHeapRegion* old_to_region = (from_region->is_old())? from_region: nullptr;
    ShenandoahHeapRegion* young_to_region = (from_region->is_young())? from_region: nullptr;
    ShenandoahPrepareForGenerationalCompactionObjectClosure cl(this, _preserved_marks->get(worker_id), empty_regions,
                                                               old_to_region, young_to_region, worker_id);
    while (from_region != NULL) {
      assert(is_candidate_region(from_region), "Sanity");
      log_debug(gc)("Worker %u compacting %s Region " SIZE_FORMAT " which had used " SIZE_FORMAT " and %s live",
                    worker_id, affiliation_name(from_region->affiliation()),
                    from_region->index(), from_region->used(), from_region->has_live()? "has": "does not have");
      cl.set_from_region(from_region);
      if (from_region->has_live()) {
        _heap->marked_object_iterate(from_region, &cl);
      }

      // Compacted the region to somewhere else? From-region is empty then.
      if (!cl.is_compact_same_region()) {
        empty_regions.append(from_region);
      }
      from_region = it.next();
    }
    cl.finish();

    // Mark all remaining regions as empty
    for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
      ShenandoahHeapRegion* r = empty_regions.at(pos);
      r->set_new_top(r->bottom());
    }
  } else {
    ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);
    while (from_region != NULL) {
      assert(is_candidate_region(from_region), "Sanity");
      cl.set_from_region(from_region);
      if (from_region->has_live()) {
        _heap->marked_object_iterate(from_region, &cl);
      }

      // Compacted the region to somewhere else? From-region is empty then.
      if (!cl.is_compact_same_region()) {
        empty_regions.append(from_region);
      }
      from_region = it.next();
    }
    cl.finish_region();

    // Mark all remaining regions as empty
    for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
      ShenandoahHeapRegion* r = empty_regions.at(pos);
      r->set_new_top(r->bottom());
    }
  }
}

void ShenandoahFullGC::calculate_target_humongous_objects() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compute the new addresses for humongous objects. We need to do this after addresses
  // for regular objects are calculated, and we know what regions in the heap suffix are
  // available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // the humongous start there.
  //
  // The complication is potential non-movable regions during the scan. If such a region is
  // detected, then sliding restarts towards that non-movable region.

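  // Example: in a 10-region heap where regions 5..9 end up empty after regular
  // compaction (new_top == bottom) and a movable 2-region humongous object
  // occupies regions 3..4, the backward scan lowers to_begin to 4, then slides
  // the object to start = to_end - 2 = 8: it is forwarded to regions 8..9 and
  // the window shrinks to [4; 8).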
  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();

  log_debug(gc)("Full GC calculating target humongous objects from end " SIZE_FORMAT, to_end);
  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion *r = heap->get_region(c - 1);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->index();
      continue;
    }

    if (r->is_humongous_start() && r->is_stw_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = cast_to_oop(r->bottom());
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->index()) {
        // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
        _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
        old_obj->forward_to(cast_to_oop(heap->get_region(start)->bottom()));
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Scan starting from current region.
    to_begin = r->index();
    to_end = r->index();
  }
}

class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_trash()) {
      r->recycle();
    }
    if (r->is_cset()) {
      r->make_regular_bypass();
    }
    if (r->is_empty_uncommitted()) {
      r->make_committed_bypass();
    }
    assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index());

    // Record current region occupancy: this communicates empty regions are free
    // to the rest of Full GC code.
    r->set_new_top(r->top());
  }
};
842
843 class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
844 private:
845 ShenandoahHeap* const _heap;
846 ShenandoahMarkingContext* const _ctx;
847
848 public:
849 ShenandoahTrashImmediateGarbageClosure() :
850 _heap(ShenandoahHeap::heap()),
851 _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
852
853 void heap_region_do(ShenandoahHeapRegion* r) {
854 if (r->affiliation() != FREE) {
855 if (r->is_humongous_start()) {
856 oop humongous_obj = cast_to_oop(r->bottom());
857 if (!_ctx->is_marked(humongous_obj)) {
858 assert(!r->has_live(),
859 "Humongous Start %s Region " SIZE_FORMAT " is not marked, should not have live",
860 affiliation_name(r->affiliation()), r->index());
861 log_debug(gc)("Trashing immediate humongous region " SIZE_FORMAT " because not marked", r->index());
862 _heap->trash_humongous_region_at(r);
863 } else {
864 assert(r->has_live(),
865 "Humongous Start %s Region " SIZE_FORMAT " should have live", affiliation_name(r->affiliation()), r->index());
866 }
867 } else if (r->is_humongous_continuation()) {
868 // If we hit a continuation, the non-live humongous starts should have been trashed already
869 assert(r->humongous_start_region()->has_live(),
870 "Humongous Continuation %s Region " SIZE_FORMAT " should have live", affiliation_name(r->affiliation()), r->index());
871 } else if (r->is_regular()) {
872 if (!r->has_live()) {
873 log_debug(gc)("Trashing immediate regular region " SIZE_FORMAT " because it has no live data", r->index());
874 r->make_trash_immediate();
875 }
876 }
877 }
878 // else, ignore this FREE region.
879 // TODO: change iterators so they do not process FREE regions.
880 }
881 };
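// This closure is applied below in phase 2 via heap->heap_region_iterate(&tigcl),
// immediately before ShenandoahEnsureHeapActiveClosure normalizes the surviving regions.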
882
883 void ShenandoahFullGC::distribute_slices(ShenandoahHeapRegionSet** worker_slices) {
884 ShenandoahHeap* heap = ShenandoahHeap::heap();
885
886 uint n_workers = heap->workers()->active_workers();
887 size_t n_regions = heap->num_regions();
888
889 // What we want to accomplish: have a dense prefix of data, while still balancing
890 // out the parallel work.
891 //
892 // Assuming the amount of work is driven by the live data that needs moving, we can slice
893 // the entire heap into equal-live-sized prefix slices, and compact into them. So, each
894 // thread takes all regions in its prefix subset, and then it takes some regions from
895 // the tail.
896 //
897 // Tail region selection becomes interesting.
898 //
899 // First, we want to distribute the regions fairly between the workers, and those regions
1026 GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
1027 ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);
1028
1029 ShenandoahHeap* heap = ShenandoahHeap::heap();
1030
1031 // We are about to figure out which regions can be compacted; make sure the pinning status
1032 // has been updated in the GC prologue.
1033 heap->assert_pinned_region_status();
1034
1035 {
1036 // Trash the immediately collectible regions before computing addresses
1037 ShenandoahTrashImmediateGarbageClosure tigcl;
1038 heap->heap_region_iterate(&tigcl);
1039
1040 // Make sure regions are in a good state: committed, active, clean.
1041 // This is needed because we are potentially sliding the data through them.
1042 ShenandoahEnsureHeapActiveClosure ecl;
1043 heap->heap_region_iterate(&ecl);
1044 }
1045
1046 if (heap->mode()->is_generational()) {
1047 heap->young_generation()->clear_used();
1048 heap->old_generation()->clear_used();
1049 }
1050
1051 // Compute the new addresses for regular objects
1052 {
1053 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);
1054
1055 distribute_slices(worker_slices);
1056
1057 size_t num_workers = heap->max_workers();
1058
1059 ResourceMark rm;
1060 ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices, num_workers);
1061 heap->workers()->run_task(&task);
1062 }
1063
1064 // Compute the new addresses for humongous objects
1065 {
1066 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
1067 calculate_target_humongous_objects();
1068 }
1069 }
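// At the end of phase 2, every live object that needs to move carries its target address
// in its mark word (installed by forward_to()), with original marks saved in
// _preserved_marks where necessary; no data has been copied yet.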
1070
1071 class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
1072 private:
1073 ShenandoahHeap* const _heap;
1074 ShenandoahMarkingContext* const _ctx;
1075
1076 template <class T>
1077 inline void do_oop_work(T* p) {
1078 T o = RawAccess<>::oop_load(p);
1079 if (!CompressedOops::is_null(o)) {
1080 oop obj = CompressedOops::decode_not_null(o);
1112
1113 class ShenandoahAdjustPointersTask : public WorkerTask {
1114 private:
1115 ShenandoahHeap* const _heap;
1116 ShenandoahRegionIterator _regions;
1117
1118 public:
1119 ShenandoahAdjustPointersTask() :
1120 WorkerTask("Shenandoah Adjust Pointers"),
1121 _heap(ShenandoahHeap::heap()) {
1122 }
1123
1124 void work(uint worker_id) {
1125 ShenandoahParallelWorkerSession worker_session(worker_id);
1126 ShenandoahAdjustPointersObjectClosure obj_cl;
1127 ShenandoahHeapRegion* r = _regions.next();
1128 while (r != NULL) {
1129 if (!r->is_humongous_continuation() && r->has_live()) {
1130 _heap->marked_object_iterate(r, &obj_cl);
1131 }
1132 if (r->is_pinned() && r->is_old() && r->is_active() && !r->is_humongous()) {
1133 // Pinned regions are not compacted, so they may still hold unmarked objects with
1134 // references to reclaimed memory. Remembered set scanning will crash if it attempts
1135 // to iterate the oops in these objects.
1136 r->begin_preemptible_coalesce_and_fill();
1137 r->oop_fill_and_coalesce_wo_cancel();
1138 }
1139 r = _regions.next();
1140 }
1141 }
1142 };
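// A sketch of the usual invocation, following the same worker-task pattern used by the
// other parallel phases in this file:
//
//   ShenandoahAdjustPointersTask adjust_pointers_task;
//   heap->workers()->run_task(&adjust_pointers_task);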
1143
1144 class ShenandoahAdjustRootPointersTask : public WorkerTask {
1145 private:
1146 ShenandoahRootAdjuster* _rp;
1147 PreservedMarksSet* _preserved_marks;
1148 public:
1149 ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
1150 WorkerTask("Shenandoah Adjust Root Pointers"),
1151 _rp(rp),
1152 _preserved_marks(preserved_marks) {}
1153
1154 void work(uint worker_id) {
1155 ShenandoahParallelWorkerSession worker_session(worker_id);
1156 ShenandoahAdjustPointersClosure cl;
1157 _rp->roots_do(worker_id, &cl);
1158 _preserved_marks->get(worker_id)->adjust_during_full_gc();
1257 }
1258
1259 size_t live = r->used();
1260
1261 // Make empty regions that have been allocated into regular regions
1262 if (r->is_empty() && live > 0) {
1263 r->make_regular_bypass();
1264 }
1265
1266 // Reclaim regular regions that became empty
1267 if (r->is_regular() && live == 0) {
1268 r->make_trash();
1269 }
1270
1271 // Recycle all trash regions
1272 if (r->is_trash()) {
1273 live = 0;
1274 r->recycle();
1275 }
1276
1277 // Update final usage for generations
1278 if (_heap->mode()->is_generational() && live != 0) {
1279 if (r->is_young()) {
1280 _heap->young_generation()->increase_used(live);
1281 } else if (r->is_old()) {
1282 _heap->old_generation()->increase_used(live);
1283 }
1284 }
1285
1286 r->set_live_data(live);
1287 r->reset_alloc_metadata();
1288 _live += live;
1289 }
1290
1291 size_t get_live() {
1292 return _live;
1293 }
1294 };
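// To illustrate the transitions above with hypothetical regions: an empty region that
// received compacted data (used() > 0) is made regular; a regular region whose objects
// all moved away (used() == 0) is made trash; and trash regions are recycled with
// live == 0, so they contribute nothing to the generation usage totals.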
1295
1296 void ShenandoahFullGC::compact_humongous_objects() {
1297 // Compact humongous regions, based on their forwarding pointers.
1298 //
1299 // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
1300 // humongous regions are already compacted, and do not require further moves, which alleviates
1301 // sliding costs. We may consider doing this in parallel in the future.
1302
1303 ShenandoahHeap* heap = ShenandoahHeap::heap();
1304
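// Scanning from the highest region index downwards mirrors the order in which targets
// were assigned in calculate_target_humongous_objects(): by the time an object is copied
// here, every region in its destination window has already been visited, and any forwarded
// humongous object it contained has been moved out.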
1305 for (size_t c = heap->num_regions(); c > 0; c--) {
1306 ShenandoahHeapRegion* r = heap->get_region(c - 1);
1307 if (r->is_humongous_start()) {
1308 oop old_obj = cast_to_oop(r->bottom());
1309 if (!old_obj->is_forwarded()) {
1310 // No need to move the object, it stays in the same slot
1311 continue;
1312 }
1313 size_t words_size = old_obj->size();
1314 size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
1315
1316 size_t old_start = r->index();
1317 size_t old_end = old_start + num_regions - 1;
1318 size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
1319 size_t new_end = new_start + num_regions - 1;
1320 assert(old_start != new_start, "must be real move");
1321 assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());
1322
1323 log_debug(gc)("Full GC compaction moves humongous object from region " SIZE_FORMAT " to region " SIZE_FORMAT,
1324 old_start, new_start);
1325
1326 Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
1327 heap->get_region(new_start)->bottom(),
1328 words_size);
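// Copy::aligned_conjoint_words is the overlap-tolerant ("conjoint") variant, so the copy
// remains correct even if the destination window overlaps the object's original footprint.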
1329
1330 oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
1331 new_obj->init_mark();
1332
1333 {
1334 ShenandoahRegionAffiliation original_affiliation = r->affiliation();
1335 for (size_t c = old_start; c <= old_end; c++) {
1336 ShenandoahHeapRegion* r = heap->get_region(c);
1337 r->make_regular_bypass();
1338 r->set_top(r->bottom());
1339 }
1340
1341 for (size_t c = new_start; c <= new_end; c++) {
1342 ShenandoahHeapRegion* r = heap->get_region(c);
1343 if (c == new_start) {
1344 r->make_humongous_start_bypass(original_affiliation);
1345 } else {
1346 r->make_humongous_cont_bypass(original_affiliation);
1347 }
1348
1349 // The trailing region may be non-full; record the remainder there
1350 size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
1351 if ((c == new_end) && (remainder != 0)) {
1352 r->set_top(r->bottom() + remainder);
1353 } else {
1354 r->set_top(r->end());
1355 }
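// Illustrative arithmetic (hypothetical sizes): with region_size_words = 2^15 = 32768, an
// object of 80000 words spans 3 regions, and remainder = 80000 & (32768 - 1) = 14464, so
// the trailing region's top ends up 14464 words above its bottom instead of at end().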
1356
1357 r->reset_alloc_metadata();
1358 }
1359 }
1360 }
1361 }
1362 }
1363
1364 // This is slightly different from ShHeap::reset_next_mark_bitmap:
1365 // we need to remain able to walk pinned regions.
1366 // Since pinned regions do not move and are not compacted, we will get holes with
1405 }
1406
1407 // Compact humongous objects after regular object moves
1408 {
1409 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
1410 compact_humongous_objects();
1411 }
1412
1413 // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
1414 // and must ensure the bitmap is in sync.
1415 {
1416 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
1417 ShenandoahMCResetCompleteBitmapTask task;
1418 heap->workers()->run_task(&task);
1419 }
1420
1421 // Bring regions into proper states after the collection, and set heap properties.
1422 {
1423 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);
1424
1425 if (heap->mode()->is_generational()) {
1426 heap->young_generation()->clear_used();
1427 heap->old_generation()->clear_used();
1428 }
1429
1430 ShenandoahPostCompactClosure post_compact;
1431 heap->heap_region_iterate(&post_compact);
1432 heap->set_used(post_compact.get_live());
1433 if (heap->mode()->is_generational()) {
1434 log_info(gc)("FullGC done: GLOBAL usage: " SIZE_FORMAT ", young usage: " SIZE_FORMAT ", old usage: " SIZE_FORMAT,
1435 post_compact.get_live(), heap->young_generation()->used(), heap->old_generation()->used());
1436 }
1437
1438 heap->collection_set()->clear();
1439 heap->free_set()->rebuild();
1440 }
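// Ordering note: the collection set is cleared before the free set is rebuilt, so the
// rebuilt free set reflects only the final region states established by
// ShenandoahPostCompactClosure above, with no stale cset membership left behind.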
1441
1442 heap->clear_cancelled_gc(true /* clear oom handler */);
1443 }