 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "compiler/oopMap.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"

// ...
    // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
    if (has_forwarded_objects) {
      update_roots(true /*full_gc*/);
    }

    // d. Reset the bitmaps for new marking
    heap->reset_mark_bitmap();
    assert(heap->marking_context()->is_bitmap_clear(), "sanity");
    assert(!heap->marking_context()->is_complete(), "sanity");

    // e. Abandon reference discovery and clear all discovered references.
    ShenandoahReferenceProcessor* rp = heap->ref_processor();
    rp->abandon_partial_discovery();

    // f. Sync pinned region status from the CP marks
    heap->sync_pinned_region_status();

    // The rest of prologue:
    BiasedLocking::preserve_marks();
    _preserved_marks->init(heap->workers()->active_workers());

    assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
  }

  if (UseTLAB) {
    heap->gclabs_retire(ResizeTLAB);
    heap->tlabs_retire(ResizeTLAB);
  }
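
  // Retiring the labs fills each lab's unused tail with a filler object and
  // detaches the lab from its owning thread, leaving the heap linearly
  // parseable for the passes below. Roughly, a retire step amounts to
  // (illustrative sketch only, not the actual implementation):
  //
  //   for each thread t owning a lab:
  //     fill [t->lab_top, t->lab_end) with a filler object; // keep heap walkable
  //     if (ResizeTLAB) record t's allocation stats;        // feed lab resizing
  //     t->lab = NULL;                                      // re-acquire after GC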

  OrderAccess::fence();

  phase1_mark_heap();

  // Once marking is done, which may have fixed up forwarded objects, we can drop it.
  // Coming out of Full GC, we would not have any forwarded objects.
  // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
  heap->set_has_forwarded_objects(false);

  heap->set_full_gc_move_in_progress(true);

  // ...
  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahPrepareForMarkClosure cl;
  heap->heap_region_iterate(&cl);

  heap->set_unload_classes(heap->heuristics()->can_unload_classes());

  ShenandoahReferenceProcessor* rp = heap->ref_processor();
  // enable ("weak") refs discovery
  rp->set_soft_reference_policy(true); // forcefully purge all soft references

  ShenandoahSTWMark mark(true /*full_gc*/);
  mark.mark();
  heap->parallel_cleaning(true /* full_gc */);
}

class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  PreservedMarks* const _preserved_marks;
  ShenandoahHeap* const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion* _to_region;
  ShenandoahHeapRegion* _from_region;
  HeapWord* _compact_point;

public:
  ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
                                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                              ShenandoahHeapRegion* to_region) :
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(NULL),
    _compact_point(to_region->bottom()) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  void finish_region() {
    assert(_to_region != NULL, "should not happen");
    _to_region->set_new_top(_compact_point);
  }

  bool is_compact_same_region() {
    return _from_region == _to_region;
  }

  // ...
      // Object doesn't fit. Pick next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty region? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != NULL, "must not be NULL");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // Object fits into current region, record new location:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(NULL, p);
    _preserved_marks->push_if_necessary(p, p->mark());
    p->forward_to(cast_to_oop(_compact_point));
    _compact_point += obj_size;
  }
};
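
// In this variant the new location is recorded directly in the object's mark
// word via forward_to(); push_if_necessary() first saves any mark word that
// cannot be trivially reconstructed. Conceptually, the address calculation for
// a slice proceeds like this (illustrative walk, not real code):
//
//   _compact_point = to_region->bottom();
//   for each marked object p in the slice's regions, in walk order:
//     if (_compact_point + p->size() > _to_region->end())
//       switch _to_region to the next empty region (or compact in place);
//     _preserved_marks->push_if_necessary(p, p->mark());
//     p->forward_to(cast_to_oop(_compact_point)); // mark word now encodes the target
//     _compact_point += p->size();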

class ShenandoahPrepareForCompactionTask : public AbstractGangTask {
private:
  PreservedMarksSet* const _preserved_marks;
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices) :
    AbstractGangTask("Shenandoah Prepare For Compaction"),
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
  }

  static bool is_candidate_region(ShenandoahHeapRegion* r) {
    // Empty region: get it into the slice to defragment the slice itself.
    // We could have skipped this without violating correctness, but we really
    // ...
      }

      // Compacted the region to somewhere else? From-region is empty then.
      if (!cl.is_compact_same_region()) {
        empty_regions.append(from_region);
      }
      from_region = it.next();
    }
    cl.finish_region();

    // Mark all remaining regions as empty
    for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
      ShenandoahHeapRegion* r = empty_regions.at(pos);
      r->set_new_top(r->bottom());
    }
  }
};

void ShenandoahFullGC::calculate_target_humongous_objects() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compute the new addresses for humongous objects. We need to do this after addresses
  // for regular objects are calculated, and we know what regions in heap suffix are
  // available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // humongous start there.
  //
  // The complication is potential non-movable regions during the scan. If such region is
  // detected, then sliding restarts towards that non-movable region.

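  // Illustrative example (hypothetical indices): with 100 regions, suppose a
  // movable humongous object spans regions 40..42 and the regular pass left
  // regions 43..99 empty. Scanning backwards, the empty regions pull to_begin
  // down to 43 while to_end stays at 100. At region 40 the object needs
  // 3 regions, so start = 100 - 3 = 97, which is >= to_begin and != 40: the
  // object is forwarded to region 97, and to_end shrinks to 97. A region that
  // cannot serve as either side of a move resets both to_begin and to_end to
  // its own index, restarting the window below it.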
  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion *r = heap->get_region(c - 1);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->index();
      continue;
    }

    if (r->is_humongous_start() && r->is_stw_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = cast_to_oop(r->bottom());
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->index()) {
        // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
        _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
        old_obj->forward_to(cast_to_oop(heap->get_region(start)->bottom()));
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Scan starting from current region.
    to_begin = r->index();
    to_end = r->index();
  }
}

class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_trash()) {
      r->recycle();
      // ...

  // Compute the new addresses for regular objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);

    distribute_slices(worker_slices);

    ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
    heap->workers()->run_task(&task);
  }

  // Compute the new addresses for humongous objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
    calculate_target_humongous_objects();
  }
}

class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      assert(_ctx->is_marked(obj), "must be marked");
      if (obj->is_forwarded()) {
        oop forw = obj->forwardee();
        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
      }
    }
  }

public:
  ShenandoahAdjustPointersClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void do_oop(oop* p) { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};
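
// For example: if object A was assigned new location A' in phase 2, then for
// every reference slot p with *p == A this closure rewrites *p = A'. Slots
// referring to objects that did not move (is_forwarded() == false) are left
// as-is. This runs before any object is copied, so raw loads still see the
// old, valid contents.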

class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahAdjustPointersClosure _cl;

public:
  ShenandoahAdjustPointersObjectClosure() :
    _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    p->oop_iterate(&_cl);
  }
};

// ...
      }
      r = _regions.next();
    }
  }
};

class ShenandoahAdjustRootPointersTask : public AbstractGangTask {
private:
  ShenandoahRootAdjuster* _rp;
  PreservedMarksSet* _preserved_marks;
public:
  ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
    AbstractGangTask("Shenandoah Adjust Root Pointers"),
    _rp(rp),
    _preserved_marks(preserved_marks) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersClosure cl;
    _rp->roots_do(worker_id, &cl);
    _preserved_marks->get(worker_id)->adjust_during_full_gc();
  }
};
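
// PreservedMarks stores (object, original-mark) pairs for objects whose mark
// word was displaced by a forwarding pointer. adjust_during_full_gc() walks
// that table and redirects each saved object reference to the object's new
// location, so the original marks can be restored once the copy phase is done.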

void ShenandoahFullGC::phase3_update_references() {
  GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
  ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();
  {
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_adjust_roots);
    ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
    workers->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  ShenandoahAdjustPointersTask adjust_pointers_task;
  workers->run_task(&adjust_pointers_task);
}

class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  uint const _worker_id;

public:
  ShenandoahCompactObjectsClosure(uint worker_id) :
    _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}

  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    size_t size = (size_t)p->size();
    if (p->is_forwarded()) {
      HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
      HeapWord* compact_to = cast_from_oop<HeapWord*>(p->forwardee());
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
      oop new_obj = cast_to_oop(compact_to);
      new_obj->init_mark();
    }
  }
};
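
// Copy::aligned_conjoint_words is a memmove-style copy that tolerates
// overlapping source/destination ranges, which sliding compaction needs:
// objects slide toward the beginning of their slice and are visited in walk
// order, so a destination may overlap its own source but is not expected to
// clobber data still waiting to be copied. init_mark() then installs a fresh
// mark word in the copy; preserved marks are restored later.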

class ShenandoahCompactObjectsTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
    AbstractGangTask("Shenandoah Compact Objects"),
    _heap(ShenandoahHeap::heap()),
    _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
    // ...
    }

    r->set_live_data(live);
    r->reset_alloc_metadata();
    _live += live;
  }

  size_t get_live() {
    return _live;
  }
};

void ShenandoahFullGC::compact_humongous_objects() {
  // Compact humongous regions, based on their fwdptr objects.
  //
  // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
  // humongous regions are already compacted, and do not require further moves, which alleviates
  // sliding costs. We may consider doing this in parallel in future.

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_start()) {
      oop old_obj = cast_to_oop(r->bottom());
      if (!old_obj->is_forwarded()) {
        // No need to move the object, it stays at the same slot
        continue;
      }
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t old_start = r->index();
      size_t old_end = old_start + num_regions - 1;
      size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
      size_t new_end = new_start + num_regions - 1;
      assert(old_start != new_start, "must be real move");
      assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());

      Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
                                   heap->get_region(new_start)->bottom(),
                                   words_size);

      oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
      new_obj->init_mark();

      {
        for (size_t c = old_start; c <= old_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          r->make_regular_bypass();
          r->set_top(r->bottom());
        }

        for (size_t c = new_start; c <= new_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          // ...
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "compiler/oopMap.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/slidingForwarding.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"

// ...
    // c. Update roots if this full GC is due to evac-oom, which may carry from-space pointers in roots.
    if (has_forwarded_objects) {
      update_roots(true /*full_gc*/);
    }

    // d. Reset the bitmaps for new marking
    heap->reset_mark_bitmap();
    assert(heap->marking_context()->is_bitmap_clear(), "sanity");
    assert(!heap->marking_context()->is_complete(), "sanity");

    // e. Abandon reference discovery and clear all discovered references.
    ShenandoahReferenceProcessor* rp = heap->ref_processor();
    rp->abandon_partial_discovery();

    // f. Sync pinned region status from the CP marks
    heap->sync_pinned_region_status();

    // The rest of prologue:
    BiasedLocking::preserve_marks();
    _preserved_marks->init(heap->workers()->active_workers());
    heap->forwarding()->clear();
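    // Presumably (a summary based on gc/shared/slidingForwarding.inline.hpp,
    // not on this file): SlidingForwarding records full-GC forwardings outside
    // the object, encoding "target region + offset" in the few mark-word bits
    // available and falling back to a side table when that encoding does not
    // fit. clear() resets that state so the passes below start from an empty
    // table.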

    assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
  }

  if (UseTLAB) {
    heap->gclabs_retire(ResizeTLAB);
    heap->tlabs_retire(ResizeTLAB);
  }

  OrderAccess::fence();

  phase1_mark_heap();

  // Once marking is done, which may have fixed up forwarded objects, we can drop it.
  // Coming out of Full GC, we would not have any forwarded objects.
  // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
  heap->set_has_forwarded_objects(false);

  heap->set_full_gc_move_in_progress(true);

  // ...
  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahPrepareForMarkClosure cl;
  heap->heap_region_iterate(&cl);

  heap->set_unload_classes(heap->heuristics()->can_unload_classes());

  ShenandoahReferenceProcessor* rp = heap->ref_processor();
  // enable ("weak") refs discovery
  rp->set_soft_reference_policy(true); // forcefully purge all soft references

  ShenandoahSTWMark mark(true /*full_gc*/);
  mark.mark();
  heap->parallel_cleaning(true /* full_gc */);
}

class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  PreservedMarks* const _preserved_marks;
  SlidingForwarding* const _forwarding;
  ShenandoahHeap* const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion* _to_region;
  ShenandoahHeapRegion* _from_region;
  HeapWord* _compact_point;

public:
  ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
                                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                              ShenandoahHeapRegion* to_region) :
    _preserved_marks(preserved_marks),
    _forwarding(ShenandoahHeap::heap()->forwarding()),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(NULL),
    _compact_point(to_region->bottom()) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  void finish_region() {
    assert(_to_region != NULL, "should not happen");
    _to_region->set_new_top(_compact_point);
  }

  bool is_compact_same_region() {
    return _from_region == _to_region;
  }

  // ...
      // Object doesn't fit. Pick next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty region? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != NULL, "must not be NULL");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // Object fits into current region, record new location:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(NULL, p);
    _preserved_marks->push_if_necessary(p, p->mark());
    _forwarding->forward_to(p, cast_to_oop(_compact_point));
    _compact_point += obj_size;
  }
};
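
// Relative to the earlier variant above, the only functional change is that
// the new address is recorded through the SlidingForwarding table
// (_forwarding->forward_to(p, ...)) instead of being written wholesale into
// the object's mark word with p->forward_to(...). The address calculation
// itself is unchanged.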

class ShenandoahPrepareForCompactionTask : public AbstractGangTask {
private:
  PreservedMarksSet* const _preserved_marks;
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices) :
    AbstractGangTask("Shenandoah Prepare For Compaction"),
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
  }

  static bool is_candidate_region(ShenandoahHeapRegion* r) {
    // Empty region: get it into the slice to defragment the slice itself.
    // We could have skipped this without violating correctness, but we really
    // ...
      }

      // Compacted the region to somewhere else? From-region is empty then.
      if (!cl.is_compact_same_region()) {
        empty_regions.append(from_region);
      }
      from_region = it.next();
    }
    cl.finish_region();

    // Mark all remaining regions as empty
    for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
      ShenandoahHeapRegion* r = empty_regions.at(pos);
      r->set_new_top(r->bottom());
    }
  }
};

void ShenandoahFullGC::calculate_target_humongous_objects() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  SlidingForwarding* forwarding = heap->forwarding();

  // Compute the new addresses for humongous objects. We need to do this after addresses
  // for regular objects are calculated, and we know what regions in heap suffix are
  // available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // humongous start there.
  //
  // The complication is potential non-movable regions during the scan. If such region is
  // detected, then sliding restarts towards that non-movable region.

  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion *r = heap->get_region(c - 1);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->index();
      continue;
    }

    if (r->is_humongous_start() && r->is_stw_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = cast_to_oop(r->bottom());
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->index()) {
        // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
        _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
        forwarding->forward_to(old_obj, cast_to_oop(heap->get_region(start)->bottom()));
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Scan starting from current region.
    to_begin = r->index();
    to_end = r->index();
  }
}

class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_trash()) {
      r->recycle();
      // ...

  // Compute the new addresses for regular objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);

    distribute_slices(worker_slices);

    ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
    heap->workers()->run_task(&task);
  }

  // Compute the new addresses for humongous objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
    calculate_target_humongous_objects();
  }
}

class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  const SlidingForwarding* const _forwarding;
  ShenandoahMarkingContext* const _ctx;

  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      assert(_ctx->is_marked(obj), "must be marked");
      if (obj->is_forwarded()) {
        oop forw = _forwarding->forwardee(obj);
        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
      }
    }
  }

public:
  ShenandoahAdjustPointersClosure() :
    _heap(ShenandoahHeap::heap()),
    _forwarding(_heap->forwarding()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void do_oop(oop* p) { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};
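
// Note the division of labor in this adapted variant: obj->is_forwarded()
// still tests the mark word's forwarded bit pattern, while the target address
// is decoded through _forwarding->forwardee(obj), since a compact mark word
// may not have room for a full pointer.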

class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahAdjustPointersClosure _cl;

public:
  ShenandoahAdjustPointersObjectClosure() :
    _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    p->oop_iterate(&_cl);
  }
};

// ...
      }
      r = _regions.next();
    }
  }
};

class ShenandoahAdjustRootPointersTask : public AbstractGangTask {
private:
  ShenandoahRootAdjuster* _rp;
  PreservedMarksSet* _preserved_marks;
public:
  ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
    AbstractGangTask("Shenandoah Adjust Root Pointers"),
    _rp(rp),
    _preserved_marks(preserved_marks) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersClosure cl;
    _rp->roots_do(worker_id, &cl);
    const SlidingForwarding* const forwarding = ShenandoahHeap::heap()->forwarding();
    _preserved_marks->get(worker_id)->adjust_during_full_gc(forwarding);
  }
};

void ShenandoahFullGC::phase3_update_references() {
  GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
  ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();
  {
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_adjust_roots);
    ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
    workers->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  ShenandoahAdjustPointersTask adjust_pointers_task;
  workers->run_task(&adjust_pointers_task);
}

class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  const SlidingForwarding* const _forwarding;
  uint const _worker_id;

public:
  ShenandoahCompactObjectsClosure(uint worker_id) :
    _heap(ShenandoahHeap::heap()), _forwarding(_heap->forwarding()), _worker_id(worker_id) {}

  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    size_t size = (size_t)p->size();
    if (p->is_forwarded()) {
      HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
      HeapWord* compact_to = cast_from_oop<HeapWord*>(_forwarding->forwardee(p));
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
      oop new_obj = cast_to_oop(compact_to);
      new_obj->init_mark();
    }
  }
};

class ShenandoahCompactObjectsTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
    AbstractGangTask("Shenandoah Compact Objects"),
    _heap(ShenandoahHeap::heap()),
    _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
    // ...
    }

    r->set_live_data(live);
    r->reset_alloc_metadata();
    _live += live;
  }

  size_t get_live() {
    return _live;
  }
};

void ShenandoahFullGC::compact_humongous_objects() {
  // Compact humongous regions, based on their fwdptr objects.
  //
  // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
  // humongous regions are already compacted, and do not require further moves, which alleviates
  // sliding costs. We may consider doing this in parallel in future.

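  // Worked illustration (hypothetical indices): an object spanning regions
  // 40..42 that was forwarded to region 97 is moved with a single conjoint
  // copy of words_size words from region 40's bottom to region 97's bottom.
  // Regions 40..42 are then downgraded to empty regular regions
  // (make_regular_bypass + top = bottom), while the second loop below
  // re-establishes the target regions as the new humongous range.
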
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  const SlidingForwarding* const forwarding = heap->forwarding();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_start()) {
      oop old_obj = cast_to_oop(r->bottom());
      if (!old_obj->is_forwarded()) {
        // No need to move the object, it stays at the same slot
        continue;
      }
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t old_start = r->index();
      size_t old_end = old_start + num_regions - 1;
      size_t new_start = heap->heap_region_index_containing(forwarding->forwardee(old_obj));
      size_t new_end = new_start + num_regions - 1;
      assert(old_start != new_start, "must be real move");
      assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());

      Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
                                   heap->get_region(new_start)->bottom(),
                                   words_size);

      oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
      new_obj->init_mark();

      {
        for (size_t c = old_start; c <= old_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          r->make_regular_bypass();
          r->set_top(r->bottom());
        }

        for (size_t c = new_start; c <= new_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          // ...