11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26
27 #include "compiler/oopMap.hpp"
28 #include "gc/shared/continuationGCSupport.hpp"
29 #include "gc/shared/gcTraceTime.inline.hpp"
30 #include "gc/shared/preservedMarks.inline.hpp"
31 #include "gc/shared/tlab_globals.hpp"
32 #include "gc/shared/workerThread.hpp"
33 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
34 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
35 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
36 #include "gc/shenandoah/shenandoahFreeSet.hpp"
37 #include "gc/shenandoah/shenandoahFullGC.hpp"
38 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
39 #include "gc/shenandoah/shenandoahMark.inline.hpp"
40 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
41 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
42 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
43 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
44 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
45 #include "gc/shenandoah/shenandoahMetrics.hpp"
46 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
47 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
48 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
49 #include "gc/shenandoah/shenandoahSTWMark.hpp"
50 #include "gc/shenandoah/shenandoahUtils.hpp"
170
171 // c. Update roots if this full GC is due to evac-oom; such a cycle may have left from-space pointers in roots.
172 if (has_forwarded_objects) {
173 update_roots(true /*full_gc*/);
174 }
175
176 // d. Reset the bitmaps for new marking
177 heap->reset_mark_bitmap();
178 assert(heap->marking_context()->is_bitmap_clear(), "sanity");
179 assert(!heap->marking_context()->is_complete(), "sanity");
180
181 // e. Abandon reference discovery and clear all discovered references.
182 ShenandoahReferenceProcessor* rp = heap->ref_processor();
183 rp->abandon_partial_discovery();
184
185 // f. Sync pinned region status from the CP marks
186 heap->sync_pinned_region_status();
187
188 // The rest of the prologue:
189 _preserved_marks->init(heap->workers()->active_workers());
190
191 assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
192 }
193
194 if (UseTLAB) {
195 heap->gclabs_retire(ResizeTLAB);
196 heap->tlabs_retire(ResizeTLAB);
197 }
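// Retiring GCLABs and TLABs up front means no thread holds a partially-filled lab across the
// pause; retirement also fills the unused tail of each lab, keeping regions parsable for the
// object-by-object walks in the phases below (standard HotSpot behavior, noted here for context).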
198
199 OrderAccess::fence();
200
201 phase1_mark_heap();
202
203 // Once marking is done (it may have fixed up forwarded objects), we can drop the forwarded-objects flag.
204 // Coming out of Full GC, we would not have any forwarded objects.
205 // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
206 heap->set_has_forwarded_objects(false);
207
208 heap->set_full_gc_move_in_progress(true);
209
280 ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);
281
282 ShenandoahHeap* heap = ShenandoahHeap::heap();
283
284 ShenandoahPrepareForMarkClosure cl;
285 heap->heap_region_iterate(&cl);
286
287 heap->set_unload_classes(heap->heuristics()->can_unload_classes());
288
289 ShenandoahReferenceProcessor* rp = heap->ref_processor();
290 // enable ("weak") refs discovery
291 rp->set_soft_reference_policy(true); // forcefully purge all soft references
292
293 ShenandoahSTWMark mark(true /*full_gc*/);
294 mark.mark();
295 heap->parallel_cleaning(true /* full_gc */);
296 }
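// From this point on the marking context is complete: every later phase (forwarding
// calculation, pointer adjustment, compaction) consults complete_marking_context() and
// asserts that it only ever visits marked, i.e. live, objects.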
297
298 class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
299 private:
300 PreservedMarks* const _preserved_marks;
301 ShenandoahHeap* const _heap;
302 GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
303 int _empty_regions_pos;
304 ShenandoahHeapRegion* _to_region;
305 ShenandoahHeapRegion* _from_region;
306 HeapWord* _compact_point;
307
308 public:
309 ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
310 GrowableArray<ShenandoahHeapRegion*>& empty_regions,
311 ShenandoahHeapRegion* to_region) :
312 _preserved_marks(preserved_marks),
313 _heap(ShenandoahHeap::heap()),
314 _empty_regions(empty_regions),
315 _empty_regions_pos(0),
316 _to_region(to_region),
317 _from_region(nullptr),
318 _compact_point(to_region->bottom()) {}
319
320 void set_from_region(ShenandoahHeapRegion* from_region) {
321 _from_region = from_region;
322 }
323
324 void finish_region() {
325 assert(_to_region != nullptr, "should not happen");
326 _to_region->set_new_top(_compact_point);
327 }
328
329 bool is_compact_same_region() {
330 return _from_region == _to_region;
331 }
332
346 // Object doesn't fit. Pick next empty region and start compacting there.
347 ShenandoahHeapRegion* new_to_region;
348 if (_empty_regions_pos < _empty_regions.length()) {
349 new_to_region = _empty_regions.at(_empty_regions_pos);
350 _empty_regions_pos++;
351 } else {
352 // Out of empty regions? Compact within the same region.
353 new_to_region = _from_region;
354 }
355
356 assert(new_to_region != _to_region, "must not reuse same to-region");
357 assert(new_to_region != nullptr, "must not be null");
358 _to_region = new_to_region;
359 _compact_point = _to_region->bottom();
360 }
361
362 // Object fits into current region, record new location:
363 assert(_compact_point + obj_size <= _to_region->end(), "must fit");
364 shenandoah_assert_not_forwarded(nullptr, p);
365 _preserved_marks->push_if_necessary(p, p->mark());
366 p->forward_to(cast_to_oop(_compact_point));
367 _compact_point += obj_size;
368 }
369 };
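// Rough sketch of how the task below drives this closure for one region in a worker slice
// (the do_object body above is partially elided here; marked_object_iterate is the assumed
// iteration entry point, per the usual ShenandoahHeap interface):
//
//   cl.set_from_region(region);                  // region whose live objects are being forwarded
//   heap->marked_object_iterate(region, &cl);    // compute a new address for every marked object
//   if (!cl.is_compact_same_region()) {
//     empty_regions.append(region);              // from-region drained; it can serve as a target later
//   }
//   ...
//   cl.finish_region();                          // publish the final compact point as the to-region's new_top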
370
371 class ShenandoahPrepareForCompactionTask : public WorkerTask {
372 private:
373 PreservedMarksSet* const _preserved_marks;
374 ShenandoahHeap* const _heap;
375 ShenandoahHeapRegionSet** const _worker_slices;
376
377 public:
378 ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices) :
379 WorkerTask("Shenandoah Prepare For Compaction"),
380 _preserved_marks(preserved_marks),
381 _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
382 }
383
384 static bool is_candidate_region(ShenandoahHeapRegion* r) {
385 // Empty region: get it into the slice to defragment the slice itself.
386 // We could have skipped this without violating correctness, but we really
420 }
421
422 // Compacted the region to somewhere else? From-region is empty then.
423 if (!cl.is_compact_same_region()) {
424 empty_regions.append(from_region);
425 }
426 from_region = it.next();
427 }
428 cl.finish_region();
429
430 // Mark all remaining regions as empty
431 for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
432 ShenandoahHeapRegion* r = empty_regions.at(pos);
433 r->set_new_top(r->bottom());
434 }
435 }
436 };
437
438 void ShenandoahFullGC::calculate_target_humongous_objects() {
439 ShenandoahHeap* heap = ShenandoahHeap::heap();
440
441 // Compute the new addresses for humongous objects. We need to do this after addresses
442 // for regular objects are calculated, and we know which regions in the heap suffix are
443 // available for humongous moves.
444 //
445 // Scan the heap backwards, because we are compacting humongous regions towards the end.
446 // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
447 // a humongous start region there.
448 //
449 // The complication is potential non-movable regions during the scan. If such a region is
450 // detected, sliding restarts towards that non-movable region.
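// Illustrative example (numbers invented): with 100 regions the window starts as [100; 100).
// If regions 90..99 are empty after regular compaction, the backward scan lowers to_begin to 90
// while to_end stays 100. Reaching a movable 3-region humongous object starting at region 87
// (its continuations at 88..89 have already lowered to_begin), start = 100 - 3 = 97 >= to_begin,
// so the object is forwarded to region 97 and to_end becomes 97. Any other region, or a
// humongous object that does not fit the window, collapses the window to that region's index.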
451
452 size_t to_begin = heap->num_regions();
453 size_t to_end = heap->num_regions();
454
455 for (size_t c = heap->num_regions(); c > 0; c--) {
456 ShenandoahHeapRegion *r = heap->get_region(c - 1);
457 if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
458 // To-region candidate: record this, and continue scan
459 to_begin = r->index();
460 continue;
461 }
462
463 if (r->is_humongous_start() && r->is_stw_move_allowed()) {
464 // From-region candidate: movable humongous region
465 oop old_obj = cast_to_oop(r->bottom());
466 size_t words_size = old_obj->size();
467 size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
468
469 size_t start = to_end - num_regions;
470
471 if (start >= to_begin && start != r->index()) {
472 // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
473 _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
474 old_obj->forward_to(cast_to_oop(heap->get_region(start)->bottom()));
475 to_end = start;
476 continue;
477 }
478 }
479
480 // Failed to fit. Scan starting from current region.
481 to_begin = r->index();
482 to_end = r->index();
483 }
484 }
485
486 class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
487 private:
488 ShenandoahHeap* const _heap;
489
490 public:
491 ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
492 void heap_region_do(ShenandoahHeapRegion* r) {
493 if (r->is_trash()) {
494 r->recycle();
705
706 // Compute the new addresses for regular objects
707 {
708 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);
709
710 distribute_slices(worker_slices);
711
712 ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
713 heap->workers()->run_task(&task);
714 }
715
716 // Compute the new addresses for humongous objects
717 {
718 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
719 calculate_target_humongous_objects();
720 }
721 }
722
723 class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
724 private:
725 ShenandoahHeap* const _heap;
726 ShenandoahMarkingContext* const _ctx;
727
728 template <class T>
729 inline void do_oop_work(T* p) {
730 T o = RawAccess<>::oop_load(p);
731 if (!CompressedOops::is_null(o)) {
732 oop obj = CompressedOops::decode_not_null(o);
733 assert(_ctx->is_marked(obj), "must be marked");
734 if (obj->is_forwarded()) {
735 oop forw = obj->forwardee();
736 RawAccess<IS_NOT_NULL>::oop_store(p, forw);
737 }
738 }
739 }
740
741 public:
742 ShenandoahAdjustPointersClosure() :
743 _heap(ShenandoahHeap::heap()),
744 _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
745
746 void do_oop(oop* p) { do_oop_work(p); }
747 void do_oop(narrowOop* p) { do_oop_work(p); }
748 void do_method(Method* m) {}
749 void do_nmethod(nmethod* nm) {}
750 };
751
752 class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
753 private:
754 ShenandoahHeap* const _heap;
755 ShenandoahAdjustPointersClosure _cl;
756
757 public:
758 ShenandoahAdjustPointersObjectClosure() :
759 _heap(ShenandoahHeap::heap()) {
760 }
761 void do_object(oop p) {
762 assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
763 p->oop_iterate(&_cl);
785 }
786 r = _regions.next();
787 }
788 }
789 };
790
791 class ShenandoahAdjustRootPointersTask : public WorkerTask {
792 private:
793 ShenandoahRootAdjuster* _rp;
794 PreservedMarksSet* _preserved_marks;
795 public:
796 ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
797 WorkerTask("Shenandoah Adjust Root Pointers"),
798 _rp(rp),
799 _preserved_marks(preserved_marks) {}
800
801 void work(uint worker_id) {
802 ShenandoahParallelWorkerSession worker_session(worker_id);
803 ShenandoahAdjustPointersClosure cl;
804 _rp->roots_do(worker_id, &cl);
805 _preserved_marks->get(worker_id)->adjust_during_full_gc();
806 }
807 };
808
809 void ShenandoahFullGC::phase3_update_references() {
810 GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
811 ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);
812
813 ShenandoahHeap* heap = ShenandoahHeap::heap();
814
815 WorkerThreads* workers = heap->workers();
816 uint nworkers = workers->active_workers();
817 {
818 #if COMPILER2_OR_JVMCI
819 DerivedPointerTable::clear();
820 #endif
821 ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_adjust_roots);
822 ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
823 workers->run_task(&task);
824 #if COMPILER2_OR_JVMCI
825 DerivedPointerTable::update_pointers();
826 #endif
827 }
828
829 ShenandoahAdjustPointersTask adjust_pointers_task;
830 workers->run_task(&adjust_pointers_task);
831 }
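// Phase 3 runs in two waves: roots first (ShenandoahRootAdjuster plus the per-worker
// preserved-marks adjustment), then every live heap object via ShenandoahAdjustPointersTask.
// The DerivedPointerTable bracketing is the usual HotSpot mechanism for re-deriving interior
// pointers in compiled frames once their base oops have been re-pointed.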
832
833 class ShenandoahCompactObjectsClosure : public ObjectClosure {
834 private:
835 ShenandoahHeap* const _heap;
836 uint const _worker_id;
837
838 public:
839 ShenandoahCompactObjectsClosure(uint worker_id) :
840 _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}
841
842 void do_object(oop p) {
843 assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
844 size_t size = p->size();
845 if (p->is_forwarded()) {
846 HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
847 HeapWord* compact_to = cast_from_oop<HeapWord*>(p->forwardee());
848 Copy::aligned_conjoint_words(compact_from, compact_to, size);
849 oop new_obj = cast_to_oop(compact_to);
850
851 ContinuationGCSupport::relativize_stack_chunk(new_obj);
852 new_obj->init_mark();
853 }
854 }
855 };
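// Copy::aligned_conjoint_words is used because, within a sliding slice, the source and
// destination ranges of a move may overlap; objects slide towards addresses visited earlier,
// so a conjoint (memmove-style) copy is safe. init_mark() then gives the moved object a fresh
// header; non-trivial headers saved in the preserved marks are restored in a later step.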
856
857 class ShenandoahCompactObjectsTask : public WorkerTask {
858 private:
859 ShenandoahHeap* const _heap;
860 ShenandoahHeapRegionSet** const _worker_slices;
861
862 public:
863 ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
864 WorkerTask("Shenandoah Compact Objects"),
865 _heap(ShenandoahHeap::heap()),
866 _worker_slices(worker_slices) {
867 }
924 }
925
926 r->set_live_data(live);
927 r->reset_alloc_metadata();
928 _live += live;
929 }
930
931 size_t get_live() {
932 return _live;
933 }
934 };
935
936 void ShenandoahFullGC::compact_humongous_objects() {
937 // Compact humongous regions, based on their fwdptr objects.
938 //
939 // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
940 // humongous regions are already compacted, and do not require further moves, which alleviates
941 // sliding costs. We may consider doing this in parallel in the future.
942
943 ShenandoahHeap* heap = ShenandoahHeap::heap();
944
945 for (size_t c = heap->num_regions(); c > 0; c--) {
946 ShenandoahHeapRegion* r = heap->get_region(c - 1);
947 if (r->is_humongous_start()) {
948 oop old_obj = cast_to_oop(r->bottom());
949 if (!old_obj->is_forwarded()) {
950 // No need to move the object; it stays in the same slot
951 continue;
952 }
953 size_t words_size = old_obj->size();
954 size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
955
956 size_t old_start = r->index();
957 size_t old_end = old_start + num_regions - 1;
958 size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
959 size_t new_end = new_start + num_regions - 1;
960 assert(old_start != new_start, "must be real move");
961 assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());
962
963 Copy::aligned_conjoint_words(r->bottom(), heap->get_region(new_start)->bottom(), words_size);
964 ContinuationGCSupport::relativize_stack_chunk(cast_to_oop<HeapWord*>(r->bottom()));
965
966 oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
967 new_obj->init_mark();
968
969 {
970 for (size_t c = old_start; c <= old_end; c++) {
971 ShenandoahHeapRegion* r = heap->get_region(c);
972 r->make_regular_bypass();
973 r->set_top(r->bottom());
974 }
975
976 for (size_t c = new_start; c <= new_end; c++) {
977 ShenandoahHeapRegion* r = heap->get_region(c);
978 if (c == new_start) {
|
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26
27 #include "compiler/oopMap.hpp"
28 #include "gc/shared/continuationGCSupport.hpp"
29 #include "gc/shared/gcTraceTime.inline.hpp"
30 #include "gc/shared/preservedMarks.inline.hpp"
31 #include "gc/shared/slidingForwarding.inline.hpp"
32 #include "gc/shared/tlab_globals.hpp"
33 #include "gc/shared/workerThread.hpp"
34 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
35 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
36 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
37 #include "gc/shenandoah/shenandoahFreeSet.hpp"
38 #include "gc/shenandoah/shenandoahFullGC.hpp"
39 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
40 #include "gc/shenandoah/shenandoahMark.inline.hpp"
41 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
42 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
43 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
44 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
45 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
46 #include "gc/shenandoah/shenandoahMetrics.hpp"
47 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
48 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
49 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
50 #include "gc/shenandoah/shenandoahSTWMark.hpp"
51 #include "gc/shenandoah/shenandoahUtils.hpp"
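// SlidingForwarding variant: from here on, forwarding information for the Full GC is recorded
// through the SlidingForwarding table reachable via heap->forwarding() (hence the additional
// gc/shared/slidingForwarding.inline.hpp include) rather than installed directly into object
// mark words with oop::forward_to(). The surrounding logic is otherwise the same.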
171
172 // c. Update roots if this full GC is due to evac-oom; such a cycle may have left from-space pointers in roots.
173 if (has_forwarded_objects) {
174 update_roots(true /*full_gc*/);
175 }
176
177 // d. Reset the bitmaps for new marking
178 heap->reset_mark_bitmap();
179 assert(heap->marking_context()->is_bitmap_clear(), "sanity");
180 assert(!heap->marking_context()->is_complete(), "sanity");
181
182 // e. Abandon reference discovery and clear all discovered references.
183 ShenandoahReferenceProcessor* rp = heap->ref_processor();
184 rp->abandon_partial_discovery();
185
186 // f. Sync pinned region status from the CP marks
187 heap->sync_pinned_region_status();
188
189 // The rest of the prologue:
190 _preserved_marks->init(heap->workers()->active_workers());
191 heap->forwarding()->clear();
192
193 assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
194 }
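// heap->forwarding()->clear() resets the sliding-forwarding table as part of the prologue,
// presumably so that the compaction phases below start without stale forwarding entries from
// an earlier cycle.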
195
196 if (UseTLAB) {
197 heap->gclabs_retire(ResizeTLAB);
198 heap->tlabs_retire(ResizeTLAB);
199 }
200
201 OrderAccess::fence();
202
203 phase1_mark_heap();
204
205 // Once marking is done (it may have fixed up forwarded objects), we can drop the forwarded-objects flag.
206 // Coming out of Full GC, we would not have any forwarded objects.
207 // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
208 heap->set_has_forwarded_objects(false);
209
210 heap->set_full_gc_move_in_progress(true);
211
282 ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);
283
284 ShenandoahHeap* heap = ShenandoahHeap::heap();
285
286 ShenandoahPrepareForMarkClosure cl;
287 heap->heap_region_iterate(&cl);
288
289 heap->set_unload_classes(heap->heuristics()->can_unload_classes());
290
291 ShenandoahReferenceProcessor* rp = heap->ref_processor();
292 // enable ("weak") refs discovery
293 rp->set_soft_reference_policy(true); // forcefully purge all soft references
294
295 ShenandoahSTWMark mark(true /*full_gc*/);
296 mark.mark();
297 heap->parallel_cleaning(true /* full_gc */);
298 }
299
300 class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
301 private:
302 PreservedMarks* const _preserved_marks;
303 SlidingForwarding* const _forwarding;
304 ShenandoahHeap* const _heap;
305 GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
306 int _empty_regions_pos;
307 ShenandoahHeapRegion* _to_region;
308 ShenandoahHeapRegion* _from_region;
309 HeapWord* _compact_point;
310
311 public:
312 ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
313 GrowableArray<ShenandoahHeapRegion*>& empty_regions,
314 ShenandoahHeapRegion* to_region) :
315 _preserved_marks(preserved_marks),
316 _forwarding(ShenandoahHeap::heap()->forwarding()),
317 _heap(ShenandoahHeap::heap()),
318 _empty_regions(empty_regions),
319 _empty_regions_pos(0),
320 _to_region(to_region),
321 _from_region(nullptr),
322 _compact_point(to_region->bottom()) {}
323
324 void set_from_region(ShenandoahHeapRegion* from_region) {
325 _from_region = from_region;
326 }
327
328 void finish_region() {
329 assert(_to_region != nullptr, "should not happen");
330 _to_region->set_new_top(_compact_point);
331 }
332
333 bool is_compact_same_region() {
334 return _from_region == _to_region;
335 }
336
350 // Object doesn't fit. Pick next empty region and start compacting there.
351 ShenandoahHeapRegion* new_to_region;
352 if (_empty_regions_pos < _empty_regions.length()) {
353 new_to_region = _empty_regions.at(_empty_regions_pos);
354 _empty_regions_pos++;
355 } else {
356 // Out of empty regions? Compact within the same region.
357 new_to_region = _from_region;
358 }
359
360 assert(new_to_region != _to_region, "must not reuse same to-region");
361 assert(new_to_region != nullptr, "must not be null");
362 _to_region = new_to_region;
363 _compact_point = _to_region->bottom();
364 }
365
366 // Object fits into current region, record new location:
367 assert(_compact_point + obj_size <= _to_region->end(), "must fit");
368 shenandoah_assert_not_forwarded(nullptr, p);
369 _preserved_marks->push_if_necessary(p, p->mark());
370 _forwarding->forward_to(p, cast_to_oop(_compact_point));
371 _compact_point += obj_size;
372 }
373 };
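// The address calculation is unchanged; only the publication of the new location differs:
// _forwarding->forward_to(p, ...) records it in the SlidingForwarding table instead of
// p->forward_to(...). The original mark is still pushed to the preserved marks first; how much
// of the mark word the forwarding encoding actually reuses is a SlidingForwarding
// implementation detail not visible in this file.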
374
375 class ShenandoahPrepareForCompactionTask : public WorkerTask {
376 private:
377 PreservedMarksSet* const _preserved_marks;
378 ShenandoahHeap* const _heap;
379 ShenandoahHeapRegionSet** const _worker_slices;
380
381 public:
382 ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices) :
383 WorkerTask("Shenandoah Prepare For Compaction"),
384 _preserved_marks(preserved_marks),
385 _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
386 }
387
388 static bool is_candidate_region(ShenandoahHeapRegion* r) {
389 // Empty region: get it into the slice to defragment the slice itself.
390 // We could have skipped this without violating correctness, but we really
424 }
425
426 // Compacted the region to somewhere else? From-region is empty then.
427 if (!cl.is_compact_same_region()) {
428 empty_regions.append(from_region);
429 }
430 from_region = it.next();
431 }
432 cl.finish_region();
433
434 // Mark all remaining regions as empty
435 for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
436 ShenandoahHeapRegion* r = empty_regions.at(pos);
437 r->set_new_top(r->bottom());
438 }
439 }
440 };
441
442 void ShenandoahFullGC::calculate_target_humongous_objects() {
443 ShenandoahHeap* heap = ShenandoahHeap::heap();
444 SlidingForwarding* forwarding = heap->forwarding();
445
446 // Compute the new addresses for humongous objects. We need to do this after addresses
447 // for regular objects are calculated, and we know which regions in the heap suffix are
448 // available for humongous moves.
449 //
450 // Scan the heap backwards, because we are compacting humongous regions towards the end.
451 // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
452 // a humongous start region there.
453 //
454 // The complication is potential non-movable regions during the scan. If such a region is
455 // detected, sliding restarts towards that non-movable region.
456
457 size_t to_begin = heap->num_regions();
458 size_t to_end = heap->num_regions();
459
460 for (size_t c = heap->num_regions(); c > 0; c--) {
461 ShenandoahHeapRegion *r = heap->get_region(c - 1);
462 if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
463 // To-region candidate: record this, and continue scan
464 to_begin = r->index();
465 continue;
466 }
467
468 if (r->is_humongous_start() && r->is_stw_move_allowed()) {
469 // From-region candidate: movable humongous region
470 oop old_obj = cast_to_oop(r->bottom());
471 size_t words_size = old_obj->size();
472 size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
473
474 size_t start = to_end - num_regions;
475
476 if (start >= to_begin && start != r->index()) {
477 // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
478 _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
479 forwarding->forward_to(old_obj, cast_to_oop(heap->get_region(start)->bottom()));
480 to_end = start;
481 continue;
482 }
483 }
484
485 // Failed to fit. Scan starting from current region.
486 to_begin = r->index();
487 to_end = r->index();
488 }
489 }
490
491 class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
492 private:
493 ShenandoahHeap* const _heap;
494
495 public:
496 ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
497 void heap_region_do(ShenandoahHeapRegion* r) {
498 if (r->is_trash()) {
499 r->recycle();
710
711 // Compute the new addresses for regular objects
712 {
713 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);
714
715 distribute_slices(worker_slices);
716
717 ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
718 heap->workers()->run_task(&task);
719 }
720
721 // Compute the new addresses for humongous objects
722 {
723 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
724 calculate_target_humongous_objects();
725 }
726 }
727
728 class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
729 private:
730 ShenandoahHeap* const _heap;
731 const SlidingForwarding* const _forwarding;
732 ShenandoahMarkingContext* const _ctx;
733
734 template <class T>
735 inline void do_oop_work(T* p) {
736 T o = RawAccess<>::oop_load(p);
737 if (!CompressedOops::is_null(o)) {
738 oop obj = CompressedOops::decode_not_null(o);
739 assert(_ctx->is_marked(obj), "must be marked");
740 if (obj->is_forwarded()) {
741 oop forw = _forwarding->forwardee(obj);
742 RawAccess<IS_NOT_NULL>::oop_store(p, forw);
743 }
744 }
745 }
746
747 public:
748 ShenandoahAdjustPointersClosure() :
749 _heap(ShenandoahHeap::heap()),
750 _forwarding(_heap->forwarding()),
751 _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
752
753 void do_oop(oop* p) { do_oop_work(p); }
754 void do_oop(narrowOop* p) { do_oop_work(p); }
755 void do_method(Method* m) {}
756 void do_nmethod(nmethod* nm) {}
757 };
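// obj->is_forwarded() remains the header test for "this object moves", but the destination is
// now looked up with _forwarding->forwardee(obj) instead of obj->forwardee(), which is why the
// closure captures a const SlidingForwarding* at construction.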
758
759 class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
760 private:
761 ShenandoahHeap* const _heap;
762 ShenandoahAdjustPointersClosure _cl;
763
764 public:
765 ShenandoahAdjustPointersObjectClosure() :
766 _heap(ShenandoahHeap::heap()) {
767 }
768 void do_object(oop p) {
769 assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
770 p->oop_iterate(&_cl);
792 }
793 r = _regions.next();
794 }
795 }
796 };
797
798 class ShenandoahAdjustRootPointersTask : public WorkerTask {
799 private:
800 ShenandoahRootAdjuster* _rp;
801 PreservedMarksSet* _preserved_marks;
802 public:
803 ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
804 WorkerTask("Shenandoah Adjust Root Pointers"),
805 _rp(rp),
806 _preserved_marks(preserved_marks) {}
807
808 void work(uint worker_id) {
809 ShenandoahParallelWorkerSession worker_session(worker_id);
810 ShenandoahAdjustPointersClosure cl;
811 _rp->roots_do(worker_id, &cl);
812 const SlidingForwarding* const forwarding = ShenandoahHeap::heap()->forwarding();
813 _preserved_marks->get(worker_id)->adjust_during_full_gc(forwarding);
814 }
815 };
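// adjust_during_full_gc() now takes the forwarding table: the preserved (oop, mark) pairs still
// refer to objects by their old addresses, and adjusting them means replacing each recorded oop
// with its forwardee, which in this variant requires a table lookup.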
816
817 void ShenandoahFullGC::phase3_update_references() {
818 GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
819 ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);
820
821 ShenandoahHeap* heap = ShenandoahHeap::heap();
822
823 WorkerThreads* workers = heap->workers();
824 uint nworkers = workers->active_workers();
825 {
826 #if COMPILER2_OR_JVMCI
827 DerivedPointerTable::clear();
828 #endif
829 ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_adjust_roots);
830 ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
831 workers->run_task(&task);
832 #if COMPILER2_OR_JVMCI
833 DerivedPointerTable::update_pointers();
834 #endif
835 }
836
837 ShenandoahAdjustPointersTask adjust_pointers_task;
838 workers->run_task(&adjust_pointers_task);
839 }
840
841 class ShenandoahCompactObjectsClosure : public ObjectClosure {
842 private:
843 ShenandoahHeap* const _heap;
844 const SlidingForwarding* const _forwarding;
845 uint const _worker_id;
846
847 public:
848 ShenandoahCompactObjectsClosure(uint worker_id) :
849 _heap(ShenandoahHeap::heap()), _forwarding(_heap->forwarding()), _worker_id(worker_id) {}
850
851 void do_object(oop p) {
852 assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
853 size_t size = p->size();
854 if (p->is_forwarded()) {
855 HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
856 HeapWord* compact_to = cast_from_oop<HeapWord*>(_forwarding->forwardee(p));
857 Copy::aligned_conjoint_words(compact_from, compact_to, size);
858 oop new_obj = cast_to_oop(compact_to);
859
860 ContinuationGCSupport::relativize_stack_chunk(new_obj);
861 new_obj->init_mark();
862 }
863 }
864 };
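// The compaction copy itself is unchanged; only the destination lookup goes through
// _forwarding->forwardee(p). init_mark() after the copy re-initializes the header, discarding
// any forwarding encoding it may have carried.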
865
866 class ShenandoahCompactObjectsTask : public WorkerTask {
867 private:
868 ShenandoahHeap* const _heap;
869 ShenandoahHeapRegionSet** const _worker_slices;
870
871 public:
872 ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
873 WorkerTask("Shenandoah Compact Objects"),
874 _heap(ShenandoahHeap::heap()),
875 _worker_slices(worker_slices) {
876 }
933 }
934
935 r->set_live_data(live);
936 r->reset_alloc_metadata();
937 _live += live;
938 }
939
940 size_t get_live() {
941 return _live;
942 }
943 };
944
945 void ShenandoahFullGC::compact_humongous_objects() {
946 // Compact humongous regions, based on their fwdptr objects.
947 //
948 // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
949 // humongous regions are already compacted, and do not require further moves, which alleviates
950 // sliding costs. We may consider doing this in parallel in the future.
951
952 ShenandoahHeap* heap = ShenandoahHeap::heap();
953 const SlidingForwarding* const forwarding = heap->forwarding();
954
955 for (size_t c = heap->num_regions(); c > 0; c--) {
956 ShenandoahHeapRegion* r = heap->get_region(c - 1);
957 if (r->is_humongous_start()) {
958 oop old_obj = cast_to_oop(r->bottom());
959 if (!old_obj->is_forwarded()) {
960 // No need to move the object; it stays in the same slot
961 continue;
962 }
963 size_t words_size = old_obj->size();
964 size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
965
966 size_t old_start = r->index();
967 size_t old_end = old_start + num_regions - 1;
968 size_t new_start = heap->heap_region_index_containing(forwarding->forwardee(old_obj));
969 size_t new_end = new_start + num_regions - 1;
970 assert(old_start != new_start, "must be real move");
971 assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());
972
973 Copy::aligned_conjoint_words(r->bottom(), heap->get_region(new_start)->bottom(), words_size);
974 ContinuationGCSupport::relativize_stack_chunk(cast_to_oop<HeapWord*>(r->bottom()));
975
976 oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
977 new_obj->init_mark();
978
979 {
980 for (size_t c = old_start; c <= old_end; c++) {
981 ShenandoahHeapRegion* r = heap->get_region(c);
982 r->make_regular_bypass();
983 r->set_top(r->bottom());
984 }
985
986 for (size_t c = new_start; c <= new_end; c++) {
987 ShenandoahHeapRegion* r = heap->get_region(c);
988 if (c == new_start) {
|