 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "compiler/oopMap.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
// ...

    // c. Update roots if this full GC is due to evac-oom, in which case roots may carry from-space pointers.
    if (has_forwarded_objects) {
      update_roots(true /*full_gc*/);
    }

    // d. Reset the bitmaps for new marking
    heap->reset_mark_bitmap();
    assert(heap->marking_context()->is_bitmap_clear(), "sanity");
    assert(!heap->marking_context()->is_complete(), "sanity");

    // e. Abandon reference discovery and clear all discovered references.
    ShenandoahReferenceProcessor* rp = heap->ref_processor();
    rp->abandon_partial_discovery();

    // f. Sync pinned region status from the CP marks
    heap->sync_pinned_region_status();

    // The rest of the prologue:
    _preserved_marks->init(heap->workers()->active_workers());

    assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
  }

  if (UseTLAB) {
    heap->gclabs_retire(ResizeTLAB);
    heap->tlabs_retire(ResizeTLAB);
  }

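  // Publish the state changes above (retired TLABs/GCLABs, reset marking state) to all threads before marking starts.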
  OrderAccess::fence();

  phase1_mark_heap();

  // Once marking is done (it may have fixed up forwarded objects), we can drop the has-forwarded-objects flag.
  // Coming out of Full GC, we would not have any forwarded objects.
  // This also prevents fwdptr-based resolves from kicking in while adjusting pointers in phase 3.
  heap->set_has_forwarded_objects(false);

  heap->set_full_gc_move_in_progress(true);

// ...

  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahPrepareForMarkClosure cl;
  heap->heap_region_iterate(&cl);

  heap->set_unload_classes(heap->heuristics()->can_unload_classes());

  ShenandoahReferenceProcessor* rp = heap->ref_processor();
  // Full GC uses the most aggressive soft reference policy: purge all soft references.
  rp->set_soft_reference_policy(true);

  ShenandoahSTWMark mark(true /*full_gc*/);
  mark.mark();
  heap->parallel_cleaning(true /* full_gc */);
}

class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  PreservedMarks* const _preserved_marks;
  ShenandoahHeap* const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion* _to_region;
  ShenandoahHeapRegion* _from_region;
  HeapWord* _compact_point;

public:
  ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
                                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                              ShenandoahHeapRegion* to_region) :
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(NULL),
    _compact_point(to_region->bottom()) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  void finish_region() {
    assert(_to_region != NULL, "should not happen");
    _to_region->set_new_top(_compact_point);
  }

  bool is_compact_same_region() {
    return _from_region == _to_region;
  }

  // ...

      // Object doesn't fit. Pick the next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty regions? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != NULL, "must not be NULL");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // Object fits into the current region; record its new location:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(NULL, p);
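    // forward_to() overwrites the mark word, so preserve any non-trivial mark first; it is restored after compaction.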
    _preserved_marks->push_if_necessary(p, p->mark());
    p->forward_to(cast_to_oop(_compact_point));
    _compact_point += obj_size;
  }
};

class ShenandoahPrepareForCompactionTask : public WorkerTask {
private:
  PreservedMarksSet* const _preserved_marks;
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahPrepareForCompactionTask(PreservedMarksSet* preserved_marks, ShenandoahHeapRegionSet** worker_slices) :
    WorkerTask("Shenandoah Prepare For Compaction"),
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
  }

  static bool is_candidate_region(ShenandoahHeapRegion* r) {
    // Empty region: get it into the slice to defragment the slice itself.
    // We could have skipped this without violating correctness, but we really
    // ...

      }

      // Compacted the region somewhere else? Then the from-region is now empty.
      if (!cl.is_compact_same_region()) {
        empty_regions.append(from_region);
      }
      from_region = it.next();
    }
    cl.finish_region();

    // Mark all remaining regions as empty
    for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
      ShenandoahHeapRegion* r = empty_regions.at(pos);
      r->set_new_top(r->bottom());
    }
  }
};

void ShenandoahFullGC::calculate_target_humongous_objects() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compute the new addresses for humongous objects. We need to do this after addresses
  // for regular objects are calculated, and we know which regions in the heap suffix are
  // available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // a humongous start region there.
  //
  // The complication is that non-movable regions may turn up during the scan. If such a
  // region is detected, then sliding restarts from that non-movable region.

  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->index();
      continue;
    }

    if (r->is_humongous_start() && r->is_stw_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = cast_to_oop(r->bottom());
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

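      // Try to place the object at the bottom of the current compaction window.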
      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->index()) {
        // Fits into the current window, and the move is non-trivial. Record the move, and continue the scan.
        _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
        old_obj->forward_to(cast_to_oop(heap->get_region(start)->bottom()));
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Restart the compaction window at the current region.
    to_begin = r->index();
    to_end = r->index();
  }
}

class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_trash()) {
      r->recycle();
// ...

  // Compute the new addresses for regular objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);

    distribute_slices(worker_slices);

    ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
    heap->workers()->run_task(&task);
  }

  // Compute the new addresses for humongous objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
    calculate_target_humongous_objects();
  }
}

class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      assert(_ctx->is_marked(obj), "must be marked");
      if (obj->is_forwarded()) {
        oop forw = obj->forwardee();
        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
      }
    }
  }

public:
  ShenandoahAdjustPointersClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void do_oop(oop* p) { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahAdjustPointersClosure _cl;

public:
  ShenandoahAdjustPointersObjectClosure() :
    _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    p->oop_iterate(&_cl);
  }
};
// ...

      }
      r = _regions.next();
    }
  }
};

class ShenandoahAdjustRootPointersTask : public WorkerTask {
private:
  ShenandoahRootAdjuster* _rp;
  PreservedMarksSet* _preserved_marks;
public:
  ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
    WorkerTask("Shenandoah Adjust Root Pointers"),
    _rp(rp),
    _preserved_marks(preserved_marks) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersClosure cl;
    _rp->roots_do(worker_id, &cl);
    _preserved_marks->get(worker_id)->adjust_during_full_gc();
  }
};

void ShenandoahFullGC::phase3_update_references() {
  GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
  ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  WorkerThreads* workers = heap->workers();
  uint nworkers = workers->active_workers();
  {
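    // C2/JVMCI-compiled frames may hold derived pointers; collect them while adjusting roots, then update them in bulk.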
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_adjust_roots);
    ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
    workers->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  ShenandoahAdjustPointersTask adjust_pointers_task;
  workers->run_task(&adjust_pointers_task);
}

class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  uint const _worker_id;

public:
  ShenandoahCompactObjectsClosure(uint worker_id) :
    _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}

  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    size_t size = p->size();
    if (p->is_forwarded()) {
      HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
      HeapWord* compact_to = cast_from_oop<HeapWord*>(p->forwardee());
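      // Source and destination may overlap; the conjoint copy handles that safely.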
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
      oop new_obj = cast_to_oop(compact_to);
      new_obj->init_mark();
    }
  }
};

class ShenandoahCompactObjectsTask : public WorkerTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
    WorkerTask("Shenandoah Compact Objects"),
    _heap(ShenandoahHeap::heap()),
    _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
// ...

    }

    r->set_live_data(live);
    r->reset_alloc_metadata();
    _live += live;
  }

  size_t get_live() {
    return _live;
  }
};

void ShenandoahFullGC::compact_humongous_objects() {
  // Compact humongous regions, based on the forwarding pointers installed in their start objects.
  //
  // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
  // humongous regions are already compacted and do not require further moves, which alleviates
  // sliding costs. We may consider doing this in parallel in the future.

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_start()) {
      oop old_obj = cast_to_oop(r->bottom());
      if (!old_obj->is_forwarded()) {
        // No need to move the object; it stays in the same slot.
        continue;
      }
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t old_start = r->index();
      size_t old_end = old_start + num_regions - 1;
      size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
      size_t new_end = new_start + num_regions - 1;
      assert(old_start != new_start, "must be real move");
      assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());

      Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
                                   heap->get_region(new_start)->bottom(),
                                   words_size);

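      // Re-initialize the mark word of the moved object: the old mark carried the forwarding pointer.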
      oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
      new_obj->init_mark();

      {
        for (size_t c = old_start; c <= old_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          r->make_regular_bypass();
          r->set_top(r->bottom());
        }

        for (size_t c = new_start; c <= new_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          // ...

// ---------------------------------------------------------------------------
// Updated version of the same file, using SlidingForwarding (note the new
// gc/shared/slidingForwarding.inline.hpp include and the forwarding-table
// calls below):
// ---------------------------------------------------------------------------

 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "compiler/oopMap.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/slidingForwarding.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
// ...

    // c. Update roots if this full GC is due to evac-oom, in which case roots may carry from-space pointers.
    if (has_forwarded_objects) {
      update_roots(true /*full_gc*/);
    }

    // d. Reset the bitmaps for new marking
    heap->reset_mark_bitmap();
    assert(heap->marking_context()->is_bitmap_clear(), "sanity");
    assert(!heap->marking_context()->is_complete(), "sanity");

    // e. Abandon reference discovery and clear all discovered references.
    ShenandoahReferenceProcessor* rp = heap->ref_processor();
    rp->abandon_partial_discovery();

    // f. Sync pinned region status from the CP marks
    heap->sync_pinned_region_status();

    // The rest of the prologue:
    _preserved_marks->init(heap->workers()->active_workers());
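    // Reset the sliding-forwarding table before this cycle records any new forwardings.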
    heap->forwarding()->clear();

    assert(heap->has_forwarded_objects() == has_forwarded_objects, "This should not change");
  }

  if (UseTLAB) {
    heap->gclabs_retire(ResizeTLAB);
    heap->tlabs_retire(ResizeTLAB);
  }

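  // Publish the state changes above (retired TLABs/GCLABs, reset marking state) to all threads before marking starts.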
  OrderAccess::fence();

  phase1_mark_heap();

  // Once marking is done (it may have fixed up forwarded objects), we can drop the has-forwarded-objects flag.
  // Coming out of Full GC, we would not have any forwarded objects.
  // This also prevents fwdptr-based resolves from kicking in while adjusting pointers in phase 3.
  heap->set_has_forwarded_objects(false);

  heap->set_full_gc_move_in_progress(true);

// ...

  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahPrepareForMarkClosure cl;
  heap->heap_region_iterate(&cl);

  heap->set_unload_classes(heap->heuristics()->can_unload_classes());

  ShenandoahReferenceProcessor* rp = heap->ref_processor();
  // Full GC uses the most aggressive soft reference policy: purge all soft references.
  rp->set_soft_reference_policy(true);

  ShenandoahSTWMark mark(true /*full_gc*/);
  mark.mark();
  heap->parallel_cleaning(true /* full_gc */);
}

class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  PreservedMarks* const _preserved_marks;
  SlidingForwarding* const _forwarding;
  ShenandoahHeap* const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion* _to_region;
  ShenandoahHeapRegion* _from_region;
  HeapWord* _compact_point;

public:
  ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
                                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                              ShenandoahHeapRegion* to_region) :
    _preserved_marks(preserved_marks),
    _forwarding(ShenandoahHeap::heap()->forwarding()),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(NULL),
    _compact_point(to_region->bottom()) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  void finish_region() {
    assert(_to_region != NULL, "should not happen");
    _to_region->set_new_top(_compact_point);
  }

  bool is_compact_same_region() {
    return _from_region == _to_region;
  }

  // ...

      // Object doesn't fit. Pick the next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty regions? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != NULL, "must not be NULL");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // Object fits into the current region; record its new location:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(NULL, p);
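    // As with plain forwarding pointers, preserve any non-trivial mark word before recording the new location.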
    _preserved_marks->push_if_necessary(p, p->mark());
    _forwarding->forward_to(p, cast_to_oop(_compact_point));
    _compact_point += obj_size;
  }
};

class ShenandoahPrepareForCompactionTask : public WorkerTask {
private:
  PreservedMarksSet* const _preserved_marks;
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahPrepareForCompactionTask(PreservedMarksSet* preserved_marks, ShenandoahHeapRegionSet** worker_slices) :
    WorkerTask("Shenandoah Prepare For Compaction"),
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
  }

  static bool is_candidate_region(ShenandoahHeapRegion* r) {
    // Empty region: get it into the slice to defragment the slice itself.
    // We could have skipped this without violating correctness, but we really
    // ...

      }

      // Compacted the region somewhere else? Then the from-region is now empty.
      if (!cl.is_compact_same_region()) {
        empty_regions.append(from_region);
      }
      from_region = it.next();
    }
    cl.finish_region();

    // Mark all remaining regions as empty
    for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
      ShenandoahHeapRegion* r = empty_regions.at(pos);
      r->set_new_top(r->bottom());
    }
  }
};

void ShenandoahFullGC::calculate_target_humongous_objects() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  SlidingForwarding* forwarding = heap->forwarding();

  // Compute the new addresses for humongous objects. We need to do this after addresses
  // for regular objects are calculated, and we know which regions in the heap suffix are
  // available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // a humongous start region there.
  //
  // The complication is that non-movable regions may turn up during the scan. If such a
  // region is detected, then sliding restarts from that non-movable region.

  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->index();
      continue;
    }

    if (r->is_humongous_start() && r->is_stw_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = cast_to_oop(r->bottom());
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

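      // Try to place the object at the bottom of the current compaction window.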
      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->index()) {
        // Fits into the current window, and the move is non-trivial. Record the move, and continue the scan.
        _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
        forwarding->forward_to(old_obj, cast_to_oop(heap->get_region(start)->bottom()));
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Restart the compaction window at the current region.
    to_begin = r->index();
    to_end = r->index();
  }
}

class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_trash()) {
      r->recycle();
// ...

  // Compute the new addresses for regular objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);

    distribute_slices(worker_slices);

    ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
    heap->workers()->run_task(&task);
  }

  // Compute the new addresses for humongous objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
    calculate_target_humongous_objects();
  }
}

class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  const SlidingForwarding* const _forwarding;
  ShenandoahMarkingContext* const _ctx;

  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      assert(_ctx->is_marked(obj), "must be marked");
      if (obj->is_forwarded()) {
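        // Decode the new location from the sliding-forwarding table.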
        oop forw = _forwarding->forwardee(obj);
        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
      }
    }
  }

public:
  ShenandoahAdjustPointersClosure() :
    _heap(ShenandoahHeap::heap()),
    _forwarding(_heap->forwarding()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void do_oop(oop* p) { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahAdjustPointersClosure _cl;

public:
  ShenandoahAdjustPointersObjectClosure() :
    _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    p->oop_iterate(&_cl);
  }
};
// ...

      }
      r = _regions.next();
    }
  }
};

class ShenandoahAdjustRootPointersTask : public WorkerTask {
private:
  ShenandoahRootAdjuster* _rp;
  PreservedMarksSet* _preserved_marks;
public:
  ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
    WorkerTask("Shenandoah Adjust Root Pointers"),
    _rp(rp),
    _preserved_marks(preserved_marks) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersClosure cl;
    _rp->roots_do(worker_id, &cl);
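    // Preserved marks hold pre-forwarding mark words; adjusting them needs the forwarding table to look up new locations.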
    const SlidingForwarding* const forwarding = ShenandoahHeap::heap()->forwarding();
    _preserved_marks->get(worker_id)->adjust_during_full_gc(forwarding);
  }
};

void ShenandoahFullGC::phase3_update_references() {
  GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
  ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  WorkerThreads* workers = heap->workers();
  uint nworkers = workers->active_workers();
  {
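    // C2/JVMCI-compiled frames may hold derived pointers; collect them while adjusting roots, then update them in bulk.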
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_adjust_roots);
    ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
    workers->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  ShenandoahAdjustPointersTask adjust_pointers_task;
  workers->run_task(&adjust_pointers_task);
}

class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  const SlidingForwarding* const _forwarding;
  uint const _worker_id;

public:
  ShenandoahCompactObjectsClosure(uint worker_id) :
    _heap(ShenandoahHeap::heap()), _forwarding(_heap->forwarding()), _worker_id(worker_id) {}

  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    size_t size = p->size();
    if (p->is_forwarded()) {
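      // Look up the destination in the forwarding table; the conjoint copy below is overlap-safe.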
      HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
      HeapWord* compact_to = cast_from_oop<HeapWord*>(_forwarding->forwardee(p));
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
      oop new_obj = cast_to_oop(compact_to);
      new_obj->init_mark();
    }
  }
};

class ShenandoahCompactObjectsTask : public WorkerTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
    WorkerTask("Shenandoah Compact Objects"),
    _heap(ShenandoahHeap::heap()),
    _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
// ...

    }

    r->set_live_data(live);
    r->reset_alloc_metadata();
    _live += live;
  }

  size_t get_live() {
    return _live;
  }
};

void ShenandoahFullGC::compact_humongous_objects() {
  // Compact humongous regions, based on the forwardings recorded for their start objects.
  //
  // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
  // humongous regions are already compacted and do not require further moves, which alleviates
  // sliding costs. We may consider doing this in parallel in the future.

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  const SlidingForwarding* const forwarding = heap->forwarding();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_start()) {
      oop old_obj = cast_to_oop(r->bottom());
      if (!old_obj->is_forwarded()) {
        // No need to move the object; it stays in the same slot.
        continue;
      }
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t old_start = r->index();
      size_t old_end = old_start + num_regions - 1;
      size_t new_start = heap->heap_region_index_containing(forwarding->forwardee(old_obj));
      size_t new_end = new_start + num_regions - 1;
      assert(old_start != new_start, "must be real move");
      assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());

      Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
                                   heap->get_region(new_start)->bottom(),
                                   words_size);

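      // Re-initialize the mark word of the moved object: the old mark carried the forwarding information.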
      oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
      new_obj->init_mark();

      {
        for (size_t c = old_start; c <= old_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          r->make_regular_bypass();
          r->set_top(r->bottom());
        }

        for (size_t c = new_start; c <= new_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          // ...