 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "compiler/oopMap.hpp"
#include "gc/shared/continuationGCSupport.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
  // Coming out of Full GC, we would not have any forwarded objects.
  // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
  heap->set_has_forwarded_objects(false);

  heap->set_full_gc_move_in_progress(true);

  // Setup workers for the rest
  OrderAccess::fence();

  // Initialize worker slices
  ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
  for (uint i = 0; i < heap->max_workers(); i++) {
    worker_slices[i] = new ShenandoahHeapRegionSet();
  }

  {
    // The rest of the code performs region moves, where region status is undefined
    // until all phases have run to completion.
    ShenandoahHeapLocker lock(heap->lock());

    phase2_calculate_target_addresses(worker_slices);

    OrderAccess::fence();

    phase3_update_references();

    phase4_compact_objects(worker_slices);
  }

  {
    // Epilogue
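    // PreservedMarks holds the original mark words of objects whose headers were
    // overwritten by forwarding pointers; restore them now that every object
    // sits at its final address.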
    _preserved_marks->restore(heap->workers());
    _preserved_marks->reclaim();
  }

  // Resize metaspace
  MetaspaceGC::compute_new_size();

  // Free worker slices
  for (uint i = 0; i < heap->max_workers(); i++) {
    delete worker_slices[i];
  }
  FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);

  heap->set_full_gc_move_in_progress(false);
  heap->set_full_gc_in_progress(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_fullgc();
  }

  if (VerifyAfterGC) {
    Universe::verify();
void ShenandoahFullGC::phase1_mark_heap() {
  GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahPrepareForMarkClosure cl;
  heap->heap_region_iterate(&cl);

  heap->set_unload_classes(heap->heuristics()->can_unload_classes());

  ShenandoahReferenceProcessor* rp = heap->ref_processor();
  // enable ("weak") refs discovery
  rp->set_soft_reference_policy(true); // forcefully purge all soft references

  ShenandoahSTWMark mark(true /*full_gc*/);
  mark.mark();
  heap->parallel_cleaning(true /* full_gc */);
}

class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  PreservedMarks* const _preserved_marks;
  ShenandoahHeap* const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion* _to_region;
  ShenandoahHeapRegion* _from_region;
  HeapWord* _compact_point;

public:
  ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
                                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                              ShenandoahHeapRegion* to_region) :
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(nullptr),
      // Object doesn't fit. Pick next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty regions? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != nullptr, "must not be null");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // Object fits into current region, record new location:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(nullptr, p);
    _preserved_marks->push_if_necessary(p, p->mark());
    p->forward_to(cast_to_oop(_compact_point));
    _compact_point += obj_size;
  }
};

class ShenandoahPrepareForCompactionTask : public WorkerTask {
private:
  PreservedMarksSet* const _preserved_marks;
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahPrepareForCompactionTask(PreservedMarksSet* preserved_marks, ShenandoahHeapRegionSet** worker_slices) :
    WorkerTask("Shenandoah Prepare For Compaction"),
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
  }

  static bool is_candidate_region(ShenandoahHeapRegion* r) {
    // Empty region: get it into the slice to defragment the slice itself.
    // We could have skipped this without violating correctness, but we really
    // want to compact all live regions to the start of the heap, which sometimes
    // means moving them into the fully empty regions.
    if (r->is_empty()) return true;

    // The region can be moved, and it is not humongous. Humongous regions are
    // handled separately.
    return r->is_stw_move_allowed() && !r->is_humongous();
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
    ShenandoahHeapRegionSetIterator it(slice);
    ShenandoahHeapRegion* from_region = it.next();
    // No work?
    if (from_region == nullptr) {
      return;
    }

    // Sliding compaction. Walk all regions in the slice, and compact them.
    // Remember empty regions and reuse them as needed.
    ResourceMark rm;

    GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());

    ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);

    while (from_region != nullptr) {
      assert(is_candidate_region(from_region), "Sanity");

      cl.set_from_region(from_region);
      if (from_region->has_live()) {
        _heap->marked_object_iterate(from_region, &cl);
      }

      // Compacted the region to somewhere else? From-region is empty then.
      if (!cl.is_compact_same_region()) {
        empty_regions.append(from_region);
      }
      from_region = it.next();
    }
    cl.finish_region();

    // Mark all remaining regions as empty
    for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
      ShenandoahHeapRegion* r = empty_regions.at(pos);
      r->set_new_top(r->bottom());
    }
  }
};

void ShenandoahFullGC::calculate_target_humongous_objects() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compute the new addresses for humongous objects. We need to do this after addresses
  // for regular objects are calculated, and we know which regions in the heap suffix are
  // available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // a humongous start there.
  //
  // The complication is potential non-movable regions during the scan. If such a region is
  // detected, then sliding restarts towards that non-movable region.
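  //
  // A hypothetical example: with 16 regions, if regions 13..15 are empty
  // (to_begin = 13, to_end = 16) and the scan meets a movable humongous object
  // spanning two regions at index 7, then start = 16 - 2 = 14 >= to_begin, so
  // the object is forwarded to regions 14..15 and the window shrinks to [13; 14).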

  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->index();
      continue;
    }

    if (r->is_humongous_start() && r->is_stw_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = cast_to_oop(r->bottom());
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->index()) {
        // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
        _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
        old_obj->forward_to(cast_to_oop(heap->get_region(start)->bottom()));
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Scan starting from current region.
    to_begin = r->index();
    to_end = r->index();
  }
}

class ShenandoahEnsureHeapActiveClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_trash()) {
      r->recycle();
    }
    if (r->is_cset()) {
      r->make_regular_bypass();
    }
    if (r->is_empty_uncommitted()) {
      r->make_committed_bypass();
    }
    assert(r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index());

    // Record current region occupancy: this communicates to the rest of the
    // Full GC code that empty regions are free.
    heap->heap_region_iterate(&ecl);
  }

  // Compute the new addresses for regular objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);

    distribute_slices(worker_slices);

    ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
    heap->workers()->run_task(&task);
  }

  // Compute the new addresses for humongous objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
    calculate_target_humongous_objects();
  }
}

class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      assert(_ctx->is_marked(obj), "must be marked");
      if (obj->is_forwarded()) {
        oop forw = obj->forwardee();
        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
      }
    }
  }

public:
  ShenandoahAdjustPointersClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_method(Method* m) {}
  void do_nmethod(nmethod* nm) {}
};

class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahAdjustPointersClosure _cl;

public:
  ShenandoahAdjustPointersObjectClosure() :
    _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    p->oop_iterate(&_cl);
  }
};

class ShenandoahAdjustPointersTask : public WorkerTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahRegionIterator _regions;

public:
  ShenandoahAdjustPointersTask() :
    WorkerTask("Shenandoah Adjust Pointers"),
    _heap(ShenandoahHeap::heap()) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersObjectClosure obj_cl;
    ShenandoahHeapRegion* r = _regions.next();
    while (r != nullptr) {
      if (!r->is_humongous_continuation() && r->has_live()) {
        _heap->marked_object_iterate(r, &obj_cl);
      }
      r = _regions.next();
    }
  }
};

class ShenandoahAdjustRootPointersTask : public WorkerTask {
private:
  ShenandoahRootAdjuster* _rp;
  PreservedMarksSet* _preserved_marks;

public:
  ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
    WorkerTask("Shenandoah Adjust Root Pointers"),
    _rp(rp),
    _preserved_marks(preserved_marks) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersClosure cl;
    _rp->roots_do(worker_id, &cl);
    _preserved_marks->get(worker_id)->adjust_during_full_gc();
  }
};

void ShenandoahFullGC::phase3_update_references() {
  GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
  ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  WorkerThreads* workers = heap->workers();
  uint nworkers = workers->active_workers();
  {
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_adjust_roots);
    ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
    workers->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  ShenandoahAdjustPointersTask adjust_pointers_task;
  workers->run_task(&adjust_pointers_task);
}

class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  uint const _worker_id;

public:
  ShenandoahCompactObjectsClosure(uint worker_id) :
    _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}

  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    size_t size = p->size();
    if (p->is_forwarded()) {
      HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
      HeapWord* compact_to = cast_from_oop<HeapWord*>(p->forwardee());
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
      oop new_obj = cast_to_oop(compact_to);

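      // Stack chunk objects (Loom continuations) contain pointers into their
      // own payload; fix those up for the object's new location.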
      ContinuationGCSupport::relativize_stack_chunk(new_obj);
      new_obj->init_mark();
    }
  }
};

class ShenandoahCompactObjectsTask : public WorkerTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
    WorkerTask("Shenandoah Compact Objects"),
    _heap(ShenandoahHeap::heap()),
    _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);

    ShenandoahCompactObjectsClosure cl(worker_id);
    ShenandoahHeapRegion* r = slice.next();
    while (r != nullptr) {
      assert(!r->is_humongous(), "must not get humongous regions here");
      if (r->has_live()) {
        _heap->marked_object_iterate(r, &cl);
      }
      r->set_top(r->new_top());
      r = slice.next();
    }
  }
};

class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  size_t _live;

public:
  ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) {
    _heap->free_set()->clear();
  }

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->is_cset(), "cset regions should have been demoted already");

    // Need to reset the complete-top-at-mark-start pointer here because
    // the complete marking bitmap is no longer valid. This ensures
    // size-based iteration in marked_object_iterate().
    // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
    // pinned regions.
    if (r->is_regular() && live == 0) {
      r->make_trash();
    }

    // Recycle all trash regions
    if (r->is_trash()) {
      live = 0;
      r->recycle();
    }

    r->set_live_data(live);
    r->reset_alloc_metadata();
    _live += live;
  }

  size_t get_live() {
    return _live;
  }
};

void ShenandoahFullGC::compact_humongous_objects() {
  // Compact humongous regions, based on their fwdptr objects.
  //
  // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
  // humongous regions are already compacted, and do not require further moves, which alleviates
  // sliding costs. We may consider doing this in parallel in the future.

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_start()) {
      oop old_obj = cast_to_oop(r->bottom());
      if (!old_obj->is_forwarded()) {
        // No need to move the object, it stays at the same slot
        continue;
      }
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t old_start = r->index();
      size_t old_end   = old_start + num_regions - 1;
      size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
      size_t new_end   = new_start + num_regions - 1;
      assert(old_start != new_start, "must be real move");
      assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());

      Copy::aligned_conjoint_words(r->bottom(), heap->get_region(new_start)->bottom(), words_size);
      ContinuationGCSupport::relativize_stack_chunk(cast_to_oop<HeapWord*>(r->bottom()));

      oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
      new_obj->init_mark();

      {
        for (size_t c = old_start; c <= old_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          r->make_regular_bypass();
          r->set_top(r->bottom());
        }

        for (size_t c = new_start; c <= new_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          if (c == new_start) {
            r->make_humongous_start_bypass();
          } else {
            r->make_humongous_cont_bypass();
          }

          // Trailing region may be non-full, record the remainder there
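          // (region_size_words is a power of two, so masking with
          // region_size_words_mask() yields words_size % region_size_words)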
          size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
          if ((c == new_end) && (remainder != 0)) {
            r->set_top(r->bottom() + remainder);
          } else {
            r->set_top(r->end());
          }

          r->reset_alloc_metadata();
        }
      }
    }
  }
}

// This is slightly different from ShHeap::reset_next_mark_bitmap:
// we need to remain able to walk pinned regions.
// Since pinned regions do not move and don't get compacted, we will get holes with
// unreachable objects in them (which may have pointers to unloaded Klasses and thus
// cannot be iterated over using oop->size()). The only way to safely iterate over those
// is using a valid marking bitmap and a valid TAMS pointer. This class only resets marking
// bitmaps for un-pinned regions, and later we only reset TAMS for unpinned regions.
class ShenandoahMCResetCompleteBitmapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahMCResetCompleteBitmapTask() :
    WorkerTask("Shenandoah Reset Bitmap") {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "compiler/oopMap.hpp"
#include "gc/shared/continuationGCSupport.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/slidingForwarding.inline.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahConcurrentGC.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahFullGC.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
  // Coming out of Full GC, we would not have any forwarded objects.
  // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
  heap->set_has_forwarded_objects(false);

  heap->set_full_gc_move_in_progress(true);

  // Setup workers for the rest
  OrderAccess::fence();

  // Initialize worker slices
  ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
  for (uint i = 0; i < heap->max_workers(); i++) {
    worker_slices[i] = new ShenandoahHeapRegionSet();
  }

  {
    // The rest of the code performs region moves, where region status is undefined
    // until all phases have run to completion.
    ShenandoahHeapLocker lock(heap->lock());

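    // Set up the compact forwarding scheme for this cycle; paired with
    // SlidingForwarding::end() in the epilogue below.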
    SlidingForwarding::begin();

    phase2_calculate_target_addresses(worker_slices);

    OrderAccess::fence();

    phase3_update_references();

    phase4_compact_objects(worker_slices);
  }

  {
    // Epilogue
    _preserved_marks->restore(heap->workers());
    _preserved_marks->reclaim();
    SlidingForwarding::end();
  }

  // Resize metaspace
  MetaspaceGC::compute_new_size();

  // Free worker slices
  for (uint i = 0; i < heap->max_workers(); i++) {
    delete worker_slices[i];
  }
  FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);

  heap->set_full_gc_move_in_progress(false);
  heap->set_full_gc_in_progress(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_fullgc();
  }

  if (VerifyAfterGC) {
    Universe::verify();
void ShenandoahFullGC::phase1_mark_heap() {
  GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahPrepareForMarkClosure cl;
  heap->heap_region_iterate(&cl);

  heap->set_unload_classes(heap->heuristics()->can_unload_classes());

  ShenandoahReferenceProcessor* rp = heap->ref_processor();
  // enable ("weak") refs discovery
  rp->set_soft_reference_policy(true); // forcefully purge all soft references

  ShenandoahSTWMark mark(true /*full_gc*/);
  mark.mark();
  heap->parallel_cleaning(true /* full_gc */);
}

template <bool ALT_FWD>
class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  PreservedMarks* const _preserved_marks;
  ShenandoahHeap* const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion* _to_region;
  ShenandoahHeapRegion* _from_region;
  HeapWord* _compact_point;

public:
  ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
                                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                              ShenandoahHeapRegion* to_region) :
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(nullptr),
      // Object doesn't fit. Pick next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty regions? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != nullptr, "must not be null");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // Object fits into current region, record new location:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(nullptr, p);
    _preserved_marks->push_if_necessary(p, p->mark());
    SlidingForwarding::forward_to<ALT_FWD>(p, cast_to_oop(_compact_point));
    _compact_point += obj_size;
  }
};

class ShenandoahPrepareForCompactionTask : public WorkerTask {
private:
  PreservedMarksSet* const _preserved_marks;
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahPrepareForCompactionTask(PreservedMarksSet* preserved_marks, ShenandoahHeapRegionSet** worker_slices) :
    WorkerTask("Shenandoah Prepare For Compaction"),
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
  }

  static bool is_candidate_region(ShenandoahHeapRegion* r) {
    // Empty region: get it into the slice to defragment the slice itself.
    // We could have skipped this without violating correctness, but we really
    // want to compact all live regions to the start of the heap, which sometimes
    // means moving them into the fully empty regions.
    if (r->is_empty()) return true;

    // The region can be moved, and it is not humongous. Humongous regions are
    // handled separately.
    return r->is_stw_move_allowed() && !r->is_humongous();
  }

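  // Choose the forwarding encoding once per task, so the per-object loop in
  // work_impl() is instantiated for a fixed ALT_FWD value.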
  void work(uint worker_id) {
    if (UseAltGCForwarding) {
      work_impl<true>(worker_id);
    } else {
      work_impl<false>(worker_id);
    }
  }

private:
  template <bool ALT_FWD>
  void work_impl(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
    ShenandoahHeapRegionSetIterator it(slice);
    ShenandoahHeapRegion* from_region = it.next();
    // No work?
    if (from_region == nullptr) {
      return;
    }

    // Sliding compaction. Walk all regions in the slice, and compact them.
    // Remember empty regions and reuse them as needed.
    ResourceMark rm;

    GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());

    ShenandoahPrepareForCompactionObjectClosure<ALT_FWD> cl(_preserved_marks->get(worker_id), empty_regions, from_region);

    while (from_region != nullptr) {
      assert(is_candidate_region(from_region), "Sanity");

      cl.set_from_region(from_region);
      if (from_region->has_live()) {
        _heap->marked_object_iterate(from_region, &cl);
      }

      // Compacted the region to somewhere else? From-region is empty then.
      if (!cl.is_compact_same_region()) {
        empty_regions.append(from_region);
      }
      from_region = it.next();
    }
    cl.finish_region();

    // Mark all remaining regions as empty
    for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
      ShenandoahHeapRegion* r = empty_regions.at(pos);
      r->set_new_top(r->bottom());
    }
  }
};

template <bool ALT_FWD>
void ShenandoahFullGC::calculate_target_humongous_objects_impl() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compute the new addresses for humongous objects. We need to do this after addresses
  // for regular objects are calculated, and we know which regions in the heap suffix are
  // available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // a humongous start there.
  //
  // The complication is potential non-movable regions during the scan. If such a region is
  // detected, then sliding restarts towards that non-movable region.

  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->index();
      continue;
    }

    if (r->is_humongous_start() && r->is_stw_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = cast_to_oop(r->bottom());
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->index()) {
        // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
        _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
        SlidingForwarding::forward_to<ALT_FWD>(old_obj, cast_to_oop(heap->get_region(start)->bottom()));
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Scan starting from current region.
    to_begin = r->index();
    to_end = r->index();
  }
}

void ShenandoahFullGC::calculate_target_humongous_objects() {
  if (UseAltGCForwarding) {
    calculate_target_humongous_objects_impl<true>();
  } else {
    calculate_target_humongous_objects_impl<false>();
  }
}

class ShenandoahEnsureHeapActiveClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_trash()) {
      r->recycle();
    }
    if (r->is_cset()) {
      r->make_regular_bypass();
    }
    if (r->is_empty_uncommitted()) {
      r->make_committed_bypass();
    }
    assert(r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index());

    // Record current region occupancy: this communicates to the rest of the
    // Full GC code that empty regions are free.
    heap->heap_region_iterate(&ecl);
  }

  // Compute the new addresses for regular objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);

    distribute_slices(worker_slices);

    ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
    heap->workers()->run_task(&task);
  }

  // Compute the new addresses for humongous objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
    calculate_target_humongous_objects();
  }
}

template <bool ALT_FWD>
class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      assert(_ctx->is_marked(obj), "must be marked");
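      // SlidingForwarding::is_forwarded() inspects the object's mark word;
      // forwardee<ALT_FWD>() then decodes the new location under the
      // selected forwarding encoding.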
      if (SlidingForwarding::is_forwarded(obj)) {
        oop forw = SlidingForwarding::forwardee<ALT_FWD>(obj);
        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
      }
    }
  }

public:
  ShenandoahAdjustPointersClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_method(Method* m) {}
  void do_nmethod(nmethod* nm) {}
};

template <bool ALT_FWD>
class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahAdjustPointersClosure<ALT_FWD> _cl;

public:
  ShenandoahAdjustPointersObjectClosure() :
    _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    p->oop_iterate(&_cl);
  }
};

class ShenandoahAdjustPointersTask : public WorkerTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahRegionIterator _regions;

public:
  ShenandoahAdjustPointersTask() :
    WorkerTask("Shenandoah Adjust Pointers"),
    _heap(ShenandoahHeap::heap()) {
  }

private:
  template <bool ALT_FWD>
  void work_impl(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersObjectClosure<ALT_FWD> obj_cl;
    ShenandoahHeapRegion* r = _regions.next();
    while (r != nullptr) {
      if (!r->is_humongous_continuation() && r->has_live()) {
        _heap->marked_object_iterate(r, &obj_cl);
      }
      r = _regions.next();
    }
  }

public:
  void work(uint worker_id) {
    if (UseAltGCForwarding) {
      work_impl<true>(worker_id);
    } else {
      work_impl<false>(worker_id);
    }
  }
};

class ShenandoahAdjustRootPointersTask : public WorkerTask {
private:
  ShenandoahRootAdjuster* _rp;
  PreservedMarksSet* _preserved_marks;

public:
  ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
    WorkerTask("Shenandoah Adjust Root Pointers"),
    _rp(rp),
    _preserved_marks(preserved_marks) {}

private:
  template <bool ALT_FWD>
  void work_impl(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahAdjustPointersClosure<ALT_FWD> cl;
    _rp->roots_do(worker_id, &cl);
    _preserved_marks->get(worker_id)->adjust_during_full_gc();
  }

public:
  void work(uint worker_id) {
    if (UseAltGCForwarding) {
      work_impl<true>(worker_id);
    } else {
      work_impl<false>(worker_id);
    }
  }
};

void ShenandoahFullGC::phase3_update_references() {
  GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
  ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  WorkerThreads* workers = heap->workers();
  uint nworkers = workers->active_workers();
  {
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_adjust_roots);
    ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
    workers->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  ShenandoahAdjustPointersTask adjust_pointers_task;
  workers->run_task(&adjust_pointers_task);
}

template <bool ALT_FWD>
class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  uint const _worker_id;

public:
  ShenandoahCompactObjectsClosure(uint worker_id) :
    _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}

  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    size_t size = p->size();
    if (SlidingForwarding::is_forwarded(p)) {
      HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
      HeapWord* compact_to = cast_from_oop<HeapWord*>(SlidingForwarding::forwardee<ALT_FWD>(p));
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
      oop new_obj = cast_to_oop(compact_to);

      ContinuationGCSupport::relativize_stack_chunk(new_obj);
      new_obj->init_mark();
    }
  }
};

class ShenandoahCompactObjectsTask : public WorkerTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
    WorkerTask("Shenandoah Compact Objects"),
    _heap(ShenandoahHeap::heap()),
    _worker_slices(worker_slices) {
  }

private:
  template <bool ALT_FWD>
  void work_impl(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);

    ShenandoahCompactObjectsClosure<ALT_FWD> cl(worker_id);
    ShenandoahHeapRegion* r = slice.next();
    while (r != nullptr) {
      assert(!r->is_humongous(), "must not get humongous regions here");
      if (r->has_live()) {
        _heap->marked_object_iterate(r, &cl);
      }
      r->set_top(r->new_top());
      r = slice.next();
    }
  }

public:
  void work(uint worker_id) {
    if (UseAltGCForwarding) {
      work_impl<true>(worker_id);
    } else {
      work_impl<false>(worker_id);
    }
  }
};

class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  size_t _live;

public:
  ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) {
    _heap->free_set()->clear();
  }

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->is_cset(), "cset regions should have been demoted already");

    // Need to reset the complete-top-at-mark-start pointer here because
    // the complete marking bitmap is no longer valid. This ensures
    // size-based iteration in marked_object_iterate().
    // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
    // pinned regions.
    if (r->is_regular() && live == 0) {
      r->make_trash();
    }

    // Recycle all trash regions
    if (r->is_trash()) {
      live = 0;
      r->recycle();
    }

    r->set_live_data(live);
    r->reset_alloc_metadata();
    _live += live;
  }

  size_t get_live() {
    return _live;
  }
};

template <bool ALT_FWD>
void ShenandoahFullGC::compact_humongous_objects_impl() {
  // Compact humongous regions, based on their fwdptr objects.
  //
  // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
  // humongous regions are already compacted, and do not require further moves, which alleviates
  // sliding costs. We may consider doing this in parallel in the future.

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_start()) {
      oop old_obj = cast_to_oop(r->bottom());
      if (SlidingForwarding::is_not_forwarded(old_obj)) {
        // No need to move the object, it stays at the same slot
        continue;
      }
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t old_start = r->index();
      size_t old_end   = old_start + num_regions - 1;
      size_t new_start = heap->heap_region_index_containing(SlidingForwarding::forwardee<ALT_FWD>(old_obj));
      size_t new_end   = new_start + num_regions - 1;
      assert(old_start != new_start, "must be real move");
      assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());

      Copy::aligned_conjoint_words(r->bottom(), heap->get_region(new_start)->bottom(), words_size);
      ContinuationGCSupport::relativize_stack_chunk(cast_to_oop<HeapWord*>(r->bottom()));

      oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
      new_obj->init_mark();

      {
        for (size_t c = old_start; c <= old_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          r->make_regular_bypass();
          r->set_top(r->bottom());
        }

        for (size_t c = new_start; c <= new_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          if (c == new_start) {
            r->make_humongous_start_bypass();
          } else {
            r->make_humongous_cont_bypass();
          }

          // Trailing region may be non-full, record the remainder there
          size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
          if ((c == new_end) && (remainder != 0)) {
            r->set_top(r->bottom() + remainder);
          } else {
            r->set_top(r->end());
          }

          r->reset_alloc_metadata();
        }
      }
    }
  }
}

void ShenandoahFullGC::compact_humongous_objects() {
  if (UseAltGCForwarding) {
    compact_humongous_objects_impl<true>();
  } else {
    compact_humongous_objects_impl<false>();
  }
}

// This is slightly different from ShHeap::reset_next_mark_bitmap:
// we need to remain able to walk pinned regions.
// Since pinned regions do not move and don't get compacted, we will get holes with
// unreachable objects in them (which may have pointers to unloaded Klasses and thus
// cannot be iterated over using oop->size()). The only way to safely iterate over those
// is using a valid marking bitmap and a valid TAMS pointer. This class only resets marking
// bitmaps for un-pinned regions, and later we only reset TAMS for unpinned regions.
class ShenandoahMCResetCompleteBitmapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahMCResetCompleteBitmapTask() :
    WorkerTask("Shenandoah Reset Bitmap") {
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();