10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26
27 #include "compiler/oopMap.hpp"
28 #include "gc/shared/gcTraceTime.inline.hpp"
29 #include "gc/shared/preservedMarks.inline.hpp"
30 #include "gc/shared/tlab_globals.hpp"
31 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
32 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
33 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
34 #include "gc/shenandoah/shenandoahFreeSet.hpp"
35 #include "gc/shenandoah/shenandoahFullGC.hpp"
36 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
37 #include "gc/shenandoah/shenandoahMark.inline.hpp"
38 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
39 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
40 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
41 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
42 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
43 #include "gc/shenandoah/shenandoahMetrics.hpp"
44 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
45 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
46 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
47 #include "gc/shenandoah/shenandoahSTWMark.hpp"
48 #include "gc/shenandoah/shenandoahUtils.hpp"
49 #include "gc/shenandoah/shenandoahVerifier.hpp"
205 // Coming out of Full GC, we would not have any forwarded objects.
206 // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
207 heap->set_has_forwarded_objects(false);
208
209 heap->set_full_gc_move_in_progress(true);
210
211 // Setup workers for the rest
212 OrderAccess::fence();
213
214 // Initialize worker slices
215 ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
216 for (uint i = 0; i < heap->max_workers(); i++) {
217 worker_slices[i] = new ShenandoahHeapRegionSet();
218 }
219
220 {
221 // The rest of the code performs region moves, where region status is undefined
222 // until all phases have completed.
223 ShenandoahHeapLocker lock(heap->lock());
224
225 phase2_calculate_target_addresses(worker_slices);
226
227 OrderAccess::fence();
228
229 phase3_update_references();
230
231 phase4_compact_objects(worker_slices);
232 }
233
234 {
235 // Epilogue
236 _preserved_marks->restore(heap->workers());
237 BiasedLocking::restore_marks();
238 _preserved_marks->reclaim();
239 }
240
241 // Resize metaspace
242 MetaspaceGC::compute_new_size();
243
244 // Free worker slices
245 for (uint i = 0; i < heap->max_workers(); i++) {
246 delete worker_slices[i];
247 }
248 FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);
249
250 heap->set_full_gc_move_in_progress(false);
251 heap->set_full_gc_in_progress(false);
252
253 if (ShenandoahVerify) {
254 heap->verifier()->verify_after_fullgc();
255 }
256
257 if (VerifyAfterGC) {
258 Universe::verify();
280 void ShenandoahFullGC::phase1_mark_heap() {
281 GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
282 ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);
283
284 ShenandoahHeap* heap = ShenandoahHeap::heap();
285
286 ShenandoahPrepareForMarkClosure cl;
287 heap->heap_region_iterate(&cl);
288
289 heap->set_unload_classes(heap->heuristics()->can_unload_classes());
290
291 ShenandoahReferenceProcessor* rp = heap->ref_processor();
292 // enable ("weak") refs discovery
293 rp->set_soft_reference_policy(true); // forcefully purge all soft references
294
295 ShenandoahSTWMark mark(true /*full_gc*/);
296 mark.mark();
297 heap->parallel_cleaning(true /* full_gc */);
298 }
299
300 class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
301 private:
302 PreservedMarks* const _preserved_marks;
303 ShenandoahHeap* const _heap;
304 GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
305 int _empty_regions_pos;
306 ShenandoahHeapRegion* _to_region;
307 ShenandoahHeapRegion* _from_region;
308 HeapWord* _compact_point;
309
310 public:
311 ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
312 GrowableArray<ShenandoahHeapRegion*>& empty_regions,
313 ShenandoahHeapRegion* to_region) :
314 _preserved_marks(preserved_marks),
315 _heap(ShenandoahHeap::heap()),
316 _empty_regions(empty_regions),
317 _empty_regions_pos(0),
318 _to_region(to_region),
319 _from_region(NULL),
348 // Object doesn't fit. Pick next empty region and start compacting there.
349 ShenandoahHeapRegion* new_to_region;
350 if (_empty_regions_pos < _empty_regions.length()) {
351 new_to_region = _empty_regions.at(_empty_regions_pos);
352 _empty_regions_pos++;
353 } else {
354 // Out of empty regions? Compact within the same region.
355 new_to_region = _from_region;
356 }
357
358 assert(new_to_region != _to_region, "must not reuse same to-region");
359 assert(new_to_region != NULL, "must not be NULL");
360 _to_region = new_to_region;
361 _compact_point = _to_region->bottom();
362 }
363
364 // Object fits into current region, record new location:
365 assert(_compact_point + obj_size <= _to_region->end(), "must fit");
366 shenandoah_assert_not_forwarded(NULL, p);
367 _preserved_marks->push_if_necessary(p, p->mark());
368 p->forward_to(cast_to_oop(_compact_point));
369 _compact_point += obj_size;
370 }
371 };
372
373 class ShenandoahPrepareForCompactionTask : public AbstractGangTask {
374 private:
375 PreservedMarksSet* const _preserved_marks;
376 ShenandoahHeap* const _heap;
377 ShenandoahHeapRegionSet** const _worker_slices;
378
379 public:
380 ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices) :
381 AbstractGangTask("Shenandoah Prepare For Compaction"),
382 _preserved_marks(preserved_marks),
383 _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
384 }
385
386 static bool is_candidate_region(ShenandoahHeapRegion* r) {
387 // Empty region: get it into the slice to defragment the slice itself.
388 // We could have skipped this without violating correctness, but we really
389 // want to compact all live regions to the start of the heap, which sometimes
390 // means moving them into the fully empty regions.
391 if (r->is_empty()) return true;
392
393 // Can move the region, and this is not a humongous region. Humongous
394 // regions are special-cased: their moves are handled separately.
395 return r->is_stw_move_allowed() && !r->is_humongous();
396 }
397
398 void work(uint worker_id) {
399 ShenandoahParallelWorkerSession worker_session(worker_id);
400 ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
401 ShenandoahHeapRegionSetIterator it(slice);
402 ShenandoahHeapRegion* from_region = it.next();
403 // No work?
404 if (from_region == NULL) {
405 return;
406 }
407
408 // Sliding compaction. Walk all regions in the slice, and compact them.
409 // Remember empty regions and reuse them as needed.
410 ResourceMark rm;
411
412 GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());
413
414 ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);
415
416 while (from_region != NULL) {
417 assert(is_candidate_region(from_region), "Sanity");
418
419 cl.set_from_region(from_region);
420 if (from_region->has_live()) {
421 _heap->marked_object_iterate(from_region, &cl);
422 }
423
424 // Compacted the region to somewhere else? From-region is empty then.
425 if (!cl.is_compact_same_region()) {
426 empty_regions.append(from_region);
427 }
428 from_region = it.next();
429 }
430 cl.finish_region();
431
432 // Mark all remaining regions as empty
433 for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
434 ShenandoahHeapRegion* r = empty_regions.at(pos);
435 r->set_new_top(r->bottom());
436 }
437 }
438 };
439
440 void ShenandoahFullGC::calculate_target_humongous_objects() {
441 ShenandoahHeap* heap = ShenandoahHeap::heap();
442
443 // Compute the new addresses for humongous objects. We need to do this after addresses
444 // for regular objects are calculated, and we know which regions in the heap suffix are
445 // available for humongous moves.
446 //
447 // Scan the heap backwards, because we are compacting humongous regions towards the end.
448 // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
449 // humongous start there.
450 //
451 // The complication is potential non-movable regions during the scan. If such a region is
452 // detected, then sliding restarts towards that non-movable region.
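// For example, with 100 regions (indices 0..99), suppose regions 96..99 come out of the
// regular pass empty (new_top == bottom) and regions 94..95 hold a movable two-region
// humongous object. By the time the backward scan reaches the humongous start at 94 the
// window is [95; 100), so start = 100 - 2 = 98; since 98 >= 95 and 98 != 94, the object
// is forwarded to region 98 and the window shrinks to [95; 98).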
453
454 size_t to_begin = heap->num_regions();
455 size_t to_end = heap->num_regions();
456
457 for (size_t c = heap->num_regions(); c > 0; c--) {
458 ShenandoahHeapRegion *r = heap->get_region(c - 1);
459 if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
460 // To-region candidate: record this, and continue scan
461 to_begin = r->index();
462 continue;
463 }
464
465 if (r->is_humongous_start() && r->is_stw_move_allowed()) {
466 // From-region candidate: movable humongous region
467 oop old_obj = cast_to_oop(r->bottom());
468 size_t words_size = old_obj->size();
469 size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
470
471 size_t start = to_end - num_regions;
472
473 if (start >= to_begin && start != r->index()) {
474 // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
475 _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
476 old_obj->forward_to(cast_to_oop(heap->get_region(start)->bottom()));
477 to_end = start;
478 continue;
479 }
480 }
481
482 // Failed to fit. Scan starting from current region.
483 to_begin = r->index();
484 to_end = r->index();
485 }
486 }
487
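// Makes every region walkable before the sliding phases: trash regions are recycled,
// collection set regions are reverted to regular regions, and empty uncommitted regions
// are committed, so the whole heap is committed and iterable.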
488 class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
489 private:
490 ShenandoahHeap* const _heap;
491
492 public:
493 ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
494 void heap_region_do(ShenandoahHeapRegion* r) {
495 if (r->is_trash()) {
496 r->recycle();
497 }
498 if (r->is_cset()) {
499 r->make_regular_bypass();
500 }
501 if (r->is_empty_uncommitted()) {
502 r->make_committed_bypass();
503 }
504 assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index());
505
506 // Record current region occupancy: this communicates to the rest of the Full GC
507 // code that empty regions are free.
705 heap->heap_region_iterate(&ecl);
706 }
707
708 // Compute the new addresses for regular objects
709 {
710 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);
711
712 distribute_slices(worker_slices);
713
714 ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
715 heap->workers()->run_task(&task);
716 }
717
718 // Compute the new addresses for humongous objects
719 {
720 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
721 calculate_target_humongous_objects();
722 }
723 }
724
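// Phase 3 closure: for each reference, if the referenced object was forwarded during
// phase 2, rewrite the reference to point at the forwardee; references to objects that
// did not move are left as-is.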
725 class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
726 private:
727 ShenandoahHeap* const _heap;
728 ShenandoahMarkingContext* const _ctx;
729
730 template <class T>
731 inline void do_oop_work(T* p) {
732 T o = RawAccess<>::oop_load(p);
733 if (!CompressedOops::is_null(o)) {
734 oop obj = CompressedOops::decode_not_null(o);
735 assert(_ctx->is_marked(obj), "must be marked");
736 if (obj->is_forwarded()) {
737 oop forw = obj->forwardee();
738 RawAccess<IS_NOT_NULL>::oop_store(p, forw);
739 }
740 }
741 }
742
743 public:
744 ShenandoahAdjustPointersClosure() :
745 _heap(ShenandoahHeap::heap()),
746 _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
747
748 void do_oop(oop* p) { do_oop_work(p); }
749 void do_oop(narrowOop* p) { do_oop_work(p); }
750 };
751
752 class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
753 private:
754 ShenandoahHeap* const _heap;
755 ShenandoahAdjustPointersClosure _cl;
756
757 public:
758 ShenandoahAdjustPointersObjectClosure() :
759 _heap(ShenandoahHeap::heap()) {
760 }
761 void do_object(oop p) {
762 assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
763 p->oop_iterate(&_cl);
764 }
765 };
766
767 class ShenandoahAdjustPointersTask : public AbstractGangTask {
768 private:
769 ShenandoahHeap* const _heap;
770 ShenandoahRegionIterator _regions;
771
772 public:
773 ShenandoahAdjustPointersTask() :
774 AbstractGangTask("Shenandoah Adjust Pointers"),
775 _heap(ShenandoahHeap::heap()) {
776 }
777
778 void work(uint worker_id) {
779 ShenandoahParallelWorkerSession worker_session(worker_id);
780 ShenandoahAdjustPointersObjectClosure obj_cl;
781 ShenandoahHeapRegion* r = _regions.next();
782 while (r != NULL) {
783 if (!r->is_humongous_continuation() && r->has_live()) {
784 _heap->marked_object_iterate(r, &obj_cl);
785 }
786 r = _regions.next();
787 }
788 }
789 };
790
791 class ShenandoahAdjustRootPointersTask : public AbstractGangTask {
792 private:
793 ShenandoahRootAdjuster* _rp;
794 PreservedMarksSet* _preserved_marks;
795 public:
796 ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
797 AbstractGangTask("Shenandoah Adjust Root Pointers"),
798 _rp(rp),
799 _preserved_marks(preserved_marks) {}
800
801 void work(uint worker_id) {
802 ShenandoahParallelWorkerSession worker_session(worker_id);
803 ShenandoahAdjustPointersClosure cl;
804 _rp->roots_do(worker_id, &cl);
805 _preserved_marks->get(worker_id)->adjust_during_full_gc();
806 }
807 };
808
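// Phase 3 walks the roots first (clearing and updating the derived pointer table around
// the root scan), then every live object in the heap, so that all references point at
// the post-compaction addresses computed in phase 2.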
809 void ShenandoahFullGC::phase3_update_references() {
810 GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
811 ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);
812
813 ShenandoahHeap* heap = ShenandoahHeap::heap();
814
815 WorkGang* workers = heap->workers();
816 uint nworkers = workers->active_workers();
817 {
818 #if COMPILER2_OR_JVMCI
819 DerivedPointerTable::clear();
820 #endif
821 ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_adjust_roots);
822 ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
823 workers->run_task(&task);
824 #if COMPILER2_OR_JVMCI
825 DerivedPointerTable::update_pointers();
826 #endif
827 }
828
829 ShenandoahAdjustPointersTask adjust_pointers_task;
830 workers->run_task(&adjust_pointers_task);
831 }
832
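// Phase 4 closure: copies each forwarded object to its new location and re-initializes
// its mark word; objects that were not forwarded already sit at their final address.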
833 class ShenandoahCompactObjectsClosure : public ObjectClosure {
834 private:
835 ShenandoahHeap* const _heap;
836 uint const _worker_id;
837
838 public:
839 ShenandoahCompactObjectsClosure(uint worker_id) :
840 _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}
841
842 void do_object(oop p) {
843 assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
844 size_t size = (size_t)p->size();
845 if (p->is_forwarded()) {
846 HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
847 HeapWord* compact_to = cast_from_oop<HeapWord*>(p->forwardee());
848 Copy::aligned_conjoint_words(compact_from, compact_to, size);
849 oop new_obj = cast_to_oop(compact_to);
850 new_obj->init_mark();
851 }
852 }
853 };
854
855 class ShenandoahCompactObjectsTask : public AbstractGangTask {
856 private:
857 ShenandoahHeap* const _heap;
858 ShenandoahHeapRegionSet** const _worker_slices;
859
860 public:
861 ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
862 AbstractGangTask("Shenandoah Compact Objects"),
863 _heap(ShenandoahHeap::heap()),
864 _worker_slices(worker_slices) {
865 }
866
867 void work(uint worker_id) {
868 ShenandoahParallelWorkerSession worker_session(worker_id);
869 ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);
870
871 ShenandoahCompactObjectsClosure cl(worker_id);
872 ShenandoahHeapRegion* r = slice.next();
873 while (r != NULL) {
874 assert(!r->is_humongous(), "must not get humongous regions here");
875 if (r->has_live()) {
876 _heap->marked_object_iterate(r, &cl);
877 }
878 r->set_top(r->new_top());
879 r = slice.next();
880 }
881 }
882 };
883
884 class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
885 private:
886 ShenandoahHeap* const _heap;
887 size_t _live;
888
889 public:
890 ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) {
891 _heap->free_set()->clear();
892 }
893
894 void heap_region_do(ShenandoahHeapRegion* r) {
895 assert (!r->is_cset(), "cset regions should have been demoted already");
896
897 // Need to reset the complete-top-at-mark-start pointer here because
898 // the complete marking bitmap is no longer valid. This ensures
899 // size-based iteration in marked_object_iterate().
900 // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
901 // pinned regions.
917 if (r->is_regular() && live == 0) {
918 r->make_trash();
919 }
920
921 // Recycle all trash regions
922 if (r->is_trash()) {
923 live = 0;
924 r->recycle();
925 }
926
927 r->set_live_data(live);
928 r->reset_alloc_metadata();
929 _live += live;
930 }
931
932 size_t get_live() {
933 return _live;
934 }
935 };
936
937 void ShenandoahFullGC::compact_humongous_objects() {
938 // Compact humongous regions, based on their fwdptr objects.
939 //
940 // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
941 // humongous regions are already compacted, and do not require further moves, which alleviates
942 // sliding costs. We may consider doing this in parallel in the future.
943
944 ShenandoahHeap* heap = ShenandoahHeap::heap();
945
946 for (size_t c = heap->num_regions(); c > 0; c--) {
947 ShenandoahHeapRegion* r = heap->get_region(c - 1);
948 if (r->is_humongous_start()) {
949 oop old_obj = cast_to_oop(r->bottom());
950 if (!old_obj->is_forwarded()) {
951 // No need to move the object, it stays at the same slot
952 continue;
953 }
954 size_t words_size = old_obj->size();
955 size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
956
957 size_t old_start = r->index();
958 size_t old_end = old_start + num_regions - 1;
959 size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
960 size_t new_end = new_start + num_regions - 1;
961 assert(old_start != new_start, "must be real move");
962 assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());
963
964 Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
965 heap->get_region(new_start)->bottom(),
966 words_size);
967
968 oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
969 new_obj->init_mark();
970
971 {
972 for (size_t c = old_start; c <= old_end; c++) {
973 ShenandoahHeapRegion* r = heap->get_region(c);
974 r->make_regular_bypass();
975 r->set_top(r->bottom());
976 }
977
978 for (size_t c = new_start; c <= new_end; c++) {
979 ShenandoahHeapRegion* r = heap->get_region(c);
980 if (c == new_start) {
981 r->make_humongous_start_bypass();
982 } else {
983 r->make_humongous_cont_bypass();
984 }
985
986 // Trailing region may be non-full, record the remainder there
987 size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
988 if ((c == new_end) && (remainder != 0)) {
989 r->set_top(r->bottom() + remainder);
990 } else {
991 r->set_top(r->end());
992 }
993
994 r->reset_alloc_metadata();
995 }
996 }
997 }
998 }
999 }
1000
1001 // This is slightly different from ShHeap::reset_next_mark_bitmap:
1002 // we need to remain able to walk pinned regions.
1003 // Since pinned regions do not move and do not get compacted, we will get holes with
1004 // unreachable objects in them (which may have pointers to unloaded Klasses and thus
1005 // cannot be iterated over using oop->size()). The only way to safely iterate over them is using
1006 // a valid marking bitmap and a valid TAMS pointer. This class only resets marking
1007 // bitmaps for unpinned regions, and later we only reset TAMS for unpinned regions.
1008 class ShenandoahMCResetCompleteBitmapTask : public AbstractGangTask {
1009 private:
1010 ShenandoahRegionIterator _regions;
1011
1012 public:
1013 ShenandoahMCResetCompleteBitmapTask() :
1014 AbstractGangTask("Shenandoah Reset Bitmap") {
1015 }
1016
1017 void work(uint worker_id) {
1018 ShenandoahParallelWorkerSession worker_session(worker_id);
1019 ShenandoahHeapRegion* region = _regions.next();
1020 ShenandoahHeap* heap = ShenandoahHeap::heap();
|
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26
27 #include "compiler/oopMap.hpp"
28 #include "gc/shared/gcTraceTime.inline.hpp"
29 #include "gc/shared/preservedMarks.inline.hpp"
30 #include "gc/shared/slidingForwarding.inline.hpp"
31 #include "gc/shared/tlab_globals.hpp"
32 #include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
33 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
34 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
35 #include "gc/shenandoah/shenandoahFreeSet.hpp"
36 #include "gc/shenandoah/shenandoahFullGC.hpp"
37 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
38 #include "gc/shenandoah/shenandoahMark.inline.hpp"
39 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
40 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
41 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
42 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
43 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
44 #include "gc/shenandoah/shenandoahMetrics.hpp"
45 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
46 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
47 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
48 #include "gc/shenandoah/shenandoahSTWMark.hpp"
49 #include "gc/shenandoah/shenandoahUtils.hpp"
50 #include "gc/shenandoah/shenandoahVerifier.hpp"
206 // Coming out of Full GC, we would not have any forwarded objects.
207 // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
208 heap->set_has_forwarded_objects(false);
209
210 heap->set_full_gc_move_in_progress(true);
211
212 // Setup workers for the rest
213 OrderAccess::fence();
214
215 // Initialize worker slices
216 ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
217 for (uint i = 0; i < heap->max_workers(); i++) {
218 worker_slices[i] = new ShenandoahHeapRegionSet();
219 }
220
221 {
222 // The rest of the code performs region moves, where region status is undefined
223 // until all phases have completed.
224 ShenandoahHeapLocker lock(heap->lock());
225
226 SlidingForwarding::begin();
227
228 phase2_calculate_target_addresses(worker_slices);
229
230 OrderAccess::fence();
231
232 phase3_update_references();
233
234 phase4_compact_objects(worker_slices);
235 }
236
237 {
238 // Epilogue
239 _preserved_marks->restore(heap->workers());
240 BiasedLocking::restore_marks();
241 _preserved_marks->reclaim();
242 SlidingForwarding::end();
243 }
244
245 // Resize metaspace
246 MetaspaceGC::compute_new_size();
247
248 // Free worker slices
249 for (uint i = 0; i < heap->max_workers(); i++) {
250 delete worker_slices[i];
251 }
252 FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);
253
254 heap->set_full_gc_move_in_progress(false);
255 heap->set_full_gc_in_progress(false);
256
257 if (ShenandoahVerify) {
258 heap->verifier()->verify_after_fullgc();
259 }
260
261 if (VerifyAfterGC) {
262 Universe::verify();
284 void ShenandoahFullGC::phase1_mark_heap() {
285 GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
286 ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);
287
288 ShenandoahHeap* heap = ShenandoahHeap::heap();
289
290 ShenandoahPrepareForMarkClosure cl;
291 heap->heap_region_iterate(&cl);
292
293 heap->set_unload_classes(heap->heuristics()->can_unload_classes());
294
295 ShenandoahReferenceProcessor* rp = heap->ref_processor();
296 // enable ("weak") refs discovery
297 rp->set_soft_reference_policy(true); // forcefully purge all soft references
298
299 ShenandoahSTWMark mark(true /*full_gc*/);
300 mark.mark();
301 heap->parallel_cleaning(true /* full_gc */);
302 }
303
304 template <bool ALT_FWD>
305 class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
306 private:
307 PreservedMarks* const _preserved_marks;
308 ShenandoahHeap* const _heap;
309 GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
310 int _empty_regions_pos;
311 ShenandoahHeapRegion* _to_region;
312 ShenandoahHeapRegion* _from_region;
313 HeapWord* _compact_point;
314
315 public:
316 ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
317 GrowableArray<ShenandoahHeapRegion*>& empty_regions,
318 ShenandoahHeapRegion* to_region) :
319 _preserved_marks(preserved_marks),
320 _heap(ShenandoahHeap::heap()),
321 _empty_regions(empty_regions),
322 _empty_regions_pos(0),
323 _to_region(to_region),
324 _from_region(NULL),
353 // Object doesn't fit. Pick next empty region and start compacting there.
354 ShenandoahHeapRegion* new_to_region;
355 if (_empty_regions_pos < _empty_regions.length()) {
356 new_to_region = _empty_regions.at(_empty_regions_pos);
357 _empty_regions_pos++;
358 } else {
359 // Out of empty regions? Compact within the same region.
360 new_to_region = _from_region;
361 }
362
363 assert(new_to_region != _to_region, "must not reuse same to-region");
364 assert(new_to_region != NULL, "must not be NULL");
365 _to_region = new_to_region;
366 _compact_point = _to_region->bottom();
367 }
368
369 // Object fits into current region, record new location:
370 assert(_compact_point + obj_size <= _to_region->end(), "must fit");
371 shenandoah_assert_not_forwarded(NULL, p);
372 _preserved_marks->push_if_necessary(p, p->mark());
373 SlidingForwarding::forward_to<ALT_FWD>(p, cast_to_oop(_compact_point));
374 _compact_point += obj_size;
375 }
376 };
377
378 class ShenandoahPrepareForCompactionTask : public AbstractGangTask {
379 private:
380 PreservedMarksSet* const _preserved_marks;
381 ShenandoahHeap* const _heap;
382 ShenandoahHeapRegionSet** const _worker_slices;
383
384 public:
385 ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices) :
386 AbstractGangTask("Shenandoah Prepare For Compaction"),
387 _preserved_marks(preserved_marks),
388 _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
389 }
390
391 static bool is_candidate_region(ShenandoahHeapRegion* r) {
392 // Empty region: get it into the slice to defragment the slice itself.
393 // We could have skipped this without violating correctness, but we really
394 // want to compact all live regions to the start of the heap, which sometimes
395 // means moving them into the fully empty regions.
396 if (r->is_empty()) return true;
397
398 // Can move the region, and this is not a humongous region. Humongous
399 // regions are special-cased: their moves are handled separately.
400 return r->is_stw_move_allowed() && !r->is_humongous();
401 }
402
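// Branch on UseAltGCForwarding once per task, so that work_impl and the SlidingForwarding
// calls it makes are specialized for the selected forwarding encoding instead of
// re-checking the flag for every object.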
403 void work(uint worker_id) {
404 if (UseAltGCForwarding) {
405 work_impl<true>(worker_id);
406 } else {
407 work_impl<false>(worker_id);
408 }
409 }
410
411 private:
412 template <bool ALT_FWD>
413 void work_impl(uint worker_id) {
414 ShenandoahParallelWorkerSession worker_session(worker_id);
415 ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
416 ShenandoahHeapRegionSetIterator it(slice);
417 ShenandoahHeapRegion* from_region = it.next();
418 // No work?
419 if (from_region == NULL) {
420 return;
421 }
422
423 // Sliding compaction. Walk all regions in the slice, and compact them.
424 // Remember empty regions and reuse them as needed.
425 ResourceMark rm;
426
427 GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());
428
429 ShenandoahPrepareForCompactionObjectClosure<ALT_FWD> cl(_preserved_marks->get(worker_id), empty_regions, from_region);
430
431 while (from_region != NULL) {
432 assert(is_candidate_region(from_region), "Sanity");
433
434 cl.set_from_region(from_region);
435 if (from_region->has_live()) {
436 _heap->marked_object_iterate(from_region, &cl);
437 }
438
439 // Compacted the region to somewhere else? From-region is empty then.
440 if (!cl.is_compact_same_region()) {
441 empty_regions.append(from_region);
442 }
443 from_region = it.next();
444 }
445 cl.finish_region();
446
447 // Mark all remaining regions as empty
448 for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
449 ShenandoahHeapRegion* r = empty_regions.at(pos);
450 r->set_new_top(r->bottom());
451 }
452 }
453 };
454
455 template <bool ALT_FWD>
456 void ShenandoahFullGC::calculate_target_humongous_objects_impl() {
457 ShenandoahHeap* heap = ShenandoahHeap::heap();
458
459 // Compute the new addresses for humongous objects. We need to do this after addresses
460 // for regular objects are calculated, and we know which regions in the heap suffix are
461 // available for humongous moves.
462 //
463 // Scan the heap backwards, because we are compacting humongous regions towards the end.
464 // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
465 // humongous start there.
466 //
467 // The complication is potential non-movable regions during the scan. If such a region is
468 // detected, then sliding restarts towards that non-movable region.
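// For example, with 100 regions (indices 0..99), suppose regions 96..99 come out of the
// regular pass empty (new_top == bottom) and regions 94..95 hold a movable two-region
// humongous object. By the time the backward scan reaches the humongous start at 94 the
// window is [95; 100), so start = 100 - 2 = 98; since 98 >= 95 and 98 != 94, the object
// is forwarded to region 98 and the window shrinks to [95; 98).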
469
470 size_t to_begin = heap->num_regions();
471 size_t to_end = heap->num_regions();
472
473 for (size_t c = heap->num_regions(); c > 0; c--) {
474 ShenandoahHeapRegion *r = heap->get_region(c - 1);
475 if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
476 // To-region candidate: record this, and continue scan
477 to_begin = r->index();
478 continue;
479 }
480
481 if (r->is_humongous_start() && r->is_stw_move_allowed()) {
482 // From-region candidate: movable humongous region
483 oop old_obj = cast_to_oop(r->bottom());
484 size_t words_size = old_obj->size();
485 size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
486
487 size_t start = to_end - num_regions;
488
489 if (start >= to_begin && start != r->index()) {
490 // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
491 _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
492 SlidingForwarding::forward_to<ALT_FWD>(old_obj, cast_to_oop(heap->get_region(start)->bottom()));
493 to_end = start;
494 continue;
495 }
496 }
497
498 // Failed to fit. Scan starting from current region.
499 to_begin = r->index();
500 to_end = r->index();
501 }
502 }
503
504 void ShenandoahFullGC::calculate_target_humongous_objects() {
505 if (UseAltGCForwarding) {
506 calculate_target_humongous_objects_impl<true>();
507 } else {
508 calculate_target_humongous_objects_impl<false>();
509 }
510 }
511
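// Makes every region walkable before the sliding phases: trash regions are recycled,
// collection set regions are reverted to regular regions, and empty uncommitted regions
// are committed, so the whole heap is committed and iterable.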
512 class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
513 private:
514 ShenandoahHeap* const _heap;
515
516 public:
517 ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
518 void heap_region_do(ShenandoahHeapRegion* r) {
519 if (r->is_trash()) {
520 r->recycle();
521 }
522 if (r->is_cset()) {
523 r->make_regular_bypass();
524 }
525 if (r->is_empty_uncommitted()) {
526 r->make_committed_bypass();
527 }
528 assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index());
529
530 // Record current region occupancy: this communicates to the rest of the Full GC
531 // code that empty regions are free.
729 heap->heap_region_iterate(&ecl);
730 }
731
732 // Compute the new addresses for regular objects
733 {
734 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);
735
736 distribute_slices(worker_slices);
737
738 ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
739 heap->workers()->run_task(&task);
740 }
741
742 // Compute the new addresses for humongous objects
743 {
744 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
745 calculate_target_humongous_objects();
746 }
747 }
748
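// Phase 3 closure: for each reference, if SlidingForwarding recorded a new location for
// the referenced object during phase 2, rewrite the reference to point at that forwardee;
// references to objects that did not move are left as-is.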
749 template <bool ALT_FWD>
750 class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
751 private:
752 ShenandoahHeap* const _heap;
753 ShenandoahMarkingContext* const _ctx;
754
755 template <class T>
756 inline void do_oop_work(T* p) {
757 T o = RawAccess<>::oop_load(p);
758 if (!CompressedOops::is_null(o)) {
759 oop obj = CompressedOops::decode_not_null(o);
760 assert(_ctx->is_marked(obj), "must be marked");
761 if (SlidingForwarding::is_forwarded(obj)) {
762 oop forw = SlidingForwarding::forwardee<ALT_FWD>(obj);
763 RawAccess<IS_NOT_NULL>::oop_store(p, forw);
764 }
765 }
766 }
767
768 public:
769 ShenandoahAdjustPointersClosure() :
770 _heap(ShenandoahHeap::heap()),
771 _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
772
773 void do_oop(oop* p) { do_oop_work(p); }
774 void do_oop(narrowOop* p) { do_oop_work(p); }
775 };
776
777 template <bool ALT_FWD>
778 class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
779 private:
780 ShenandoahHeap* const _heap;
781 ShenandoahAdjustPointersClosure<ALT_FWD> _cl;
782
783 public:
784 ShenandoahAdjustPointersObjectClosure() :
785 _heap(ShenandoahHeap::heap()) {
786 }
787 void do_object(oop p) {
788 assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
789 p->oop_iterate(&_cl);
790 }
791 };
792
793 class ShenandoahAdjustPointersTask : public AbstractGangTask {
794 private:
795 ShenandoahHeap* const _heap;
796 ShenandoahRegionIterator _regions;
797
798 public:
799 ShenandoahAdjustPointersTask() :
800 AbstractGangTask("Shenandoah Adjust Pointers"),
801 _heap(ShenandoahHeap::heap()) {
802 }
803
804 private:
805 template <bool ALT_FWD>
806 void work_impl(uint worker_id) {
807 ShenandoahParallelWorkerSession worker_session(worker_id);
808 ShenandoahAdjustPointersObjectClosure<ALT_FWD> obj_cl;
809 ShenandoahHeapRegion* r = _regions.next();
810 while (r != NULL) {
811 if (!r->is_humongous_continuation() && r->has_live()) {
812 _heap->marked_object_iterate(r, &obj_cl);
813 }
814 r = _regions.next();
815 }
816 }
817
818 public:
819 void work(uint worker_id) {
820 if (UseAltGCForwarding) {
821 work_impl<true>(worker_id);
822 } else {
823 work_impl<false>(worker_id);
824 }
825 }
826 };
827
828 class ShenandoahAdjustRootPointersTask : public AbstractGangTask {
829 private:
830 ShenandoahRootAdjuster* _rp;
831 PreservedMarksSet* _preserved_marks;
832
833 public:
834 ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
835 AbstractGangTask("Shenandoah Adjust Root Pointers"),
836 _rp(rp),
837 _preserved_marks(preserved_marks) {}
838
839 private:
840 template <bool ALT_FWD>
841 void work_impl(uint worker_id) {
842 ShenandoahParallelWorkerSession worker_session(worker_id);
843 ShenandoahAdjustPointersClosure<ALT_FWD> cl;
844 _rp->roots_do(worker_id, &cl);
845 _preserved_marks->get(worker_id)->adjust_during_full_gc();
846 }
847
848 public:
849 void work(uint worker_id) {
850 if (UseAltGCForwarding) {
851 work_impl<true>(worker_id);
852 } else {
853 work_impl<false>(worker_id);
854 }
855 }
856 };
857
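// Phase 3 walks the roots first (clearing and updating the derived pointer table around
// the root scan), then every live object in the heap, so that all references point at
// the post-compaction addresses computed in phase 2.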
858 void ShenandoahFullGC::phase3_update_references() {
859 GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
860 ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);
861
862 ShenandoahHeap* heap = ShenandoahHeap::heap();
863
864 WorkGang* workers = heap->workers();
865 uint nworkers = workers->active_workers();
866 {
867 #if COMPILER2_OR_JVMCI
868 DerivedPointerTable::clear();
869 #endif
870 ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_adjust_roots);
871 ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
872 workers->run_task(&task);
873 #if COMPILER2_OR_JVMCI
874 DerivedPointerTable::update_pointers();
875 #endif
876 }
877
878 ShenandoahAdjustPointersTask adjust_pointers_task;
879 workers->run_task(&adjust_pointers_task);
880 }
881
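// Phase 4 closure: copies each object that SlidingForwarding recorded as forwarded to its
// new location and re-initializes its mark word; non-forwarded objects already sit at
// their final address.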
882 template <bool ALT_FWD>
883 class ShenandoahCompactObjectsClosure : public ObjectClosure {
884 private:
885 ShenandoahHeap* const _heap;
886 uint const _worker_id;
887
888 public:
889 ShenandoahCompactObjectsClosure(uint worker_id) :
890 _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}
891
892 void do_object(oop p) {
893 assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
894 size_t size = (size_t)p->size();
895 if (SlidingForwarding::is_forwarded(p)) {
896 HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
897 HeapWord* compact_to = cast_from_oop<HeapWord*>(SlidingForwarding::forwardee<ALT_FWD>(p));
898 Copy::aligned_conjoint_words(compact_from, compact_to, size);
899 oop new_obj = cast_to_oop(compact_to);
900 new_obj->init_mark();
901 }
902 }
903 };
904
905 class ShenandoahCompactObjectsTask : public AbstractGangTask {
906 private:
907 ShenandoahHeap* const _heap;
908 ShenandoahHeapRegionSet** const _worker_slices;
909
910 public:
911 ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
912 AbstractGangTask("Shenandoah Compact Objects"),
913 _heap(ShenandoahHeap::heap()),
914 _worker_slices(worker_slices) {
915 }
916
917 private:
918 template <bool ALT_FWD>
919 void work_impl(uint worker_id) {
920 ShenandoahParallelWorkerSession worker_session(worker_id);
921 ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);
922
923 ShenandoahCompactObjectsClosure<ALT_FWD> cl(worker_id);
924 ShenandoahHeapRegion* r = slice.next();
925 while (r != NULL) {
926 assert(!r->is_humongous(), "must not get humongous regions here");
927 if (r->has_live()) {
928 _heap->marked_object_iterate(r, &cl);
929 }
930 r->set_top(r->new_top());
931 r = slice.next();
932 }
933 }
934
935 public:
936 void work(uint worker_id) {
937 if (UseAltGCForwarding) {
938 work_impl<true>(worker_id);
939 } else {
940 work_impl<false>(worker_id);
941 }
942 }
943 };
944
945 class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
946 private:
947 ShenandoahHeap* const _heap;
948 size_t _live;
949
950 public:
951 ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) {
952 _heap->free_set()->clear();
953 }
954
955 void heap_region_do(ShenandoahHeapRegion* r) {
956 assert (!r->is_cset(), "cset regions should have been demoted already");
957
958 // Need to reset the complete-top-at-mark-start pointer here because
959 // the complete marking bitmap is no longer valid. This ensures
960 // size-based iteration in marked_object_iterate().
961 // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
962 // pinned regions.
978 if (r->is_regular() && live == 0) {
979 r->make_trash();
980 }
981
982 // Recycle all trash regions
983 if (r->is_trash()) {
984 live = 0;
985 r->recycle();
986 }
987
988 r->set_live_data(live);
989 r->reset_alloc_metadata();
990 _live += live;
991 }
992
993 size_t get_live() {
994 return _live;
995 }
996 };
997
998 template <bool ALT_FWD>
999 void ShenandoahFullGC::compact_humongous_objects_impl() {
1000 // Compact humongous regions, based on their fwdptr objects.
1001 //
1002 // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
1003 // humongous regions are already compacted, and do not require further moves, which alleviates
1004 // sliding costs. We may consider doing this in parallel in the future.
1005
1006 ShenandoahHeap* heap = ShenandoahHeap::heap();
1007
1008 for (size_t c = heap->num_regions(); c > 0; c--) {
1009 ShenandoahHeapRegion* r = heap->get_region(c - 1);
1010 if (r->is_humongous_start()) {
1011 oop old_obj = cast_to_oop(r->bottom());
1012 if (SlidingForwarding::is_not_forwarded(old_obj)) {
1013 // No need to move the object, it stays at the same slot
1014 continue;
1015 }
1016 size_t words_size = old_obj->size();
1017 size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
1018
1019 size_t old_start = r->index();
1020 size_t old_end = old_start + num_regions - 1;
1021 size_t new_start = heap->heap_region_index_containing(SlidingForwarding::forwardee<ALT_FWD>(old_obj));
1022 size_t new_end = new_start + num_regions - 1;
1023 assert(old_start != new_start, "must be real move");
1024 assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());
1025
1026 Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
1027 heap->get_region(new_start)->bottom(),
1028 words_size);
1029
1030 oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
1031 new_obj->init_mark();
1032
1033 {
1034 for (size_t c = old_start; c <= old_end; c++) {
1035 ShenandoahHeapRegion* r = heap->get_region(c);
1036 r->make_regular_bypass();
1037 r->set_top(r->bottom());
1038 }
1039
1040 for (size_t c = new_start; c <= new_end; c++) {
1041 ShenandoahHeapRegion* r = heap->get_region(c);
1042 if (c == new_start) {
1043 r->make_humongous_start_bypass();
1044 } else {
1045 r->make_humongous_cont_bypass();
1046 }
1047
1048 // Trailing region may be non-full, record the remainder there
1049 size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
1050 if ((c == new_end) && (remainder != 0)) {
1051 r->set_top(r->bottom() + remainder);
1052 } else {
1053 r->set_top(r->end());
1054 }
1055
1056 r->reset_alloc_metadata();
1057 }
1058 }
1059 }
1060 }
1061 }
1062
1063 void ShenandoahFullGC::compact_humongous_objects() {
1064 if (UseAltGCForwarding) {
1065 compact_humongous_objects_impl<true>();
1066 } else {
1067 compact_humongous_objects_impl<false>();
1068 }
1069 }
1070
1071 // This is slightly different from ShHeap::reset_next_mark_bitmap:
1072 // we need to remain able to walk pinned regions.
1073 // Since pinned regions do not move and do not get compacted, we will get holes with
1074 // unreachable objects in them (which may have pointers to unloaded Klasses and thus
1075 // cannot be iterated over using oop->size()). The only way to safely iterate over them is using
1076 // a valid marking bitmap and a valid TAMS pointer. This class only resets marking
1077 // bitmaps for unpinned regions, and later we only reset TAMS for unpinned regions.
1078 class ShenandoahMCResetCompleteBitmapTask : public AbstractGangTask {
1079 private:
1080 ShenandoahRegionIterator _regions;
1081
1082 public:
1083 ShenandoahMCResetCompleteBitmapTask() :
1084 AbstractGangTask("Shenandoah Reset Bitmap") {
1085 }
1086
1087 void work(uint worker_id) {
1088 ShenandoahParallelWorkerSession worker_session(worker_id);
1089 ShenandoahHeapRegion* region = _regions.next();
1090 ShenandoahHeap* heap = ShenandoahHeap::heap();
|