225 // Coming out of Full GC, we would not have any forwarded objects.
226 // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
227 heap->set_has_forwarded_objects(false);
228
229 heap->set_full_gc_move_in_progress(true);
230
231 // Setup workers for the rest
232 OrderAccess::fence();
233
234 // Initialize worker slices
235 ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
236 for (uint i = 0; i < heap->max_workers(); i++) {
237 worker_slices[i] = new ShenandoahHeapRegionSet();
238 }
239
240 {
241 // The rest of code performs region moves, where region status is undefined
242 // until all phases run together.
243 ShenandoahHeapLocker lock(heap->lock());
244
245 phase2_calculate_target_addresses(worker_slices);
246
247 OrderAccess::fence();
248
249 phase3_update_references();
250
251 phase4_compact_objects(worker_slices);
252
253 phase5_epilog();
254 }
255 heap->start_idle_span();
256
257 // Resize metaspace
258 MetaspaceGC::compute_new_size();
259
260 // Free worker slices
261 for (uint i = 0; i < heap->max_workers(); i++) {
262 delete worker_slices[i];
263 }
264 FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);
265
266 heap->set_full_gc_move_in_progress(false);
267 heap->set_full_gc_in_progress(false);
268
269 if (ShenandoahVerify) {
270 heap->verifier()->verify_after_fullgc(_generation);
271 }
272
273 if (VerifyAfterGC) {
  // Seals the current to-region: records the final compaction point as the
  // region's new top, so later phases know the post-compaction fill level.
  void finish() {
    assert(_to_region != nullptr, "should not happen");
    _to_region->set_new_top(_compact_point);
  }
338
  // True when the last processed region compacted into itself, i.e. no
  // objects were forwarded out of it into another region.
  bool is_compact_same_region() {
    return _from_region == _to_region;
  }
342
  // Index of the next unconsumed entry in the pre-collected empty-regions
  // list; tells the caller how many empty regions were used as targets.
  int empty_regions_pos() {
    return _empty_regions_pos;
  }
346
  // Computes the compaction target address for one marked object and records
  // it via the forwarding mechanism. Objects are packed bump-pointer style
  // into _to_region; when an object does not fit, compaction continues in the
  // next pre-collected empty region, or within _from_region itself.
  void do_object(oop p) override {
    shenandoah_assert_mark_complete(cast_from_oop<HeapWord*>(p));
    assert(_from_region != nullptr, "must set before work");
    assert(_heap->global_generation()->is_mark_complete(), "marking must be finished");
    assert(_heap->marking_context()->is_marked(p), "must be marked");
    assert(!_heap->marking_context()->allocated_after_mark_start(p), "must be truly marked");

    size_t obj_size = p->size();
    if (_compact_point + obj_size > _to_region->end()) {
      // Current to-region is full: seal it before switching targets.
      finish();

      // Object doesn't fit. Pick next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty region? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != nullptr, "must not be null");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // Object fits into current region, record new location, if object does not move:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(nullptr, p);
    if (_compact_point != cast_from_oop<HeapWord*>(p)) {
      // Object moves: preserve the mark word if needed (forwarding overwrites
      // it), then record the forwarding to the new location.
      _preserved_marks->push_if_necessary(p, p->mark());
      FullGCForwarding::forward_to(p, cast_to_oop(_compact_point));
    }
    _compact_point += obj_size;
  }
383 };
384
385 class ShenandoahPrepareForCompactionTask : public WorkerTask {
386 private:
387 PreservedMarksSet* const _preserved_marks;
388 ShenandoahHeap* const _heap;
389 ShenandoahHeapRegionSet** const _worker_slices;
390
391 public:
862 class ShenandoahCompactObjectsClosure : public ObjectClosure {
863 private:
864 uint const _worker_id;
865
866 public:
867 explicit ShenandoahCompactObjectsClosure(uint worker_id) :
868 _worker_id(worker_id) {}
869
870 void do_object(oop p) override {
871 assert(ShenandoahHeap::heap()->global_generation()->is_mark_complete(), "marking must be finished");
872 assert(ShenandoahHeap::heap()->marking_context()->is_marked(p), "must be marked");
873 size_t size = p->size();
874 if (FullGCForwarding::is_forwarded(p)) {
875 HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
876 HeapWord* compact_to = cast_from_oop<HeapWord*>(FullGCForwarding::forwardee(p));
877 assert(compact_from != compact_to, "Forwarded object should move");
878 Copy::aligned_conjoint_words(compact_from, compact_to, size);
879 oop new_obj = cast_to_oop(compact_to);
880
881 ContinuationGCSupport::relativize_stack_chunk(new_obj);
882 new_obj->init_mark();
883 }
884 }
885 };
886
887 class ShenandoahCompactObjectsTask : public WorkerTask {
888 private:
889 ShenandoahHeap* const _heap;
890 ShenandoahHeapRegionSet** const _worker_slices;
891
892 public:
  // worker_slices: per-worker region sets computed during the preparation
  // phase; each worker compacts only the regions in its own slice.
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
    WorkerTask("Shenandoah Compact Objects"),
    _heap(ShenandoahHeap::heap()),
    _worker_slices(worker_slices) {
  }
898
899 void work(uint worker_id) override {
900 ShenandoahParallelWorkerSession worker_session(worker_id);
901 ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);
902
996 oop old_obj = cast_to_oop(r->bottom());
997 if (!FullGCForwarding::is_forwarded(old_obj)) {
998 // No need to move the object, it stays at the same slot
999 continue;
1000 }
1001 size_t words_size = old_obj->size();
1002 size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
1003
1004 size_t old_start = r->index();
1005 size_t old_end = old_start + num_regions - 1;
1006 size_t new_start = heap->heap_region_index_containing(FullGCForwarding::forwardee(old_obj));
1007 size_t new_end = new_start + num_regions - 1;
1008 assert(old_start != new_start, "must be real move");
1009 assert(r->is_stw_move_allowed(), "Region %zu should be movable", r->index());
1010
1011 log_debug(gc)("Full GC compaction moves humongous object from region %zu to region %zu", old_start, new_start);
1012 Copy::aligned_conjoint_words(r->bottom(), heap->get_region(new_start)->bottom(), words_size);
1013 ContinuationGCSupport::relativize_stack_chunk(cast_to_oop<HeapWord*>(r->bottom()));
1014
1015 oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
1016 new_obj->init_mark();
1017
1018 {
1019 ShenandoahAffiliation original_affiliation = r->affiliation();
1020 for (size_t c = old_start; c <= old_end; c++) {
1021 ShenandoahHeapRegion* r = heap->get_region(c);
1022 // Leave humongous region affiliation unchanged.
1023 r->make_regular_bypass();
1024 r->set_top(r->bottom());
1025 }
1026
1027 for (size_t c = new_start; c <= new_end; c++) {
1028 ShenandoahHeapRegion* r = heap->get_region(c);
1029 if (c == new_start) {
1030 r->make_humongous_start_bypass(original_affiliation);
1031 } else {
1032 r->make_humongous_cont_bypass(original_affiliation);
1033 }
1034
1035 // Trailing region may be non-full, record the remainder there
1036 size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
|
225 // Coming out of Full GC, we would not have any forwarded objects.
226 // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
227 heap->set_has_forwarded_objects(false);
228
229 heap->set_full_gc_move_in_progress(true);
230
231 // Setup workers for the rest
232 OrderAccess::fence();
233
234 // Initialize worker slices
235 ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
236 for (uint i = 0; i < heap->max_workers(); i++) {
237 worker_slices[i] = new ShenandoahHeapRegionSet();
238 }
239
240 {
241 // The rest of code performs region moves, where region status is undefined
242 // until all phases run together.
243 ShenandoahHeapLocker lock(heap->lock());
244
245 FullGCForwarding::begin();
246
247 phase2_calculate_target_addresses(worker_slices);
248
249 OrderAccess::fence();
250
251 phase3_update_references();
252
253 phase4_compact_objects(worker_slices);
254
255 phase5_epilog();
256
257 FullGCForwarding::end();
258 }
259 heap->start_idle_span();
260
261 // Resize metaspace
262 MetaspaceGC::compute_new_size();
263
264 // Free worker slices
265 for (uint i = 0; i < heap->max_workers(); i++) {
266 delete worker_slices[i];
267 }
268 FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);
269
270 heap->set_full_gc_move_in_progress(false);
271 heap->set_full_gc_in_progress(false);
272
273 if (ShenandoahVerify) {
274 heap->verifier()->verify_after_fullgc(_generation);
275 }
276
277 if (VerifyAfterGC) {
  // Closes out the current to-region by publishing the compaction point as
  // its new top; subsequent phases read this to size the compacted region.
  void finish() {
    assert(_to_region != nullptr, "should not happen");
    _to_region->set_new_top(_compact_point);
  }
342
  // Reports whether compaction of the last region happened in place
  // (from-region and to-region are the same region).
  bool is_compact_same_region() {
    return _from_region == _to_region;
  }
346
  // Cursor into the empty-regions list: the number of pre-collected empty
  // regions consumed as compaction targets so far.
  int empty_regions_pos() {
    return _empty_regions_pos;
  }
350
  // Computes the compaction target address for one marked object and records
  // it via the forwarding mechanism. An object that moves may require a
  // different size at its destination (p->copy_size(), presumably for
  // compact object headers / identity-hash expansion — confirm), so the size
  // reserved depends on whether the object actually moves.
  void do_object(oop p) override {
    shenandoah_assert_mark_complete(cast_from_oop<HeapWord*>(p));
    assert(_from_region != nullptr, "must set before work");
    assert(_heap->global_generation()->is_mark_complete(), "marking must be finished");
    assert(_heap->marking_context()->is_marked(p), "must be marked");
    assert(!_heap->marking_context()->allocated_after_mark_start(p), "must be truly marked");

    size_t old_size = p->size();
    size_t new_size = p->copy_size(old_size, p->mark());
    // In-place object keeps its current size; a moving object reserves the
    // (possibly larger) copy size at the target.
    size_t obj_size = _compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
    if (_compact_point + obj_size > _to_region->end()) {
      // Current to-region is full: seal it before switching targets.
      finish();

      // Object doesn't fit. Pick next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty region? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != nullptr, "must not be null");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
      // Re-evaluate the size: after switching regions the compaction point may
      // now coincide with the object (compacting within _from_region), in
      // which case the object stays and keeps its old size.
      obj_size = _compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
    }

    // Object fits into current region, record new location, if object does not move:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(nullptr, p);
    if (_compact_point != cast_from_oop<HeapWord*>(p)) {
      // Object moves: preserve the mark word if needed (forwarding overwrites
      // it), then record the forwarding to the new location.
      _preserved_marks->push_if_necessary(p, p->mark());
      FullGCForwarding::forward_to(p, cast_to_oop(_compact_point));
    }
    _compact_point += obj_size;
  }
390 };
391
392 class ShenandoahPrepareForCompactionTask : public WorkerTask {
393 private:
394 PreservedMarksSet* const _preserved_marks;
395 ShenandoahHeap* const _heap;
396 ShenandoahHeapRegionSet** const _worker_slices;
397
398 public:
869 class ShenandoahCompactObjectsClosure : public ObjectClosure {
870 private:
871 uint const _worker_id;
872
873 public:
874 explicit ShenandoahCompactObjectsClosure(uint worker_id) :
875 _worker_id(worker_id) {}
876
877 void do_object(oop p) override {
878 assert(ShenandoahHeap::heap()->global_generation()->is_mark_complete(), "marking must be finished");
879 assert(ShenandoahHeap::heap()->marking_context()->is_marked(p), "must be marked");
880 size_t size = p->size();
881 if (FullGCForwarding::is_forwarded(p)) {
882 HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
883 HeapWord* compact_to = cast_from_oop<HeapWord*>(FullGCForwarding::forwardee(p));
884 assert(compact_from != compact_to, "Forwarded object should move");
885 Copy::aligned_conjoint_words(compact_from, compact_to, size);
886 oop new_obj = cast_to_oop(compact_to);
887
888 ContinuationGCSupport::relativize_stack_chunk(new_obj);
889 new_obj->reinit_mark();
890 new_obj->initialize_hash_if_necessary(p);
891 }
892 }
893 };
894
895 class ShenandoahCompactObjectsTask : public WorkerTask {
896 private:
897 ShenandoahHeap* const _heap;
898 ShenandoahHeapRegionSet** const _worker_slices;
899
900 public:
  // worker_slices: per-worker region sets computed during the preparation
  // phase; each worker compacts only the regions in its own slice.
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
    WorkerTask("Shenandoah Compact Objects"),
    _heap(ShenandoahHeap::heap()),
    _worker_slices(worker_slices) {
  }
906
907 void work(uint worker_id) override {
908 ShenandoahParallelWorkerSession worker_session(worker_id);
909 ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);
910
1004 oop old_obj = cast_to_oop(r->bottom());
1005 if (!FullGCForwarding::is_forwarded(old_obj)) {
1006 // No need to move the object, it stays at the same slot
1007 continue;
1008 }
1009 size_t words_size = old_obj->size();
1010 size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
1011
1012 size_t old_start = r->index();
1013 size_t old_end = old_start + num_regions - 1;
1014 size_t new_start = heap->heap_region_index_containing(FullGCForwarding::forwardee(old_obj));
1015 size_t new_end = new_start + num_regions - 1;
1016 assert(old_start != new_start, "must be real move");
1017 assert(r->is_stw_move_allowed(), "Region %zu should be movable", r->index());
1018
1019 log_debug(gc)("Full GC compaction moves humongous object from region %zu to region %zu", old_start, new_start);
1020 Copy::aligned_conjoint_words(r->bottom(), heap->get_region(new_start)->bottom(), words_size);
1021 ContinuationGCSupport::relativize_stack_chunk(cast_to_oop<HeapWord*>(r->bottom()));
1022
1023 oop new_obj = cast_to_oop(heap->get_region(new_start)->bottom());
1024 new_obj->reinit_mark();
1025
1026 {
1027 ShenandoahAffiliation original_affiliation = r->affiliation();
1028 for (size_t c = old_start; c <= old_end; c++) {
1029 ShenandoahHeapRegion* r = heap->get_region(c);
1030 // Leave humongous region affiliation unchanged.
1031 r->make_regular_bypass();
1032 r->set_top(r->bottom());
1033 }
1034
1035 for (size_t c = new_start; c <= new_end; c++) {
1036 ShenandoahHeapRegion* r = heap->get_region(c);
1037 if (c == new_start) {
1038 r->make_humongous_start_bypass(original_affiliation);
1039 } else {
1040 r->make_humongous_cont_bypass(original_affiliation);
1041 }
1042
1043 // Trailing region may be non-full, record the remainder there
1044 size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
|