227 // Coming out of Full GC, we would not have any forwarded objects.
228 // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
229 heap->set_has_forwarded_objects(false);
230
231 heap->set_full_gc_move_in_progress(true);
232
233 // Setup workers for the rest
234 OrderAccess::fence();
235
236 // Initialize worker slices
237 ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
238 for (uint i = 0; i < heap->max_workers(); i++) {
239 worker_slices[i] = new ShenandoahHeapRegionSet();
240 }
241
242 {
243 // The rest of code performs region moves, where region status is undefined
244 // until all phases run together.
245 ShenandoahHeapLocker lock(heap->lock());
246
247 phase2_calculate_target_addresses(worker_slices);
248
249 OrderAccess::fence();
250
251 phase3_update_references();
252
253 phase4_compact_objects(worker_slices);
254
255 phase5_epilog();
256 }
257
258 // Resize metaspace
259 MetaspaceGC::compute_new_size();
260
261 // Free worker slices
262 for (uint i = 0; i < heap->max_workers(); i++) {
263 delete worker_slices[i];
264 }
265 FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);
266
267 heap->set_full_gc_move_in_progress(false);
268 heap->set_full_gc_in_progress(false);
269
270 if (ShenandoahVerify) {
271 heap->verifier()->verify_after_fullgc();
272 }
273
274 if (VerifyAfterGC) {
275 Universe::verify();
333 }
334
// Seal the current compaction target region: publish the compaction
// point as the region's new top, once no further objects will be
// placed into it.
335 void finish() {
336 assert(_to_region != nullptr, "should not happen");
337 _to_region->set_new_top(_compact_point);
338 }
339
// True when objects are being compacted within their own region,
// i.e. the source and target regions are the same.
340 bool is_compact_same_region() {
341 return _from_region == _to_region;
342 }
343
// Index of the next unused entry in the pre-collected list of empty
// regions (see do_object, which consumes entries in order).
344 int empty_regions_pos() {
345 return _empty_regions_pos;
346 }
347
348 void do_object(oop p) {
349 assert(_from_region != nullptr, "must set before work");
350 assert(_heap->gc_generation()->complete_marking_context()->is_marked(p), "must be marked");
351 assert(!_heap->gc_generation()->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");
352
353 size_t obj_size = p->size();
354 if (_compact_point + obj_size > _to_region->end()) {
355 finish();
356
357 // Object doesn't fit. Pick next empty region and start compacting there.
358 ShenandoahHeapRegion* new_to_region;
359 if (_empty_regions_pos < _empty_regions.length()) {
360 new_to_region = _empty_regions.at(_empty_regions_pos);
361 _empty_regions_pos++;
362 } else {
363 // Out of empty region? Compact within the same region.
364 new_to_region = _from_region;
365 }
366
367 assert(new_to_region != _to_region, "must not reuse same to-region");
368 assert(new_to_region != nullptr, "must not be null");
369 _to_region = new_to_region;
370 _compact_point = _to_region->bottom();
371 }
372
373 // Object fits into current region, record new location, if object does not move:
374 assert(_compact_point + obj_size <= _to_region->end(), "must fit");
375 shenandoah_assert_not_forwarded(nullptr, p);
376 if (_compact_point != cast_from_oop<HeapWord*>(p)) {
377 _preserved_marks->push_if_necessary(p, p->mark());
378 FullGCForwarding::forward_to(p, cast_to_oop(_compact_point));
379 }
380 _compact_point += obj_size;
381 }
382 };
383
384 class ShenandoahPrepareForCompactionTask : public WorkerTask {
385 private:
386 PreservedMarksSet* const _preserved_marks;
387 ShenandoahHeap* const _heap;
388 ShenandoahHeapRegionSet** const _worker_slices;
389
390 public:
870 private:
871 ShenandoahHeap* const _heap;
872 uint const _worker_id;
873
874 public:
// Binds the closure to the global heap instance; worker_id records
// which GC worker runs this closure.
875 ShenandoahCompactObjectsClosure(uint worker_id) :
876 _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}
877
878 void do_object(oop p) {
879 assert(_heap->gc_generation()->complete_marking_context()->is_marked(p), "must be marked");
880 size_t size = p->size();
881 if (FullGCForwarding::is_forwarded(p)) {
882 HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
883 HeapWord* compact_to = cast_from_oop<HeapWord*>(FullGCForwarding::forwardee(p));
884 assert(compact_from != compact_to, "Forwarded object should move");
885 Copy::aligned_conjoint_words(compact_from, compact_to, size);
886 oop new_obj = cast_to_oop(compact_to);
887
888 ContinuationGCSupport::relativize_stack_chunk(new_obj);
889 new_obj->init_mark();
890 }
891 }
892 };
893
894 class ShenandoahCompactObjectsTask : public WorkerTask {
895 private:
896 ShenandoahHeap* const _heap;
897 ShenandoahHeapRegionSet** const _worker_slices;
898
899 public:
// Worker task that moves objects to their forwarded locations; each
// worker processes the regions in its pre-assigned slice of
// worker_slices (see work(), which indexes by worker_id).
900 ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
901 WorkerTask("Shenandoah Compact Objects"),
902 _heap(ShenandoahHeap::heap()),
903 _worker_slices(worker_slices) {
904 }
905
906 void work(uint worker_id) {
907 ShenandoahParallelWorkerSession worker_session(worker_id);
908 ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);
909
|
227 // Coming out of Full GC, we would not have any forwarded objects.
228 // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
229 heap->set_has_forwarded_objects(false);
230
231 heap->set_full_gc_move_in_progress(true);
232
233 // Setup workers for the rest
234 OrderAccess::fence();
235
236 // Initialize worker slices
237 ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
238 for (uint i = 0; i < heap->max_workers(); i++) {
239 worker_slices[i] = new ShenandoahHeapRegionSet();
240 }
241
242 {
243 // The rest of code performs region moves, where region status is undefined
244 // until all phases run together.
245 ShenandoahHeapLocker lock(heap->lock());
246
247 FullGCForwarding::begin();
248
249 phase2_calculate_target_addresses(worker_slices);
250
251 OrderAccess::fence();
252
253 phase3_update_references();
254
255 phase4_compact_objects(worker_slices);
256
257 phase5_epilog();
258
259 FullGCForwarding::end();
260 }
261
262 // Resize metaspace
263 MetaspaceGC::compute_new_size();
264
265 // Free worker slices
266 for (uint i = 0; i < heap->max_workers(); i++) {
267 delete worker_slices[i];
268 }
269 FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);
270
271 heap->set_full_gc_move_in_progress(false);
272 heap->set_full_gc_in_progress(false);
273
274 if (ShenandoahVerify) {
275 heap->verifier()->verify_after_fullgc();
276 }
277
278 if (VerifyAfterGC) {
279 Universe::verify();
337 }
338
// Seal the current compaction target region: publish the compaction
// point as the region's new top, once no further objects will be
// placed into it.
339 void finish() {
340 assert(_to_region != nullptr, "should not happen");
341 _to_region->set_new_top(_compact_point);
342 }
343
// True when objects are being compacted within their own region,
// i.e. the source and target regions are the same.
344 bool is_compact_same_region() {
345 return _from_region == _to_region;
346 }
347
// Index of the next unused entry in the pre-collected list of empty
// regions (see do_object, which consumes entries in order).
348 int empty_regions_pos() {
349 return _empty_regions_pos;
350 }
351
// Compute the post-compaction address for a single marked object,
// accounting for the fact that a MOVED copy may need a different size
// than the in-place original (p->copy_size(); presumably room for an
// appended identity hash under compact object headers — confirm
// against FullGCForwarding/oopDesc docs).
352 void do_object(oop p) {
353 assert(_from_region != nullptr, "must set before work");
354 assert(_heap->gc_generation()->complete_marking_context()->is_marked(p), "must be marked");
355 assert(!_heap->gc_generation()->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");
356
// old_size: size of the object where it lies now; new_size: size the
// copy would need if the object moves. Which one applies depends on
// whether the object ends up exactly at the compaction point (i.e.
// does not move).
357 size_t old_size = p->size();
358 size_t new_size = p->copy_size(old_size, p->mark());
359 size_t obj_size = _compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
360 if (_compact_point + obj_size > _to_region->end()) {
361 finish();
362
363 // Object doesn't fit. Pick next empty region and start compacting there.
364 ShenandoahHeapRegion* new_to_region;
365 if (_empty_regions_pos < _empty_regions.length()) {
366 new_to_region = _empty_regions.at(_empty_regions_pos);
367 _empty_regions_pos++;
368 } else {
369 // Out of empty region? Compact within the same region.
370 new_to_region = _from_region;
371 }
372
373 assert(new_to_region != _to_region, "must not reuse same to-region");
374 assert(new_to_region != nullptr, "must not be null");
375 _to_region = new_to_region;
376 _compact_point = _to_region->bottom();
// The compaction point changed, so the moved/not-moved decision (and
// hence the applicable size) must be re-evaluated.
377 obj_size = _compact_point == cast_from_oop<HeapWord*>(p) ? old_size : new_size;
378 }
379
380 // Object fits into current region, record new location, if object does not move:
381 assert(_compact_point + obj_size <= _to_region->end(), "must fit");
382 shenandoah_assert_not_forwarded(nullptr, p);
// Preserve the original mark word before forwarding overwrites the header.
383 if (_compact_point != cast_from_oop<HeapWord*>(p)) {
384 _preserved_marks->push_if_necessary(p, p->mark());
385 FullGCForwarding::forward_to(p, cast_to_oop(_compact_point));
386 }
387 _compact_point += obj_size;
388 }
389 };
390
391 class ShenandoahPrepareForCompactionTask : public WorkerTask {
392 private:
393 PreservedMarksSet* const _preserved_marks;
394 ShenandoahHeap* const _heap;
395 ShenandoahHeapRegionSet** const _worker_slices;
396
397 public:
877 private:
878 ShenandoahHeap* const _heap;
879 uint const _worker_id;
880
881 public:
// Binds the closure to the global heap instance; worker_id records
// which GC worker runs this closure.
882 ShenandoahCompactObjectsClosure(uint worker_id) :
883 _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}
884
885 void do_object(oop p) {
886 assert(_heap->gc_generation()->complete_marking_context()->is_marked(p), "must be marked");
887 size_t size = p->size();
888 if (FullGCForwarding::is_forwarded(p)) {
889 HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
890 HeapWord* compact_to = cast_from_oop<HeapWord*>(FullGCForwarding::forwardee(p));
891 assert(compact_from != compact_to, "Forwarded object should move");
892 Copy::aligned_conjoint_words(compact_from, compact_to, size);
893 oop new_obj = cast_to_oop(compact_to);
894
895 ContinuationGCSupport::relativize_stack_chunk(new_obj);
896 new_obj->init_mark();
897 new_obj->initialize_hash_if_necessary(p);
898 }
899 }
900 };
901
902 class ShenandoahCompactObjectsTask : public WorkerTask {
903 private:
904 ShenandoahHeap* const _heap;
905 ShenandoahHeapRegionSet** const _worker_slices;
906
907 public:
// Worker task that moves objects to their forwarded locations; each
// worker processes the regions in its pre-assigned slice of
// worker_slices (see work(), which indexes by worker_id).
908 ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
909 WorkerTask("Shenandoah Compact Objects"),
910 _heap(ShenandoahHeap::heap()),
911 _worker_slices(worker_slices) {
912 }
913
914 void work(uint worker_id) {
915 ShenandoahParallelWorkerSession worker_session(worker_id);
916 ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);
917
|