 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "compiler/oopMap.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1FullCollector.inline.hpp"
#include "gc/g1/g1FullGCAdjustTask.hpp"
#include "gc/g1/g1FullGCCompactTask.hpp"
#include "gc/g1/g1FullGCMarker.inline.hpp"
#include "gc/g1/g1FullGCMarkTask.hpp"
#include "gc/g1/g1FullGCPrepareTask.inline.hpp"
#include "gc/g1/g1FullGCResetMetadataTask.hpp"
#include "gc/g1/g1FullGCScope.hpp"
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/slidingForwarding.hpp"
#include "gc/shared/verifyOption.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "logging/log.hpp"
#include "runtime/handles.inline.hpp"
#include "utilities/debug.hpp"

static void clear_and_activate_derived_pointers() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif
}

static void deactivate_derived_pointers() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::set_active(false);
#endif
}

static void update_derived_pointers() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

// ... (constructor and other setup code elided) ...

void G1FullCollector::prepare_collection() {
  // ... (earlier preparation steps elided) ...
  _heap->prepare_heap_for_full_collection();

  PrepareRegionsClosure cl(this);
  _heap->heap_region_iterate(&cl);

  reference_processor()->start_discovery(scope()->should_clear_soft_refs());

  // Clear and activate derived pointer collection.
  clear_and_activate_derived_pointers();
}
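
// The three helpers above bracket the derived-pointer lifecycle of a full
// collection; a minimal sketch of the intended call order, using only names
// from this file:
//
//   prepare_collection()       -> clear_and_activate_derived_pointers()
//   collect(), after marking   -> deactivate_derived_pointers()
//   complete_collection()      -> update_derived_pointers()
//
// Derived pointers are interior pointers that C2/JVMCI-compiled code keeps
// into objects; they are recorded while the table is active and can only be
// recomputed after their base objects have moved, hence the update happens
// last, in complete_collection().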

void G1FullCollector::collect() {
  G1CollectedHeap::start_codecache_marking_cycle_if_inactive(false /* concurrent_mark_start */);

  phase1_mark_live_objects();
  verify_after_marking();

  // Don't add any more derived pointers during later phases.
  deactivate_derived_pointers();

  SlidingForwarding::begin();

  phase2_prepare_compaction();

  if (has_compaction_targets()) {
    phase3_adjust_pointers();

    phase4_do_compaction();
  } else {
    // All regions have a high live ratio and thus will not be compacted.
    // The live ratio is only considered if do_maximal_compaction is false.
    log_info(gc, phases)("No Regions selected for compaction. Skipping Phase 3: Adjust pointers and Phase 4: Compact heap");
  }

  SlidingForwarding::end();

  phase5_reset_metadata();

  G1CollectedHeap::finish_codecache_marking_cycle();
}
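
// SlidingForwarding::begin() and ::end() in collect() bracket every phase
// that records or consumes forwarding information. A minimal reading of the
// pattern, assuming the gc/shared/slidingForwarding.hpp implementation that
// the UseAltGCForwarding code paths below rely on: begin() sets up the
// per-region tables that let forwarding pointers be encoded compactly in
// object headers, and end() tears them down once compaction no longer needs
// them, which is why phases 2 through 4 run strictly between the two calls.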

void G1FullCollector::complete_collection() {
  // Restore all marks.
  restore_marks();

  // When the pointers have been adjusted and moved, we can
  // update the derived pointer table.
  update_derived_pointers();

  // Need completely cleared claim bits for the next concurrent marking or full gc.
  ClassLoaderDataGraph::clear_claimed_marks();

  // Prepare the bitmap for the next (potentially concurrent) marking.
  _heap->concurrent_mark()->clear_bitmap(_heap->workers());

  _heap->prepare_for_mutator_after_full_collection();
  // ... (rest of complete_collection() elided) ...
}

// ... (phase 1 marking and phase 2 preparation helpers elided) ...

uint G1FullCollector::truncate_parallel_cps() {
  uint lowest_current = UINT_MAX;
  for (uint i = 0; i < workers(); i++) {
    G1FullGCCompactionPoint* cp = compaction_point(i);
    if (cp->has_regions()) {
      lowest_current = MIN2(lowest_current, cp->current_region()->hrm_index());
    }
  }

  for (uint i = 0; i < workers(); i++) {
    G1FullGCCompactionPoint* cp = compaction_point(i);
    if (cp->has_regions()) {
      cp->remove_at_or_above(lowest_current);
    }
  }
  return lowest_current;
}
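
// Worked example (hypothetical worker state): with three compaction points
// whose current regions have hrm indices 7, 4 and 9, the first loop leaves
// lowest_current == 4. The second loop then removes region 4 and everything
// above it from every queue, and the caller re-prepares all objects from
// region 4 upwards serially, so the uneven parallel tails cannot leave
// holes below the serial starting point.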

template <bool ALT_FWD>
void G1FullCollector::phase2c_prepare_serial_compaction_impl() {
  GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare serial compaction", scope()->timer());
  // At this point, we know that after parallel compaction there will be regions that
  // are only partially compacted into. Thus, the last compaction region of each
  // compaction queue still has space in it. We try to re-compact these regions
  // in serial to avoid a premature OOM when the mutator wants to allocate the first
  // eden region after gc.

  // For maximum compaction, we need to re-prepare all objects above the lowest
  // region among the current regions of all thread compaction points. Due to the
  // uneven distribution of objects to parallel threads, holes may have been
  // created as threads compact to different target regions between the
  // lowest and the highest region in the tails of the compaction points.

  uint start_serial = truncate_parallel_cps();
  assert(start_serial < _heap->max_reserved_regions(), "Called on empty parallel compaction queues");

  G1FullGCCompactionPoint* serial_cp = serial_compaction_point();
  assert(!serial_cp->is_initialized(), "sanity!");

  HeapRegion* start_hr = _heap->region_at(start_serial);
  serial_cp->add(start_hr);
  serial_cp->initialize(start_hr);

  HeapWord* dense_prefix_top = compaction_top(start_hr);
  G1SerialRePrepareClosure<ALT_FWD> re_prepare(serial_cp, dense_prefix_top);

  for (uint i = start_serial + 1; i < _heap->max_reserved_regions(); i++) {
    if (is_compaction_target(i)) {
      HeapRegion* current = _heap->region_at(i);
      set_compaction_top(current, current->bottom());
      serial_cp->add(current);
      current->apply_to_marked_objects(mark_bitmap(), &re_prepare);
    }
  }
  serial_cp->update();
}

void G1FullCollector::phase2c_prepare_serial_compaction() {
  if (UseAltGCForwarding) {
    phase2c_prepare_serial_compaction_impl<true>();
  } else {
    phase2c_prepare_serial_compaction_impl<false>();
  }
}
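
// Dispatching on UseAltGCForwarding once per phase, rather than per object,
// is the point of the ALT_FWD template parameter: both variants of the
// implementation are instantiated at compile time, so the closures applied
// to every marked object carry no runtime flag check. The humongous
// preparation below repeats the same wrapper pattern.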

template <bool ALT_FWD>
void G1FullCollector::phase2d_prepare_humongous_compaction_impl() {
  GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare humongous compaction", scope()->timer());
  G1FullGCCompactionPoint* serial_cp = serial_compaction_point();
  assert(serial_cp->has_regions(), "Sanity!");

  uint last_serial_target = serial_cp->current_region()->hrm_index();
  uint region_index = last_serial_target + 1;
  uint max_reserved_regions = _heap->max_reserved_regions();

  G1FullGCCompactionPoint* humongous_cp = humongous_compaction_point();

  while (region_index < max_reserved_regions) {
    HeapRegion* hr = _heap->region_at_or_null(region_index);

    if (hr == nullptr) {
      region_index++;
      continue;
    } else if (hr->is_starts_humongous()) {
      uint num_regions = humongous_cp->forward_humongous<ALT_FWD>(hr);
      region_index += num_regions; // Skip over the continues humongous regions.
      continue;
    } else if (is_compaction_target(region_index)) {
      // Add the region to the humongous compaction point.
      humongous_cp->add(hr);
    }
    region_index++;
  }
}

void G1FullCollector::phase2d_prepare_humongous_compaction() {
  if (UseAltGCForwarding) {
    phase2d_prepare_humongous_compaction_impl<true>();
  } else {
    phase2d_prepare_humongous_compaction_impl<false>();
  }
}
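
// Worked example (hypothetical region layout): given regions
//   [5: starts humongous][6: continues humongous][7: continues humongous][8: normal]
// the walk reaches index 5 and calls forward_humongous(); if that returns 3
// (the number of regions covered by the humongous object), the index jumps
// straight to 8, so the continues humongous regions are never added to the
// humongous compaction point individually.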

void G1FullCollector::phase3_adjust_pointers() {
  // Adjust the pointers to reflect the new locations.
  GCTraceTime(Info, gc, phases) info("Phase 3: Adjust pointers", scope()->timer());

  G1FullGCAdjustTask task(this);
  run_task(&task);
}

void G1FullCollector::phase4_do_compaction() {
  // Compact the heap using the compaction queues created in phase 2.
  GCTraceTime(Info, gc, phases) info("Phase 4: Compact heap", scope()->timer());
  G1FullGCCompactTask task(this);
  run_task(&task);

  // Serial compact to avoid OOM when very few free regions.
  if (serial_compaction_point()->has_regions()) {
    task.serial_compaction();
  }

  if (!_humongous_compaction_regions.is_empty()) {
    task.humongous_compaction();
  }
}