#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1FullCollector.inline.hpp"
#include "gc/g1/g1FullGCAdjustTask.hpp"
#include "gc/g1/g1FullGCCompactTask.hpp"
#include "gc/g1/g1FullGCMarker.inline.hpp"
#include "gc/g1/g1FullGCMarkTask.hpp"
#include "gc/g1/g1FullGCPrepareTask.inline.hpp"
#include "gc/g1/g1FullGCResetMetadataTask.hpp"
#include "gc/g1/g1FullGCScope.hpp"
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/verifyOption.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "logging/log.hpp"
#include "runtime/handles.inline.hpp"
#include "utilities/debug.hpp"

static void clear_and_activate_derived_pointers() {
#if COMPILER2_OR_JVMCI
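  // DerivedPointerTable::clear() also re-activates the table, hence the
  // "activate" in this helper's name.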
  DerivedPointerTable::clear();
#endif
}

static void deactivate_derived_pointers() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::set_active(false);
#endif
}

static void update_derived_pointers() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

// ... (intervening code elided) ...

  // Class unloading and cleanup.
  if (ClassUnloading) {
    GCTraceTime(Debug, gc, phases) debug("Phase 1: Class Unloading and Cleanup", scope()->timer());
    CodeCache::UnloadingScope unloading_scope(&_is_alive);
    // Unload classes and purge the SystemDictionary.
    bool purged_class = SystemDictionary::do_unloading(scope()->timer());
    _heap->complete_cleaning(purged_class);
  }

  scope()->tracer()->report_object_count_after_gc(&_is_alive);
#if TASKQUEUE_STATS
  oop_queue_set()->print_and_reset_taskqueue_stats("Oop Queue");
  array_queue_set()->print_and_reset_taskqueue_stats("ObjArrayOop Queue");
#endif
}
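
// For reference: the _is_alive closure used above is, at its core, a lookup in
// the full-GC mark bitmap. A minimal sketch of its assumed shape (the real
// closure is defined in the full GC headers and may carry extra special
// cases):
//
//   class IsAliveSketch : public BoolObjectClosure {   // hypothetical name
//     G1CMBitMap* _bitmap;
//   public:
//     bool do_object_b(oop p) { return _bitmap->is_marked(p); }
//   };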

void G1FullCollector::phase2_prepare_compaction() {
  GCTraceTime(Info, gc, phases) info("Phase 2: Prepare compaction", scope()->timer());

  phase2a_determine_worklists();

  if (!has_compaction_targets()) {
    return;
  }

  bool has_free_compaction_targets = phase2b_forward_oops();

  // Try to avoid OOM immediately after Full GC in case there are no free regions
  // left after determining the result locations (i.e. this phase). Prepare to
  // maximally compact the tail regions of the compaction queues serially.
  if (scope()->do_maximal_compaction() || !has_free_compaction_targets) {
    phase2c_prepare_serial_compaction();
  }
}

void G1FullCollector::phase2a_determine_worklists() {
  GCTraceTime(Debug, gc, phases) debug("Phase 2: Determine work lists", scope()->timer());

  G1DetermineCompactionQueueClosure cl(this);
  _heap->heap_region_iterate(&cl);
}

bool G1FullCollector::phase2b_forward_oops() {
  GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare parallel compaction", scope()->timer());

  G1FullGCPrepareTask task(this);
  run_task(&task);

  return task.has_free_compaction_targets();
}

uint G1FullCollector::truncate_parallel_cps() {
  uint lowest_current = (uint)-1;
  for (uint i = 0; i < workers(); i++) {
    G1FullGCCompactionPoint* cp = compaction_point(i);
    if (cp->has_regions()) {
      lowest_current = MIN2(lowest_current, cp->current_region()->hrm_index());
    }
  }

  for (uint i = 0; i < workers(); i++) {
    G1FullGCCompactionPoint* cp = compaction_point(i);
    if (cp->has_regions()) {
      cp->remove_at_or_above(lowest_current);
    }
  }
  return lowest_current;
}
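
// To make the truncation concrete, here is a standalone toy version of the
// same idea using plain STL containers instead of compaction points. This is
// illustrative only and guarded out of the build; all names in it are
// hypothetical, not HotSpot API.
#ifdef G1_DOC_TRUNCATE_EXAMPLE
#include <algorithm>
#include <vector>

// queues[w] holds the region indices worker w compacts into; current[w] is the
// index of the region worker w is currently filling. Keep only entries below
// the lowest current index across all workers; everything at or above it is
// handed to the serial pass.
static unsigned truncate_example(std::vector<std::vector<unsigned>>& queues,
                                 const std::vector<unsigned>& current) {
  unsigned lowest = static_cast<unsigned>(-1);
  for (unsigned c : current) {
    lowest = std::min(lowest, c);
  }
  for (std::vector<unsigned>& q : queues) {
    q.erase(std::remove_if(q.begin(), q.end(),
                           [lowest](unsigned idx) { return idx >= lowest; }),
            q.end());
  }
  return lowest;   // the serial compaction queue starts at this region
}
#endif // G1_DOC_TRUNCATE_EXAMPLE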

void G1FullCollector::phase2c_prepare_serial_compaction() {
  GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare serial compaction", scope()->timer());
  // At this point, we know that after parallel compaction there will be regions
  // that are only partially compacted into. Thus, the last compaction region of
  // each compaction queue may still have free space in it. We re-compact these
  // tail regions serially to avoid a premature OOM when the mutator wants to
  // allocate the first eden region after GC.

  // For maximal compaction, we need to re-prepare all objects above the lowest
  // region among the current regions of all thread compaction points. Due to the
  // uneven distribution of objects across parallel threads, holes may have been
  // created as threads compact to different target regions between the lowest
  // and the highest region in the tails of the compaction points.

  uint start_serial = truncate_parallel_cps();
  assert(start_serial < _heap->max_reserved_regions(), "Called on empty parallel compaction queues");

  G1FullGCCompactionPoint* serial_cp = serial_compaction_point();
  assert(!serial_cp->is_initialized(), "sanity!");

  HeapRegion* start_hr = _heap->region_at(start_serial);
  serial_cp->add(start_hr);
  serial_cp->initialize(start_hr);

  HeapWord* dense_prefix_top = compaction_top(start_hr);
  G1SerialRePrepareClosure re_prepare(serial_cp, dense_prefix_top);

  for (uint i = start_serial + 1; i < _heap->max_reserved_regions(); i++) {
    if (is_compaction_target(i)) {
      HeapRegion* current = _heap->region_at(i);
      set_compaction_top(current, current->bottom());
      serial_cp->add(current);
      current->apply_to_marked_objects(mark_bitmap(), &re_prepare);
    }
  }
  serial_cp->update();
}
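
// What re_prepare does per marked object, in sketch form (assumed shape of the
// closure; the real definition lives with the prepare task): objects at or
// above the dense prefix get a fresh destination from the serial compaction
// point, so the tail regions slide together.
//
//   size_t G1SerialRePrepareClosure::apply(oop obj) {        // sketch only
//     if (cast_from_oop<HeapWord*>(obj) >= _dense_prefix_top) {
//       _cp->forward(obj, obj->size());
//     }
//     return obj->size();
//   }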

void G1FullCollector::phase3_adjust_pointers() {
  // Adjust the pointers to reflect the new locations.
  GCTraceTime(Info, gc, phases) info("Phase 3: Adjust pointers", scope()->timer());

  G1FullGCAdjustTask task(this);
  run_task(&task);
}
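
// The adjustment applied to each reference, in sketch form (assuming the
// classic mark-word forwarding used in this revision; the real closure is
// defined with the G1 full GC oop closures):
//
//   template <class T> void adjust_pointer(T* p) {           // sketch only
//     oop obj = RawAccess<>::oop_load(p);
//     if (obj != nullptr && obj->is_forwarded()) {
//       RawAccess<>::oop_store(p, obj->forwardee());
//     }
//   }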

void G1FullCollector::phase4_do_compaction() {
  // Compact the heap using the compaction queues created in phase 2.
  GCTraceTime(Info, gc, phases) info("Phase 4: Compact heap", scope()->timer());
  G1FullGCCompactTask task(this);
  run_task(&task);

  // Serial compaction to avoid OOM when there are very few free regions.
  if (serial_compaction_point()->has_regions()) {
    task.serial_compaction();
  }
}
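
// Per-object compaction, in sketch form (assumed shape; the actual work is
// done by the closures inside G1FullGCCompactTask): copy each live object to
// its forwardee and reinitialize its header.
//
//   size_t compact_obj(oop obj) {                            // sketch only
//     HeapWord* dst = cast_from_oop<HeapWord*>(obj->forwardee());
//     size_t size = obj->size();
//     Copy::aligned_conjoint_words(cast_from_oop<HeapWord*>(obj), dst, size);
//     cast_to_oop(dst)->init_mark();
//     return size;
//   }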

// ---------------------------------------------------------------------------
// Second revision of the same file, updated for the sliding-forwarding table
// (note the new gc/shared/slidingForwarding.hpp include and the TODO in
// phase2_prepare_compaction below).
// ---------------------------------------------------------------------------

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1FullCollector.inline.hpp"
#include "gc/g1/g1FullGCAdjustTask.hpp"
#include "gc/g1/g1FullGCCompactTask.hpp"
#include "gc/g1/g1FullGCMarker.inline.hpp"
#include "gc/g1/g1FullGCMarkTask.hpp"
#include "gc/g1/g1FullGCPrepareTask.inline.hpp"
#include "gc/g1/g1FullGCResetMetadataTask.hpp"
#include "gc/g1/g1FullGCScope.hpp"
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/slidingForwarding.hpp"
#include "gc/shared/verifyOption.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "logging/log.hpp"
#include "runtime/handles.inline.hpp"
#include "utilities/debug.hpp"

static void clear_and_activate_derived_pointers() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif
}

static void deactivate_derived_pointers() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::set_active(false);
#endif
}

static void update_derived_pointers() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

// ... (intervening code elided) ...

  // Class unloading and cleanup.
  if (ClassUnloading) {
    GCTraceTime(Debug, gc, phases) debug("Phase 1: Class Unloading and Cleanup", scope()->timer());
    CodeCache::UnloadingScope unloading_scope(&_is_alive);
    // Unload classes and purge the SystemDictionary.
    bool purged_class = SystemDictionary::do_unloading(scope()->timer());
    _heap->complete_cleaning(purged_class);
  }

  scope()->tracer()->report_object_count_after_gc(&_is_alive);
#if TASKQUEUE_STATS
  oop_queue_set()->print_and_reset_taskqueue_stats("Oop Queue");
  array_queue_set()->print_and_reset_taskqueue_stats("ObjArrayOop Queue");
#endif
}

void G1FullCollector::phase2_prepare_compaction() {
  GCTraceTime(Info, gc, phases) info("Phase 2: Prepare compaction", scope()->timer());

  _heap->forwarding()->clear();

  phase2a_determine_worklists();

  if (!has_compaction_targets()) {
    return;
  }

  bool has_free_compaction_targets = phase2b_forward_oops();

  // Try to avoid OOM immediately after Full GC in case there are no free regions
  // left after determining the result locations (i.e. this phase). Prepare to
  // maximally compact the tail regions of the compaction queues serially.
  // TODO: Disabled for now because it violates the sliding-forwarding assumption.
  // if (scope()->do_maximal_compaction() || !has_free_compaction_targets) {
  //   phase2c_prepare_serial_compaction();
  // }
}
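
// Why phase2c is disabled above, as far as the TODO tells us (the
// authoritative logic lives in gc/shared/slidingForwarding.hpp; the encoding
// below is an assumed sketch, not the actual layout): sliding forwarding
// compresses the forwardee into a few mark-word bits by assuming each source
// region compacts into a small, fixed set of target regions, e.g.
//
//   mark-word forwarding entry (hypothetical layout):
//     [ target-region selector : 1 bit | offset within target : N bits ]
//
// The serial re-compaction pass may retarget tail objects to yet another
// region, which such an encoding cannot express, so it stays off for now.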

void G1FullCollector::phase2a_determine_worklists() {
  GCTraceTime(Debug, gc, phases) debug("Phase 2: Determine work lists", scope()->timer());

  G1DetermineCompactionQueueClosure cl(this);
  _heap->heap_region_iterate(&cl);
}

bool G1FullCollector::phase2b_forward_oops() {
  GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare parallel compaction", scope()->timer());

  G1FullGCPrepareTask task(this);
  run_task(&task);

  return task.has_free_compaction_targets();
}

//uint G1FullCollector::truncate_parallel_cps() {
//  uint lowest_current = (uint)-1;
//  for (uint i = 0; i < workers(); i++) {
//    G1FullGCCompactionPoint* cp = compaction_point(i);
//    if (cp->has_regions()) {
//      lowest_current = MIN2(lowest_current, cp->current_region()->hrm_index());
//    }
//  }
//
//  for (uint i = 0; i < workers(); i++) {
//    G1FullGCCompactionPoint* cp = compaction_point(i);
//    if (cp->has_regions()) {
//      cp->remove_at_or_above(lowest_current);
//    }
//  }
//  return lowest_current;
//}

//void G1FullCollector::phase2c_prepare_serial_compaction() {
//  GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare serial compaction", scope()->timer());
//  // At this point, we know that after parallel compaction there will be regions
//  // that are only partially compacted into. Thus, the last compaction region of
//  // each compaction queue may still have free space in it. We re-compact these
//  // tail regions serially to avoid a premature OOM when the mutator wants to
//  // allocate the first eden region after GC.
//
//  // For maximal compaction, we need to re-prepare all objects above the lowest
//  // region among the current regions of all thread compaction points. Due to the
//  // uneven distribution of objects across parallel threads, holes may have been
//  // created as threads compact to different target regions between the lowest
//  // and the highest region in the tails of the compaction points.
//
//  uint start_serial = truncate_parallel_cps();
//  assert(start_serial < _heap->max_reserved_regions(), "Called on empty parallel compaction queues");
//
//  G1FullGCCompactionPoint* serial_cp = serial_compaction_point();
//  assert(!serial_cp->is_initialized(), "sanity!");
//
//  HeapRegion* start_hr = _heap->region_at(start_serial);
//  serial_cp->add(start_hr);
//  serial_cp->initialize(start_hr);
//
//  HeapWord* dense_prefix_top = compaction_top(start_hr);
//  G1SerialRePrepareClosure re_prepare(serial_cp, dense_prefix_top);
//
//  for (uint i = start_serial + 1; i < _heap->max_reserved_regions(); i++) {
//    if (is_compaction_target(i)) {
//      HeapRegion* current = _heap->region_at(i);
//      set_compaction_top(current, current->bottom());
//      serial_cp->add(current);
//      current->apply_to_marked_objects(mark_bitmap(), &re_prepare);
//    }
//  }
//  serial_cp->update();
//}

void G1FullCollector::phase3_adjust_pointers() {
  // Adjust the pointers to reflect the new locations.
  GCTraceTime(Info, gc, phases) info("Phase 3: Adjust pointers", scope()->timer());

  G1FullGCAdjustTask task(this);
  run_task(&task);
}

void G1FullCollector::phase4_do_compaction() {
  // Compact the heap using the compaction queues created in phase 2.
  GCTraceTime(Info, gc, phases) info("Phase 4: Compact heap", scope()->timer());
  G1FullGCCompactTask task(this);
  run_task(&task);

  // Serial compaction to avoid OOM when there are very few free regions.
  if (serial_compaction_point()->has_regions()) {
    task.serial_compaction();
  }
}