src/hotspot/share/gc/g1/g1FullCollector.cpp
 25 #include "precompiled.hpp"
 26 #include "classfile/classLoaderDataGraph.hpp"
 27 #include "classfile/systemDictionary.hpp"
 28 #include "code/codeCache.hpp"
 29 #include "compiler/oopMap.hpp"
 30 #include "gc/g1/g1CollectedHeap.hpp"
 31 #include "gc/g1/g1FullCollector.inline.hpp"
 32 #include "gc/g1/g1FullGCAdjustTask.hpp"
 33 #include "gc/g1/g1FullGCCompactTask.hpp"
 34 #include "gc/g1/g1FullGCMarker.inline.hpp"
 35 #include "gc/g1/g1FullGCMarkTask.hpp"
 36 #include "gc/g1/g1FullGCPrepareTask.inline.hpp"
 37 #include "gc/g1/g1FullGCResetMetadataTask.hpp"
 38 #include "gc/g1/g1FullGCScope.hpp"
 39 #include "gc/g1/g1OopClosures.hpp"
 40 #include "gc/g1/g1Policy.hpp"
 41 #include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
 42 #include "gc/shared/gcTraceTime.inline.hpp"
 43 #include "gc/shared/preservedMarks.inline.hpp"
 44 #include "gc/shared/referenceProcessor.hpp"

 45 #include "gc/shared/verifyOption.hpp"
 46 #include "gc/shared/weakProcessor.inline.hpp"
 47 #include "gc/shared/workerPolicy.hpp"
 48 #include "logging/log.hpp"
 49 #include "runtime/handles.inline.hpp"
 50 #include "utilities/debug.hpp"
 51 
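// Note (assumption, not in the original source): DerivedPointerTable::clear()
// both empties and re-activates the table, which is why the helper below is
// named clear_and_activate. Derived pointers are interior pointers computed
// from a base oop by compiled (C2/JVMCI) code, hence the COMPILER2_OR_JVMCI guards.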
 52 static void clear_and_activate_derived_pointers() {
 53 #if COMPILER2_OR_JVMCI
 54   DerivedPointerTable::clear();
 55 #endif
 56 }
 57 
 58 static void deactivate_derived_pointers() {
 59 #if COMPILER2_OR_JVMCI
 60   DerivedPointerTable::set_active(false);
 61 #endif
 62 }
 63 
 64 static void update_derived_pointers() {

193   _heap->prepare_heap_for_full_collection();
194 
195   PrepareRegionsClosure cl(this);
196   _heap->heap_region_iterate(&cl);
197 
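  // should_clear_soft_refs() is typically set for last-ditch collections;
  // passing it here makes reference discovery treat SoftReferences as clearable.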
198   reference_processor()->start_discovery(scope()->should_clear_soft_refs());
199 
200   // Clear and activate derived pointer collection.
201   clear_and_activate_derived_pointers();
202 }
203 
204 void G1FullCollector::collect() {
205   G1CollectedHeap::start_codecache_marking_cycle_if_inactive(false /* concurrent_mark_start */);
206 
207   phase1_mark_live_objects();
208   verify_after_marking();
209 
210   // Don't add any more derived pointers during later phases
211   deactivate_derived_pointers();
212 


213   phase2_prepare_compaction();
214 
215   if (has_compaction_targets()) {
216     phase3_adjust_pointers();
217 
218     phase4_do_compaction();
219   } else {
220     // All regions have a high live ratio and thus will not be compacted.
221     // The live ratio is only considered if do_maximal_compaction is false.
222     log_info(gc, phases) ("No Regions selected for compaction. Skipping Phase 3: Adjust pointers and Phase 4: Compact heap");
223   }
224 


225   phase5_reset_metadata();
226 
227   G1CollectedHeap::finish_codecache_marking_cycle();
228 }
229 
230 void G1FullCollector::complete_collection() {
231   // Restore all marks.
232   restore_marks();
233 
234   // When the pointers have been adjusted and moved, we can
235   // update the derived pointer table.
236   update_derived_pointers();
237 
238   // Need completely cleared claim bits for the next concurrent marking or full gc.
239   ClassLoaderDataGraph::clear_claimed_marks();
240 
241   // Prepare the bitmap for the next (potentially concurrent) marking.
242   _heap->concurrent_mark()->clear_bitmap(_heap->workers());
243 
244   _heap->prepare_for_mutator_after_full_collection();

377 }
378 
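// Find the lowest-numbered region that any worker's compaction point is still
// compacting into, then cut every parallel queue back to below that region.
// The regions removed here become the serial compaction targets in phase 2c.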
379 uint G1FullCollector::truncate_parallel_cps() {
380   uint lowest_current = UINT_MAX;
381   for (uint i = 0; i < workers(); i++) {
382     G1FullGCCompactionPoint* cp = compaction_point(i);
383     if (cp->has_regions()) {
384       lowest_current = MIN2(lowest_current, cp->current_region()->hrm_index());
385     }
386   }
387 
388   for (uint i = 0; i < workers(); i++) {
389     G1FullGCCompactionPoint* cp = compaction_point(i);
390     if (cp->has_regions()) {
391       cp->remove_at_or_above(lowest_current);
392     }
393   }
394   return lowest_current;
395 }
396 
397 void G1FullCollector::phase2c_prepare_serial_compaction() {

398   GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare serial compaction", scope()->timer());
399   // At this point, we know that after parallel compaction there will be regions
400   // that are only partially compacted into. Thus, the last compaction region of
401   // each compaction queue still has space in it. We re-compact these regions
402   // serially to avoid a premature OOM when the mutator wants to allocate the
403   // first eden region after gc.
404 
405   // For maximum compaction, we need to re-prepare all objects above the lowest
406   // region among the current regions of all thread compaction points. Due to
407   // the uneven distribution of objects to parallel threads, holes may have been
408   // created as threads compacted to different target regions between the lowest
409   // and the highest region in the tails of the compaction points.
410 
411   uint start_serial = truncate_parallel_cps();
412   assert(start_serial < _heap->max_reserved_regions(), "Called on empty parallel compaction queues");
413 
414   G1FullGCCompactionPoint* serial_cp = serial_compaction_point();
415   assert(!serial_cp->is_initialized(), "sanity!");
416 
417   HeapRegion* start_hr = _heap->region_at(start_serial);
418   serial_cp->add(start_hr);
419   serial_cp->initialize(start_hr);
420 
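  // compaction_top(start_hr) appears to mark how far parallel compaction has
  // already filled the first serial target; the re-prepare closure leaves that
  // dense prefix in place and forwards objects into the space above it.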
421   HeapWord* dense_prefix_top = compaction_top(start_hr);
422   G1SerialRePrepareClosure re_prepare(serial_cp, dense_prefix_top);
423 
424   for (uint i = start_serial + 1; i < _heap->max_reserved_regions(); i++) {
425     if (is_compaction_target(i)) {
426       HeapRegion* current = _heap->region_at(i);
427       set_compaction_top(current, current->bottom());
428       serial_cp->add(current);
429       current->apply_to_marked_objects(mark_bitmap(), &re_prepare);
430     }
431   }
432   serial_cp->update();
433 }
434 
435 void G1FullCollector::phase2d_prepare_humongous_compaction() {









436   GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare humongous compaction", scope()->timer());
437   G1FullGCCompactionPoint* serial_cp = serial_compaction_point();
438   assert(serial_cp->has_regions(), "Sanity!");
439 
440   uint last_serial_target = serial_cp->current_region()->hrm_index();
441   uint region_index = last_serial_target + 1;
442   uint max_reserved_regions = _heap->max_reserved_regions();
443 
444   G1FullGCCompactionPoint* humongous_cp = humongous_compaction_point();
445 
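  // Walk every region above the last serial compaction target: humongous
  // objects are forwarded as a whole (skipping their continuation regions),
  // while ordinary compaction targets are queued as destination space.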
446   while (region_index < max_reserved_regions) {
447     HeapRegion* hr = _heap->region_at_or_null(region_index);
448 
449     if (hr == nullptr) {
450       region_index++;
451       continue;
452     } else if (hr->is_starts_humongous()) {
453       uint num_regions = humongous_cp->forward_humongous(hr);
454       region_index += num_regions; // Skip over the continues humongous regions.
455       continue;
456     } else if (is_compaction_target(region_index)) {
457       // Add the region to the humongous compaction point.
458       humongous_cp->add(hr);
459     }
460     region_index++;
461   }
462 }
463 








464 void G1FullCollector::phase3_adjust_pointers() {
465   // Adjust the pointers to reflect the new locations
466   GCTraceTime(Info, gc, phases) info("Phase 3: Adjust pointers", scope()->timer());
467 
468   G1FullGCAdjustTask task(this);
469   run_task(&task);
470 }
471 
472 void G1FullCollector::phase4_do_compaction() {
473   // Compact the heap using the compaction queues created in phase 2.
474   GCTraceTime(Info, gc, phases) info("Phase 4: Compact heap", scope()->timer());
475   G1FullGCCompactTask task(this);
476   run_task(&task);
477 
478   // Compact serially to avoid OOM when there are very few free regions.
479   if (serial_compaction_point()->has_regions()) {
480     task.serial_compaction();
481   }
482 
483   if (!_humongous_compaction_regions.is_empty()) {

 25 #include "precompiled.hpp"
 26 #include "classfile/classLoaderDataGraph.hpp"
 27 #include "classfile/systemDictionary.hpp"
 28 #include "code/codeCache.hpp"
 29 #include "compiler/oopMap.hpp"
 30 #include "gc/g1/g1CollectedHeap.hpp"
 31 #include "gc/g1/g1FullCollector.inline.hpp"
 32 #include "gc/g1/g1FullGCAdjustTask.hpp"
 33 #include "gc/g1/g1FullGCCompactTask.hpp"
 34 #include "gc/g1/g1FullGCMarker.inline.hpp"
 35 #include "gc/g1/g1FullGCMarkTask.hpp"
 36 #include "gc/g1/g1FullGCPrepareTask.inline.hpp"
 37 #include "gc/g1/g1FullGCResetMetadataTask.hpp"
 38 #include "gc/g1/g1FullGCScope.hpp"
 39 #include "gc/g1/g1OopClosures.hpp"
 40 #include "gc/g1/g1Policy.hpp"
 41 #include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
 42 #include "gc/shared/gcTraceTime.inline.hpp"
 43 #include "gc/shared/preservedMarks.inline.hpp"
 44 #include "gc/shared/referenceProcessor.hpp"
 45 #include "gc/shared/slidingForwarding.hpp"
 46 #include "gc/shared/verifyOption.hpp"
 47 #include "gc/shared/weakProcessor.inline.hpp"
 48 #include "gc/shared/workerPolicy.hpp"
 49 #include "logging/log.hpp"
 50 #include "runtime/handles.inline.hpp"
 51 #include "utilities/debug.hpp"
 52 
 53 static void clear_and_activate_derived_pointers() {
 54 #if COMPILER2_OR_JVMCI
 55   DerivedPointerTable::clear();
 56 #endif
 57 }
 58 
 59 static void deactivate_derived_pointers() {
 60 #if COMPILER2_OR_JVMCI
 61   DerivedPointerTable::set_active(false);
 62 #endif
 63 }
 64 
 65 static void update_derived_pointers() {

194   _heap->prepare_heap_for_full_collection();
195 
196   PrepareRegionsClosure cl(this);
197   _heap->heap_region_iterate(&cl);
198 
199   reference_processor()->start_discovery(scope()->should_clear_soft_refs());
200 
201   // Clear and activate derived pointer collection.
202   clear_and_activate_derived_pointers();
203 }
204 
205 void G1FullCollector::collect() {
206   G1CollectedHeap::start_codecache_marking_cycle_if_inactive(false /* concurrent_mark_start */);
207 
208   phase1_mark_live_objects();
209   verify_after_marking();
210 
211   // Don't add any more derived pointers during later phases
212   deactivate_derived_pointers();
213 
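  // SlidingForwarding encodes forwarding information compactly instead of
  // storing full pointers in the mark word; begin() presumably allocates the
  // per-region tables needed for that, and end() below releases them.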
214   SlidingForwarding::begin();
215 
216   phase2_prepare_compaction();
217 
218   if (has_compaction_targets()) {
219     phase3_adjust_pointers();
220 
221     phase4_do_compaction();
222   } else {
223     // All regions have a high live ratio and thus will not be compacted.
224     // The live ratio is only considered if do_maximal_compaction is false.
225     log_info(gc, phases) ("No Regions selected for compaction. Skipping Phase 3: Adjust pointers and Phase 4: Compact heap");
226   }
227 
228   SlidingForwarding::end();
229 
230   phase5_reset_metadata();
231 
232   G1CollectedHeap::finish_codecache_marking_cycle();
233 }
234 
235 void G1FullCollector::complete_collection() {
236   // Restore all marks.
237   restore_marks();
238 
239   // When the pointers have been adjusted and moved, we can
240   // update the derived pointer table.
241   update_derived_pointers();
242 
243   // Need completely cleared claim bits for the next concurrent marking or full gc.
244   ClassLoaderDataGraph::clear_claimed_marks();
245 
246   // Prepare the bitmap for the next (potentially concurrent) marking.
247   _heap->concurrent_mark()->clear_bitmap(_heap->workers());
248 
249   _heap->prepare_for_mutator_after_full_collection();

382 }
383 
384 uint G1FullCollector::truncate_parallel_cps() {
385   uint lowest_current = UINT_MAX;
386   for (uint i = 0; i < workers(); i++) {
387     G1FullGCCompactionPoint* cp = compaction_point(i);
388     if (cp->has_regions()) {
389       lowest_current = MIN2(lowest_current, cp->current_region()->hrm_index());
390     }
391   }
392 
393   for (uint i = 0; i < workers(); i++) {
394     G1FullGCCompactionPoint* cp = compaction_point(i);
395     if (cp->has_regions()) {
396       cp->remove_at_or_above(lowest_current);
397     }
398   }
399   return lowest_current;
400 }
401 
402 template <bool ALT_FWD>
403 void G1FullCollector::phase2c_prepare_serial_compaction_impl() {
404   GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare serial compaction", scope()->timer());
405   // At this point, we know that after parallel compaction there will be regions
406   // that are only partially compacted into. Thus, the last compaction region of
407   // each compaction queue still has space in it. We re-compact these regions
408   // serially to avoid a premature OOM when the mutator wants to allocate the
409   // first eden region after gc.
410 
411   // For maximum compaction, we need to re-prepare all objects above the lowest
412   // region among the current regions of all thread compaction points. Due to
413   // the uneven distribution of objects to parallel threads, holes may have been
414   // created as threads compacted to different target regions between the lowest
415   // and the highest region in the tails of the compaction points.
416 
417   uint start_serial = truncate_parallel_cps();
418   assert(start_serial < _heap->max_reserved_regions(), "Called on empty parallel compaction queues");
419 
420   G1FullGCCompactionPoint* serial_cp = serial_compaction_point();
421   assert(!serial_cp->is_initialized(), "sanity!");
422 
423   HeapRegion* start_hr = _heap->region_at(start_serial);
424   serial_cp->add(start_hr);
425   serial_cp->initialize(start_hr);
426 
427   HeapWord* dense_prefix_top = compaction_top(start_hr);
428   G1SerialRePrepareClosure<ALT_FWD> re_prepare(serial_cp, dense_prefix_top);
429 
430   for (uint i = start_serial + 1; i < _heap->max_reserved_regions(); i++) {
431     if (is_compaction_target(i)) {
432       HeapRegion* current = _heap->region_at(i);
433       set_compaction_top(current, current->bottom());
434       serial_cp->add(current);
435       current->apply_to_marked_objects(mark_bitmap(), &re_prepare);
436     }
437   }
438   serial_cp->update();
439 }
440 
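// The ALT_FWD template parameter bakes the UseAltGCForwarding decision in at
// compile time, so the per-object forwarding paths avoid a runtime flag check;
// the untemplated wrappers below select the instantiation once per phase.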
441 void G1FullCollector::phase2c_prepare_serial_compaction() {
442   if (UseAltGCForwarding) {
443     phase2c_prepare_serial_compaction_impl<true>();
444   } else {
445     phase2c_prepare_serial_compaction_impl<false>();
446   }
447 }
448 
449 template <bool ALT_FWD>
450 void G1FullCollector::phase2d_prepare_humongous_compaction_impl() {
451   GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare humongous compaction", scope()->timer());
452   G1FullGCCompactionPoint* serial_cp = serial_compaction_point();
453   assert(serial_cp->has_regions(), "Sanity!");
454 
455   uint last_serial_target = serial_cp->current_region()->hrm_index();
456   uint region_index = last_serial_target + 1;
457   uint max_reserved_regions = _heap->max_reserved_regions();
458 
459   G1FullGCCompactionPoint* humongous_cp = humongous_compaction_point();
460 
461   while (region_index < max_reserved_regions) {
462     HeapRegion* hr = _heap->region_at_or_null(region_index);
463 
464     if (hr == nullptr) {
465       region_index++;
466       continue;
467     } else if (hr->is_starts_humongous()) {
468       uint num_regions = humongous_cp->forward_humongous<ALT_FWD>(hr);
469       region_index += num_regions; // Skip over the continues humongous regions.
470       continue;
471     } else if (is_compaction_target(region_index)) {
472       // Add the region to the humongous compaction point.
473       humongous_cp->add(hr);
474     }
475     region_index++;
476   }
477 }
478 
479 void G1FullCollector::phase2d_prepare_humongous_compaction() {
480   if (UseAltGCForwarding) {
481     phase2d_prepare_humongous_compaction_impl<true>();
482   } else {
483     phase2d_prepare_humongous_compaction_impl<false>();
484   }
485 }
486 
487 void G1FullCollector::phase3_adjust_pointers() {
488   // Adjust the pointers to reflect the new locations
489   GCTraceTime(Info, gc, phases) info("Phase 3: Adjust pointers", scope()->timer());
490 
491   G1FullGCAdjustTask task(this);
492   run_task(&task);
493 }
494 
495 void G1FullCollector::phase4_do_compaction() {
496   // Compact the heap using the compaction queues created in phase 2.
497   GCTraceTime(Info, gc, phases) info("Phase 4: Compact heap", scope()->timer());
498   G1FullGCCompactTask task(this);
499   run_task(&task);
500 
501   // Compact serially to avoid OOM when there are very few free regions.
502   if (serial_compaction_point()->has_regions()) {
503     task.serial_compaction();
504   }
505 
506   if (!_humongous_compaction_regions.is_empty()) {