< prev index next > src/hotspot/share/gc/g1/g1FullCollector.cpp
Print this page
#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/referenceProcessor.hpp"
+ #include "gc/shared/slidingForwarding.hpp"
#include "gc/shared/verifyOption.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "logging/log.hpp"
#include "runtime/handles.inline.hpp"
verify_after_marking();
// Don't add any more derived pointers during later phases
deactivate_derived_pointers();
+ SlidingForwarding::begin();
+
phase2_prepare_compaction();
if (has_compaction_targets()) {
phase3_adjust_pointers();
// All regions have a high live ratio thus will not be compacted.
// The live ratio is only considered if do_maximal_compaction is false.
log_info(gc, phases) ("No Regions selected for compaction. Skipping Phase 3: Adjust pointers and Phase 4: Compact heap");
}
+ SlidingForwarding::end();
+
phase5_reset_metadata();
G1CollectedHeap::finish_codecache_marking_cycle();
}
}
}
return lowest_current;
}
! void G1FullCollector::phase2c_prepare_serial_compaction() {
GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare serial compaction", scope()->timer());
// At this point, we know that after parallel compaction there will be regions that
// are partially compacted into. Thus, the last compaction region of all
// compaction queues still have space in them. We try to re-compact these regions
// in serial to avoid a premature OOM when the mutator wants to allocate the first
}
}
return lowest_current;
}
! template <bool ALT_FWD>
+ void G1FullCollector::phase2c_prepare_serial_compaction_impl() {
GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare serial compaction", scope()->timer());
// At this point, we know that after parallel compaction there will be regions that
// are partially compacted into. Thus, the last compaction region of all
// compaction queues still have space in them. We try to re-compact these regions
// in serial to avoid a premature OOM when the mutator wants to allocate the first
HeapRegion* start_hr = _heap->region_at(start_serial);
serial_cp->add(start_hr);
serial_cp->initialize(start_hr);
HeapWord* dense_prefix_top = compaction_top(start_hr);
! G1SerialRePrepareClosure re_prepare(serial_cp, dense_prefix_top);
for (uint i = start_serial + 1; i < _heap->max_reserved_regions(); i++) {
if (is_compaction_target(i)) {
HeapRegion* current = _heap->region_at(i);
set_compaction_top(current, current->bottom());
HeapRegion* start_hr = _heap->region_at(start_serial);
serial_cp->add(start_hr);
serial_cp->initialize(start_hr);
HeapWord* dense_prefix_top = compaction_top(start_hr);
! G1SerialRePrepareClosure<ALT_FWD> re_prepare(serial_cp, dense_prefix_top);
for (uint i = start_serial + 1; i < _heap->max_reserved_regions(); i++) {
if (is_compaction_target(i)) {
HeapRegion* current = _heap->region_at(i);
set_compaction_top(current, current->bottom());
}
}
serial_cp->update();
}
! void G1FullCollector::phase2d_prepare_humongous_compaction() {
GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare humongous compaction", scope()->timer());
G1FullGCCompactionPoint* serial_cp = serial_compaction_point();
assert(serial_cp->has_regions(), "Sanity!" );
uint last_serial_target = serial_cp->current_region()->hrm_index();
}
}
serial_cp->update();
}
! void G1FullCollector::phase2c_prepare_serial_compaction() {
+   // Dispatcher: select the template instantiation of the serial-compaction
+   // preparation based on the UseAltGCForwarding flag. ALT_FWD=true chooses
+   // the alternative forwarding encoding (see SlidingForwarding, bracketed by
+   // SlidingForwarding::begin()/end() around the compaction phases);
+   // ALT_FWD=false keeps the classic mark-word forwarding scheme.
+   // Branching once here keeps the flag check out of the per-object hot path.
+ if (UseAltGCForwarding) {
+ phase2c_prepare_serial_compaction_impl<true>();
+ } else {
+ phase2c_prepare_serial_compaction_impl<false>();
+ }
+ }
+
+ template <bool ALT_FWD>
+ void G1FullCollector::phase2d_prepare_humongous_compaction_impl() {
GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare humongous compaction", scope()->timer());
G1FullGCCompactionPoint* serial_cp = serial_compaction_point();
assert(serial_cp->has_regions(), "Sanity!" );
uint last_serial_target = serial_cp->current_region()->hrm_index();
if (hr == nullptr) {
region_index++;
continue;
} else if (hr->is_starts_humongous()) {
! uint num_regions = humongous_cp->forward_humongous(hr);
region_index += num_regions; // Skip over the continues humongous regions.
continue;
} else if (is_compaction_target(region_index)) {
// Add the region to the humongous compaction point.
humongous_cp->add(hr);
}
region_index++;
}
}
void G1FullCollector::phase3_adjust_pointers() {
// Adjust the pointers to reflect the new locations
GCTraceTime(Info, gc, phases) info("Phase 3: Adjust pointers", scope()->timer());
G1FullGCAdjustTask task(this);
if (hr == nullptr) {
region_index++;
continue;
} else if (hr->is_starts_humongous()) {
! uint num_regions = humongous_cp->forward_humongous<ALT_FWD>(hr);
region_index += num_regions; // Skip over the continues humongous regions.
continue;
} else if (is_compaction_target(region_index)) {
// Add the region to the humongous compaction point.
humongous_cp->add(hr);
}
region_index++;
}
}
+ // Dispatcher: pick the humongous-compaction preparation instantiation from
+ // the UseAltGCForwarding flag, mirroring phase2c's dispatch. ALT_FWD=true
+ // selects the alternative (SlidingForwarding) encoding used by
+ // forward_humongous<ALT_FWD>; resolving the flag once here avoids repeated
+ // checks inside the region loop.
+ void G1FullCollector::phase2d_prepare_humongous_compaction() {
+ if (UseAltGCForwarding) {
+ phase2d_prepare_humongous_compaction_impl<true>();
+ } else {
+ phase2d_prepare_humongous_compaction_impl<false>();
+ }
+ }
+
void G1FullCollector::phase3_adjust_pointers() {
// Adjust the pointers to reflect the new locations
GCTraceTime(Info, gc, phases) info("Phase 3: Adjust pointers", scope()->timer());
G1FullGCAdjustTask task(this);
< prev index next >