src/hotspot/share/gc/g1/g1FullCollector.cpp
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "gc/shared/referenceProcessor.hpp"
+ #include "gc/shared/slidingForwarding.hpp"
#include "gc/shared/verifyOption.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "logging/log.hpp"
#include "runtime/handles.inline.hpp"
}
void G1FullCollector::phase2_prepare_compaction() {
GCTraceTime(Info, gc, phases) info("Phase 2: Prepare compaction", scope()->timer());
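+ // Clear the sliding-forwarding tables before phase2b records the new
+ // locations of live objects.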
+ _heap->forwarding()->clear();
+
phase2a_determine_worklists();
bool has_free_compaction_targets = phase2b_forward_oops();
// Try to avoid OOM immediately after Full GC in case there are no free regions
// left after determining the result locations (i.e. this phase). Prepare to
// maximally compact the tail regions of the compaction queues serially.
- if (!has_free_compaction_targets) {
- phase2c_prepare_serial_compaction();
- }
+ // TODO: Disabled for now because it violates the sliding-forwarding assumption.
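+ // (Sliding forwarding compresses the forwardee into the object header and
+ // effectively assumes that each source region compacts into at most two
+ // target regions; serially re-forwarding the tail regions can break that
+ // assumption.)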
+ // if (!has_free_compaction_targets) {
+ // phase2c_prepare_serial_compaction();
+ // }
}
void G1FullCollector::phase2a_determine_worklists() {
GCTraceTime(Debug, gc, phases) debug("Phase 2: Determine work lists", scope()->timer());
return task.has_free_compaction_targets();
}
void G1FullCollector::phase2c_prepare_serial_compaction() {
- GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare serial compaction", scope()->timer());
- // At this point we know that after parallel compaction there will be no
- // completely free regions. That means that the last region of
- // all compaction queues still have data in them. We try to compact
- // these regions in serial to avoid a premature OOM when the mutator wants
- // to allocate the first eden region after gc.
- for (uint i = 0; i < workers(); i++) {
- G1FullGCCompactionPoint* cp = compaction_point(i);
- if (cp->has_regions()) {
- serial_compaction_point()->add(cp->remove_last());
- }
- }
-
- // Update the forwarding information for the regions in the serial
- // compaction point.
- G1FullGCCompactionPoint* cp = serial_compaction_point();
- for (GrowableArrayIterator<HeapRegion*> it = cp->regions()->begin(); it != cp->regions()->end(); ++it) {
- HeapRegion* current = *it;
- if (!cp->is_initialized()) {
- // Initialize the compaction point. Nothing more is needed for the first heap region
- // since it is already prepared for compaction.
- cp->initialize(current);
- } else {
- assert(!current->is_humongous(), "Should be no humongous regions in compaction queue");
- G1SerialRePrepareClosure re_prepare(cp, current);
- current->set_compaction_top(current->bottom());
- current->apply_to_marked_objects(mark_bitmap(), &re_prepare);
- }
- }
- cp->update();
+ ShouldNotReachHere(); // Disabled in Lilliput.
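+ // The original serial re-preparation code is retained below, commented out,
+ // for reference.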
+ // GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare serial compaction", scope()->timer());
+ // // At this point we know that after parallel compaction there will be no
+ // // completely free regions. That means that the last region of
+ // // all compaction queues still have data in them. We try to compact
+ // // these regions in serial to avoid a premature OOM when the mutator wants
+ // // to allocate the first eden region after gc.
+ // for (uint i = 0; i < workers(); i++) {
+ // G1FullGCCompactionPoint* cp = compaction_point(i);
+ // if (cp->has_regions()) {
+ // serial_compaction_point()->add(cp->remove_last());
+ // }
+ // }
+ //
+ // // Update the forwarding information for the regions in the serial
+ // // compaction point.
+ // G1FullGCCompactionPoint* cp = serial_compaction_point();
+ // for (GrowableArrayIterator<HeapRegion*> it = cp->regions()->begin(); it != cp->regions()->end(); ++it) {
+ // HeapRegion* current = *it;
+ // if (!cp->is_initialized()) {
+ // // Initialize the compaction point. Nothing more is needed for the first heap region
+ // // since it is already prepared for compaction.
+ // cp->initialize(current);
+ // } else {
+ // assert(!current->is_humongous(), "Should be no humongous regions in compaction queue");
+ // G1SerialRePrepareClosure re_prepare(cp, current);
+ // current->set_compaction_top(current->bottom());
+ // current->apply_to_marked_objects(mark_bitmap(), &re_prepare);
+ // }
+ // }
+ // cp->update();
}
void G1FullCollector::phase3_adjust_pointers() {
// Adjust the pointers to reflect the new locations
GCTraceTime(Info, gc, phases) info("Phase 3: Adjust pointers", scope()->timer());