< prev index next > src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp
Print this page
#include "gc/g1/g1FullGCPrepareTask.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/referenceProcessor.hpp"
+ #include "gc/shared/slidingForwarding.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/ticks.hpp"
if (hcc->use_cache()) {
hcc->reset_card_counts(hr);
}
}
! G1FullGCPrepareTask::G1PrepareCompactLiveClosure::G1PrepareCompactLiveClosure(G1FullGCCompactionPoint* cp) :
_cp(cp) { }
! size_t G1FullGCPrepareTask::G1PrepareCompactLiveClosure::apply(oop object) {
size_t size = object->size();
! _cp->forward(object, size);
return size;
}
! size_t G1FullGCPrepareTask::G1RePrepareClosure::apply(oop obj) {
// We only re-prepare objects forwarded within the current region, so
// skip objects that are already forwarded to another region.
! oop forwarded_to = obj->forwardee();
! if (forwarded_to != NULL && !_current->is_in(forwarded_to)) {
! return obj->size();
}
-
// Get size and forward.
size_t size = obj->size();
! _cp->forward(obj, size);
return size;
}
// Prepare a region for compaction: reset its compaction top to bottom and
// forward every live (marked) object through the given compaction point.
void G1FullGCPrepareTask::G1CalculatePointersClosure::prepare_for_compaction_work(G1FullGCCompactionPoint* cp,
                                                                                  HeapRegion* hr) {
  hr->set_compaction_top(hr->bottom());
  G1PrepareCompactLiveClosure prepare_compact(cp);
  hr->apply_to_marked_objects(_bitmap, &prepare_compact);
}
void G1FullGCPrepareTask::G1CalculatePointersClosure::prepare_for_compaction(HeapRegion* hr) {
if (!_cp->is_initialized()) {
hr->set_compaction_top(hr->bottom());
if (hcc->use_cache()) {
hcc->reset_card_counts(hr);
}
}
! template <bool ALT_FWD>
+ G1FullGCPrepareTask::G1PrepareCompactLiveClosure<ALT_FWD>::G1PrepareCompactLiveClosure(G1FullGCCompactionPoint* cp) :
_cp(cp) { }
! template <bool ALT_FWD>
+ size_t G1FullGCPrepareTask::G1PrepareCompactLiveClosure<ALT_FWD>::apply(oop object) {
size_t size = object->size();
! _cp->forward<ALT_FWD>(object, size);
return size;
}
! template <bool ALT_FWD>
+ size_t G1FullGCPrepareTask::G1RePrepareClosure<ALT_FWD>::apply(oop obj) {
// We only re-prepare objects forwarded within the current region, so
// skip objects that are already forwarded to another region.
! if (SlidingForwarding::is_forwarded(obj)) {
! oop forwarded_to = SlidingForwarding::forwardee<ALT_FWD>(obj);
! assert(forwarded_to != NULL, "must have forwardee");
+ if (!_current->is_in(forwarded_to)) {
+ return obj->size();
+ }
}
// Get size and forward.
size_t size = obj->size();
! _cp->forward<ALT_FWD>(obj, size);
return size;
}
// Prepare a region for compaction: reset its compaction top to bottom and
// forward every live (marked) object through the given compaction point.
// Dispatches on UseAltGCForwarding to the matching closure instantiation so
// the forwarding-encoding branch is hoisted out of the per-object loop.
void G1FullGCPrepareTask::G1CalculatePointersClosure::prepare_for_compaction_work(G1FullGCCompactionPoint* cp,
                                                                                  HeapRegion* hr) {
  // Resetting the compaction top does not depend on the forwarding encoding;
  // do it once here instead of duplicating it in both branches.
  hr->set_compaction_top(hr->bottom());
  if (UseAltGCForwarding) {
    G1PrepareCompactLiveClosure<true> prepare_compact(cp);
    hr->apply_to_marked_objects(_bitmap, &prepare_compact);
  } else {
    G1PrepareCompactLiveClosure<false> prepare_compact(cp);
    hr->apply_to_marked_objects(_bitmap, &prepare_compact);
  }
}
void G1FullGCPrepareTask::G1CalculatePointersClosure::prepare_for_compaction(HeapRegion* hr) {
if (!_cp->is_initialized()) {
hr->set_compaction_top(hr->bottom());
// Add region to the compaction queue and prepare it.
_cp->add(hr);
prepare_for_compaction_work(_cp, hr);
}
! void G1FullGCPrepareTask::prepare_serial_compaction() {
GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare Serial Compaction", collector()->scope()->timer());
// At this point we know that no regions were completely freed by
// the parallel compaction. That means that the last region of
// all compaction queues still have data in them. We try to compact
// these regions in serial to avoid a premature OOM.
// Add region to the compaction queue and prepare it.
_cp->add(hr);
prepare_for_compaction_work(_cp, hr);
}
! template <bool ALT_FWD>
+ void G1FullGCPrepareTask::prepare_serial_compaction_impl() {
GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare Serial Compaction", collector()->scope()->timer());
// At this point we know that no regions were completely freed by
// the parallel compaction. That means that the last region of
// all compaction queues still have data in them. We try to compact
// these regions in serial to avoid a premature OOM.
// Initialize the compaction point. Nothing more is needed for the first heap region
// since it is already prepared for compaction.
cp->initialize(current, false);
} else {
assert(!current->is_humongous(), "Should be no humongous regions in compaction queue");
! G1RePrepareClosure re_prepare(cp, current);
current->set_compaction_top(current->bottom());
current->apply_to_marked_objects(collector()->mark_bitmap(), &re_prepare);
}
}
cp->update();
}
bool G1FullGCPrepareTask::G1CalculatePointersClosure::freed_regions() {
if (_regions_freed) {
return true;
}
// Initialize the compaction point. Nothing more is needed for the first heap region
// since it is already prepared for compaction.
cp->initialize(current, false);
} else {
assert(!current->is_humongous(), "Should be no humongous regions in compaction queue");
! G1RePrepareClosure<ALT_FWD> re_prepare(cp, current);
current->set_compaction_top(current->bottom());
current->apply_to_marked_objects(collector()->mark_bitmap(), &re_prepare);
}
}
cp->update();
}
+ void G1FullGCPrepareTask::prepare_serial_compaction() {
+ if (UseAltGCForwarding) {
+ prepare_serial_compaction_impl<true>();
+ } else {
+ prepare_serial_compaction_impl<false>();
+ }
+ }
+
bool G1FullGCPrepareTask::G1CalculatePointersClosure::freed_regions() {
if (_regions_freed) {
return true;
}
< prev index next >