src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp

Old src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp:

 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "gc/g1/g1CollectedHeap.inline.hpp"
 27 #include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
 28 #include "gc/g1/g1FullCollector.inline.hpp"
 29 #include "gc/g1/g1FullGCCompactionPoint.hpp"
 30 #include "gc/g1/g1FullGCMarker.hpp"
 31 #include "gc/g1/g1FullGCOopClosures.inline.hpp"
 32 #include "gc/g1/g1FullGCPrepareTask.inline.hpp"
 33 #include "gc/g1/g1HotCardCache.hpp"
 34 #include "gc/g1/heapRegion.inline.hpp"
 35 #include "gc/shared/gcTraceTime.inline.hpp"
 36 #include "gc/shared/referenceProcessor.hpp"

 37 #include "logging/log.hpp"
 38 #include "memory/iterator.inline.hpp"
 39 #include "oops/oop.inline.hpp"
 40 #include "utilities/ticks.hpp"
 41 
 42 G1DetermineCompactionQueueClosure::G1DetermineCompactionQueueClosure(G1FullCollector* collector) :
 43   _g1h(G1CollectedHeap::heap()),
 44   _collector(collector),
 45   _cur_worker(0) { }
 46 
 47 bool G1FullGCPrepareTask::G1CalculatePointersClosure::do_heap_region(HeapRegion* hr) {
 48   uint region_idx = hr->hrm_index();
 49   assert(_collector->is_compaction_target(region_idx), "must be");
 50 
 51   assert(!hr->is_pinned(), "must be");
 52   assert(!hr->is_closed_archive(), "must be");
 53   assert(!hr->is_open_archive(), "must be");
 54 
 55   prepare_for_compaction(hr);
 56 

126   }
127 }
128 
129 bool G1FullGCPrepareTask::G1ResetMetadataClosure::do_heap_region(HeapRegion* hr) {
130   uint const region_idx = hr->hrm_index();
131   if (!_collector->is_compaction_target(region_idx)) {
132     assert(!hr->is_free(), "all free regions should be compaction targets");
133     assert(_collector->is_skip_compacting(region_idx) || hr->is_closed_archive(), "must be");
134     if (hr->needs_scrubbing_during_full_gc()) {
135       scrub_skip_compacting_region(hr, hr->is_young());
136     }
137   }
138 
139   // Reset data structures not valid after Full GC.
140   reset_region_metadata(hr);
141 
142   return false;
143 }
144 
145 G1FullGCPrepareTask::G1PrepareCompactLiveClosure::G1PrepareCompactLiveClosure(G1FullGCCompactionPoint* cp) :
146     _cp(cp) { }
147 
148 size_t G1FullGCPrepareTask::G1PrepareCompactLiveClosure::apply(oop object) {
149   size_t size = object->size();
150   _cp->forward(object, size);
151   return size;
152 }
153 
154 void G1FullGCPrepareTask::G1CalculatePointersClosure::prepare_for_compaction(HeapRegion* hr) {
155   if (!_collector->is_free(hr->hrm_index())) {
156     G1PrepareCompactLiveClosure prepare_compact(_cp);
157     hr->apply_to_marked_objects(_bitmap, &prepare_compact);
158   }
159 }
160 
161 void G1FullGCPrepareTask::G1ResetMetadataClosure::scrub_skip_compacting_region(HeapRegion* hr, bool update_bot_for_live) {
162   assert(hr->needs_scrubbing_during_full_gc(), "must be");
163 
164   HeapWord* limit = hr->top();
165   HeapWord* current_obj = hr->bottom();
166   G1CMBitMap* bitmap = _collector->mark_bitmap();
167 
168   while (current_obj < limit) {
169     if (bitmap->is_marked(current_obj)) {
170       oop current = cast_to_oop(current_obj);

New src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp:

 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "gc/g1/g1CollectedHeap.inline.hpp"
 27 #include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
 28 #include "gc/g1/g1FullCollector.inline.hpp"
 29 #include "gc/g1/g1FullGCCompactionPoint.hpp"
 30 #include "gc/g1/g1FullGCMarker.hpp"
 31 #include "gc/g1/g1FullGCOopClosures.inline.hpp"
 32 #include "gc/g1/g1FullGCPrepareTask.inline.hpp"
 33 #include "gc/g1/g1HotCardCache.hpp"
 34 #include "gc/g1/heapRegion.inline.hpp"
 35 #include "gc/shared/gcTraceTime.inline.hpp"
 36 #include "gc/shared/referenceProcessor.hpp"
 37 #include "gc/shared/slidingForwarding.inline.hpp"
 38 #include "logging/log.hpp"
 39 #include "memory/iterator.inline.hpp"
 40 #include "oops/oop.inline.hpp"
 41 #include "utilities/ticks.hpp"
 42 
 43 G1DetermineCompactionQueueClosure::G1DetermineCompactionQueueClosure(G1FullCollector* collector) :
 44   _g1h(G1CollectedHeap::heap()),
 45   _collector(collector),
 46   _cur_worker(0) { }
 47 
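// Every region handed to this closure is expected to already be a compaction target;
// pinned, closed-archive and open-archive regions are never compacted, hence the asserts below.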
 48 bool G1FullGCPrepareTask::G1CalculatePointersClosure::do_heap_region(HeapRegion* hr) {
 49   uint region_idx = hr->hrm_index();
 50   assert(_collector->is_compaction_target(region_idx), "must be");
 51 
 52   assert(!hr->is_pinned(), "must be");
 53   assert(!hr->is_closed_archive(), "must be");
 54   assert(!hr->is_open_archive(), "must be");
 55 
 56   prepare_for_compaction(hr);
 57 

127   }
128 }
129 
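// Regions that are not compaction targets are either skip-compacting or closed archive;
// scrub them if needed, then reset the per-region data structures that are invalid after a full GC.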
130 bool G1FullGCPrepareTask::G1ResetMetadataClosure::do_heap_region(HeapRegion* hr) {
131   uint const region_idx = hr->hrm_index();
132   if (!_collector->is_compaction_target(region_idx)) {
133     assert(!hr->is_free(), "all free regions should be compaction targets");
134     assert(_collector->is_skip_compacting(region_idx) || hr->is_closed_archive(), "must be");
135     if (hr->needs_scrubbing_during_full_gc()) {
136       scrub_skip_compacting_region(hr, hr->is_young());
137     }
138   }
139 
140   // Reset data structures not valid after Full GC.
141   reset_region_metadata(hr);
142 
143   return false;
144 }
145 
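// Caches the heap-wide forwarding table (cf. the slidingForwarding.inline.hpp include);
// apply() passes it to the compaction point when forwarding each live object, presumably
// so the object's destination is recorded through that table.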
146 G1FullGCPrepareTask::G1PrepareCompactLiveClosure::G1PrepareCompactLiveClosure(G1FullGCCompactionPoint* cp) :
147     _cp(cp), _forwarding(G1CollectedHeap::heap()->forwarding()) { }
148 
149 size_t G1FullGCPrepareTask::G1PrepareCompactLiveClosure::apply(oop object) {
150   size_t size = object->size();
151   _cp->forward(_forwarding, object, size);
152   return size;
153 }
154 
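// Completely free regions contain no live objects, so only non-free regions
// have their marked objects forwarded to the compaction point.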
155 void G1FullGCPrepareTask::G1CalculatePointersClosure::prepare_for_compaction(HeapRegion* hr) {
156   if (!_collector->is_free(hr->hrm_index())) {
157     G1PrepareCompactLiveClosure prepare_compact(_cp);
158     hr->apply_to_marked_objects(_bitmap, &prepare_compact);
159   }
160 }
161 
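// Walks a skip-compacting region from bottom() to top(), using the mark bitmap to
// distinguish live objects from dead ranges that need scrubbing.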
162 void G1FullGCPrepareTask::G1ResetMetadataClosure::scrub_skip_compacting_region(HeapRegion* hr, bool update_bot_for_live) {
163   assert(hr->needs_scrubbing_during_full_gc(), "must be");
164 
165   HeapWord* limit = hr->top();
166   HeapWord* current_obj = hr->bottom();
167   G1CMBitMap* bitmap = _collector->mark_bitmap();
168 
169   while (current_obj < limit) {
170     if (bitmap->is_marked(current_obj)) {
171       oop current = cast_to_oop(current_obj);