src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp

 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "gc/g1/g1CollectedHeap.hpp"
 27 #include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
 28 #include "gc/g1/g1FullCollector.inline.hpp"
 29 #include "gc/g1/g1FullGCCompactionPoint.hpp"
 30 #include "gc/g1/g1FullGCMarker.hpp"
 31 #include "gc/g1/g1FullGCOopClosures.inline.hpp"
 32 #include "gc/g1/g1FullGCPrepareTask.hpp"
 33 #include "gc/g1/g1HotCardCache.hpp"
 34 #include "gc/g1/heapRegion.inline.hpp"
 35 #include "gc/shared/gcTraceTime.inline.hpp"
 36 #include "gc/shared/referenceProcessor.hpp"

 37 #include "logging/log.hpp"
 38 #include "memory/iterator.inline.hpp"
 39 #include "oops/oop.inline.hpp"
 40 #include "utilities/ticks.hpp"
 41 
 42 template<bool is_humongous>
 43 void G1FullGCPrepareTask::G1CalculatePointersClosure::free_pinned_region(HeapRegion* hr) {
 44   _regions_freed = true;
 45   if (is_humongous) {
 46     _g1h->free_humongous_region(hr, nullptr);
 47   } else {
 48     _g1h->free_region(hr, nullptr);
 49   }
 50   prepare_for_compaction(hr);
 51   _collector->set_invalid(hr->hrm_index());
 52 }
 53 
 54 bool G1FullGCPrepareTask::G1CalculatePointersClosure::do_heap_region(HeapRegion* hr) {
 55   bool force_not_compacted = false;
 56   if (should_compact(hr)) {

140   if (hr->is_pinned()) {
141     return false;
142   }
143   size_t live_words = _collector->live_words(hr->hrm_index());
144   size_t live_words_threshold = _collector->scope()->region_compaction_threshold();
145   // Regions with a high live ratio will not be compacted.
146   return live_words <= live_words_threshold;
147 }
148 
149 void G1FullGCPrepareTask::G1CalculatePointersClosure::reset_region_metadata(HeapRegion* hr) {
150   hr->rem_set()->clear();
151   hr->clear_cardtable();
152 
153   G1HotCardCache* hcc = _g1h->hot_card_cache();
154   if (hcc->use_cache()) {
155     hcc->reset_card_counts(hr);
156   }
157 }
158 
159 G1FullGCPrepareTask::G1PrepareCompactLiveClosure::G1PrepareCompactLiveClosure(G1FullGCCompactionPoint* cp) :
160     _cp(cp) { }
161 
162 size_t G1FullGCPrepareTask::G1PrepareCompactLiveClosure::apply(oop object) {
163   size_t size = object->size();
164   _cp->forward(object, size);
165   return size;
166 }
167 
168 size_t G1FullGCPrepareTask::G1RePrepareClosure::apply(oop obj) {

169   // We only re-prepare objects forwarded within the current region, so
170   // skip objects that are already forwarded to another region.

171   oop forwarded_to = obj->forwardee();
172   if (forwarded_to != NULL && !_current->is_in(forwarded_to)) {
173     return obj->size();
174   }
175 
176   // Get size and forward.
177   size_t size = obj->size();
178   _cp->forward(obj, size);
179 
180   return size;


181 }
182 
183 void G1FullGCPrepareTask::G1CalculatePointersClosure::prepare_for_compaction_work(G1FullGCCompactionPoint* cp,
184                                                                                   HeapRegion* hr) {
185   G1PrepareCompactLiveClosure prepare_compact(cp);
186   hr->set_compaction_top(hr->bottom());
187   hr->apply_to_marked_objects(_bitmap, &prepare_compact);
188 }
189 
190 void G1FullGCPrepareTask::G1CalculatePointersClosure::prepare_for_compaction(HeapRegion* hr) {
191   if (!_cp->is_initialized()) {
192     hr->set_compaction_top(hr->bottom());
193     _cp->initialize(hr, true);
194   }
195   // Add region to the compaction queue and prepare it.
196   _cp->add(hr);
197   prepare_for_compaction_work(_cp, hr);
198 }
199 
200 void G1FullGCPrepareTask::prepare_serial_compaction() {
201   GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare Serial Compaction", collector()->scope()->timer());

202   // At this point we know that no regions were completely freed by
203   // the parallel compaction. That means that the last region of
204   // each compaction queue still has data in it. We try to compact
205   // these regions in serial to avoid a premature OOM.

206   for (uint i = 0; i < collector()->workers(); i++) {
207     G1FullGCCompactionPoint* cp = collector()->compaction_point(i);
208     if (cp->has_regions()) {
209       collector()->serial_compaction_point()->add(cp->remove_last());
210     }
211   }

212 
213   // Update the forwarding information for the regions in the serial
214   // compaction point.

215   G1FullGCCompactionPoint* cp = collector()->serial_compaction_point();
216   for (GrowableArrayIterator<HeapRegion*> it = cp->regions()->begin(); it != cp->regions()->end(); ++it) {
217     HeapRegion* current = *it;
218     if (!cp->is_initialized()) {
219       // Initialize the compaction point. Nothing more is needed for the first heap region
220       // since it is already prepared for compaction.
221       cp->initialize(current, false);
222     } else {
223       assert(!current->is_humongous(), "Should be no humongous regions in compaction queue");
224       G1RePrepareClosure re_prepare(cp, current);
225       current->set_compaction_top(current->bottom());
226       current->apply_to_marked_objects(collector()->mark_bitmap(), &re_prepare);
227     }
228   }
229   cp->update();

230 }
231 
232 bool G1FullGCPrepareTask::G1CalculatePointersClosure::freed_regions() {
233   if (_regions_freed) {
234     return true;
235   }
236 
237   if (!_cp->has_regions()) {
238     // No regions in queue, so no free ones either.
239     return false;
240   }
241 
242   if (_cp->current_region() != _cp->regions()->last()) {
243     // The current region used for compaction is not the last in the
244     // queue. That means there is at least one free region in the queue.
245     return true;
246   }
247 
248   // No free regions in the queue.
249   return false;

 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "gc/g1/g1CollectedHeap.hpp"
 27 #include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
 28 #include "gc/g1/g1FullCollector.inline.hpp"
 29 #include "gc/g1/g1FullGCCompactionPoint.hpp"
 30 #include "gc/g1/g1FullGCMarker.hpp"
 31 #include "gc/g1/g1FullGCOopClosures.inline.hpp"
 32 #include "gc/g1/g1FullGCPrepareTask.hpp"
 33 #include "gc/g1/g1HotCardCache.hpp"
 34 #include "gc/g1/heapRegion.inline.hpp"
 35 #include "gc/shared/gcTraceTime.inline.hpp"
 36 #include "gc/shared/referenceProcessor.hpp"
 37 #include "gc/shared/slidingForwarding.inline.hpp"
 38 #include "logging/log.hpp"
 39 #include "memory/iterator.inline.hpp"
 40 #include "oops/oop.inline.hpp"
 41 #include "utilities/ticks.hpp"
 42 
 43 template<bool is_humongous>
 44 void G1FullGCPrepareTask::G1CalculatePointersClosure::free_pinned_region(HeapRegion* hr) {
 45   _regions_freed = true;
 46   if (is_humongous) {
 47     _g1h->free_humongous_region(hr, nullptr);
 48   } else {
 49     _g1h->free_region(hr, nullptr);
 50   }
 51   prepare_for_compaction(hr);
 52   _collector->set_invalid(hr->hrm_index());
 53 }
 54 
 55 bool G1FullGCPrepareTask::G1CalculatePointersClosure::do_heap_region(HeapRegion* hr) {
 56   bool force_not_compacted = false;
 57   if (should_compact(hr)) {

141   if (hr->is_pinned()) {
142     return false;
143   }
144   size_t live_words = _collector->live_words(hr->hrm_index());
145   size_t live_words_threshold = _collector->scope()->region_compaction_threshold();
146   // Regions with a high live ratio will not be compacted.
147   return live_words <= live_words_threshold;
148 }
149 
150 void G1FullGCPrepareTask::G1CalculatePointersClosure::reset_region_metadata(HeapRegion* hr) {
151   hr->rem_set()->clear();
152   hr->clear_cardtable();
153 
154   G1HotCardCache* hcc = _g1h->hot_card_cache();
155   if (hcc->use_cache()) {
156     hcc->reset_card_counts(hr);
157   }
158 }
159 
160 G1FullGCPrepareTask::G1PrepareCompactLiveClosure::G1PrepareCompactLiveClosure(G1FullGCCompactionPoint* cp) :
161     _cp(cp), _forwarding(G1CollectedHeap::heap()->forwarding()) { }
162 
163 size_t G1FullGCPrepareTask::G1PrepareCompactLiveClosure::apply(oop object) {
164   size_t size = object->size();
165   _cp->forward(_forwarding, object, size);
166   return size;
167 }
168 
169 size_t G1FullGCPrepareTask::G1RePrepareClosure::apply(oop obj) {
170   ShouldNotReachHere(); // Serial compaction, the only user of this closure, is disabled in Lilliput.
171   // We only re-prepare objects forwarded within the current region, so
172   // skip objects that are already forwarded to another region.
173   /*
174   oop forwarded_to = obj->forwardee();
175   if (forwarded_to != NULL && !_current->is_in(forwarded_to)) {
176     return obj->size();
177   }
178 
179   // Get size and forward.
180   size_t size = obj->size();
181   _cp->forward(_forwarding, obj, size);
182 
183   return size;
184   */
185   return 0;
186 }
187 
188 void G1FullGCPrepareTask::G1CalculatePointersClosure::prepare_for_compaction_work(G1FullGCCompactionPoint* cp,
189                                                                                   HeapRegion* hr) {
190   G1PrepareCompactLiveClosure prepare_compact(cp);
191   hr->set_compaction_top(hr->bottom());
192   hr->apply_to_marked_objects(_bitmap, &prepare_compact);
193 }
194 
195 void G1FullGCPrepareTask::G1CalculatePointersClosure::prepare_for_compaction(HeapRegion* hr) {
196   if (!_cp->is_initialized()) {
197     hr->set_compaction_top(hr->bottom());
198     _cp->initialize(hr, true);
199   }
200   // Add region to the compaction queue and prepare it.
201   _cp->add(hr);
202   prepare_for_compaction_work(_cp, hr);
203 }
204 
205 void G1FullGCPrepareTask::prepare_serial_compaction() {
206   ShouldNotReachHere(); // Disabled in Lilliput.
207   // GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare Serial Compaction", collector()->scope()->timer());
208   // At this point we know that no regions were completely freed by
209   // the parallel compaction. That means that the last region of
210   // each compaction queue still has data in it. We try to compact
211   // these regions in serial to avoid a premature OOM.
212   /*
213   for (uint i = 0; i < collector()->workers(); i++) {
214     G1FullGCCompactionPoint* cp = collector()->compaction_point(i);
215     if (cp->has_regions()) {
216       collector()->serial_compaction_point()->add(cp->remove_last());
217     }
218   }
219   */
220 
221   // Update the forwarding information for the regions in the serial
222   // compaction point.
223   /*
224   G1FullGCCompactionPoint* cp = collector()->serial_compaction_point();
225   for (GrowableArrayIterator<HeapRegion*> it = cp->regions()->begin(); it != cp->regions()->end(); ++it) {
226     HeapRegion* current = *it;
227     if (!cp->is_initialized()) {
228       // Initialize the compaction point. Nothing more is needed for the first heap region
229       // since it is already prepared for compaction.
230       cp->initialize(current, false);
231     } else {
232       assert(!current->is_humongous(), "Should be no humongous regions in compaction queue");
233       G1RePrepareClosure re_prepare(cp, current);
234       current->set_compaction_top(current->bottom());
235       current->apply_to_marked_objects(collector()->mark_bitmap(), &re_prepare);
236     }
237   }
238   cp->update();
239   */
240 }
241 
242 bool G1FullGCPrepareTask::G1CalculatePointersClosure::freed_regions() {
243   if (_regions_freed) {
244     return true;
245   }
246 
247   if (!_cp->has_regions()) {
248     // No regions in queue, so no free ones either.
249     return false;
250   }
251 
252   if (_cp->current_region() != _cp->regions()->last()) {
253     // The current region used for compaction is not the last in the
254     // queue. That means there is at least one free region in the queue.
255     return true;
256   }
257 
258   // No free regions in the queue.
259   return false;