
src/hotspot/share/gc/g1/g1FullCollector.cpp (old version)

 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "classfile/systemDictionary.hpp"
 27 #include "code/codeCache.hpp"
 28 #include "compiler/oopMap.hpp"
 29 #include "gc/g1/g1CollectedHeap.hpp"
 30 #include "gc/g1/g1FullCollector.inline.hpp"
 31 #include "gc/g1/g1FullGCAdjustTask.hpp"
 32 #include "gc/g1/g1FullGCCompactTask.hpp"
 33 #include "gc/g1/g1FullGCMarker.inline.hpp"
 34 #include "gc/g1/g1FullGCMarkTask.hpp"
 35 #include "gc/g1/g1FullGCPrepareTask.inline.hpp"
 36 #include "gc/g1/g1FullGCScope.hpp"
 37 #include "gc/g1/g1OopClosures.hpp"
 38 #include "gc/g1/g1Policy.hpp"
 39 #include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
 40 #include "gc/shared/gcTraceTime.inline.hpp"
 41 #include "gc/shared/preservedMarks.hpp"
 42 #include "gc/shared/referenceProcessor.hpp"

 43 #include "gc/shared/verifyOption.hpp"
 44 #include "gc/shared/weakProcessor.inline.hpp"
 45 #include "gc/shared/workerPolicy.hpp"
 46 #include "logging/log.hpp"
 47 #include "runtime/handles.inline.hpp"
 48 #include "utilities/debug.hpp"
 49 
 50 static void clear_and_activate_derived_pointers() {
 51 #if COMPILER2_OR_JVMCI
 52   DerivedPointerTable::clear();
 53 #endif
 54 }
 55 
 56 static void deactivate_derived_pointers() {
 57 #if COMPILER2_OR_JVMCI
 58   DerivedPointerTable::set_active(false);
 59 #endif
 60 }
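Derived pointers are interior pointers that C2- or JVMCI-compiled frames keep as base-plus-offset into an object; the helpers above bracket the collection so such pointers are recorded while objects may move and recomputed afterwards. A minimal standalone sketch of that mechanic, with all names hypothetical rather than HotSpot's DerivedPointerTable API:

    #include <cassert>
    #include <cstddef>
    #include <cstring>

    // A recorded derived pointer: where it lives, plus its offset from the
    // base object, captured while the (hypothetical) table is active.
    struct DerivedEntry {
      char**         location;
      std::ptrdiff_t offset;
    };

    int main() {
      char old_obj[16] = "payload";
      char new_obj[16];                // where the collector will move the object
      char* derived = old_obj + 4;     // interior pointer into old_obj

      DerivedEntry e = { &derived, derived - old_obj };  // record while active

      std::memcpy(new_obj, old_obj, sizeof old_obj);     // the object slides
      *e.location = new_obj + e.offset;                  // the "update" pass

      assert(derived == new_obj + 4);  // rebased against the new location
      return 0;
    }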
 61 
 62 static void update_derived_pointers() {

286   }
287 
288   // Class unloading and cleanup.
289   if (ClassUnloading) {
290     GCTraceTime(Debug, gc, phases) debug("Phase 1: Class Unloading and Cleanup", scope()->timer());
291     // Unload classes and purge the SystemDictionary.
292     bool purged_class = SystemDictionary::do_unloading(scope()->timer());
293     _heap->complete_cleaning(&_is_alive, purged_class);
294   }
295 
296   scope()->tracer()->report_object_count_after_gc(&_is_alive);
297 #if TASKQUEUE_STATS
298   oop_queue_set()->print_and_reset_taskqueue_stats("Oop Queue");
299   array_queue_set()->print_and_reset_taskqueue_stats("ObjArrayOop Queue");
300 #endif
301 }
302 
303 void G1FullCollector::phase2_prepare_compaction() {
304   GCTraceTime(Info, gc, phases) info("Phase 2: Prepare compaction", scope()->timer());
305 
306   phase2a_determine_worklists();
307 
308   bool has_free_compaction_targets = phase2b_forward_oops();
309 
310   // Try to avoid OOM immediately after Full GC in case there are no free regions
311   // left after determining the result locations (i.e. this phase). Prepare to
312   // maximally compact the tail regions of the compaction queues serially.
313   if (!has_free_compaction_targets) {
314     phase2c_prepare_serial_compaction();
315   }
316 }
317 
318 void G1FullCollector::phase2a_determine_worklists() {
319   GCTraceTime(Debug, gc, phases) debug("Phase 2: Determine work lists", scope()->timer());
320 
321   G1DetermineCompactionQueueClosure cl(this);
322   _heap->heap_region_iterate(&cl);
323 }
324 
325 bool G1FullCollector::phase2b_forward_oops() {
326   GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare parallel compaction", scope()->timer());
327 
328   G1FullGCPrepareTask task(this);
329   run_task(&task);
330 
331   return task.has_free_compaction_targets();
332 }
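phase2b_forward_oops() runs the prepare task, which walks live objects in address order and hands each its post-compaction address bump-pointer style; that address becomes the object's forwardee. A compacted-down standalone sketch of the sliding computation, with every structure invented for illustration:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct ToyObject {
      size_t addr;       // current address (word index)
      size_t size;       // size in words
      bool   live;       // set by the marking phase
      size_t forwardee;  // destination, filled in below
    };

    // Assign destination addresses by sliding live data toward the bottom.
    void forward_oops(std::vector<ToyObject>& objs) {
      size_t compaction_top = 0;       // next free destination word
      for (ToyObject& o : objs) {
        if (!o.live) continue;
        o.forwardee = compaction_top;
        compaction_top += o.size;
      }
    }

    int main() {
      std::vector<ToyObject> objs = {
        {0, 4, true, 0}, {4, 2, false, 0}, {6, 3, true, 0}
      };
      forward_oops(objs);
      std::printf("object at 6 slides to %zu\n", objs[2].forwardee);  // prints 4
      return 0;
    }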
333 
334 void G1FullCollector::phase2c_prepare_serial_compaction() {
335   GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare serial compaction", scope()->timer());
336   // At this point we know that after parallel compaction there will be no
337   // completely free regions. That means that the last region of each
338   // compaction queue still has data in it. We compact these regions
339   // serially to avoid a premature OOM when the mutator wants to
340   // allocate the first eden region after GC.
341   for (uint i = 0; i < workers(); i++) {
342     G1FullGCCompactionPoint* cp = compaction_point(i);
343     if (cp->has_regions()) {
344       serial_compaction_point()->add(cp->remove_last());
345     }
346   }
347 
348   // Update the forwarding information for the regions in the serial
349   // compaction point.
350   G1FullGCCompactionPoint* cp = serial_compaction_point();
351   for (GrowableArrayIterator<HeapRegion*> it = cp->regions()->begin(); it != cp->regions()->end(); ++it) {
352     HeapRegion* current = *it;
353     if (!cp->is_initialized()) {
354       // Initialize the compaction point. Nothing more is needed for the first heap region
355       // since it is already prepared for compaction.
356       cp->initialize(current);
357     } else {
358       assert(!current->is_humongous(), "Should be no humongous regions in compaction queue");
359       G1SerialRePrepareClosure re_prepare(cp, current);
360       current->set_compaction_top(current->bottom());
361       current->apply_to_marked_objects(mark_bitmap(), &re_prepare);
362     }
363   }
364   cp->update();
365 }
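The hand-off above is the whole trick: when parallel compaction would leave no region completely free, each worker queue donates its partially filled tail region to a single serial queue, whose regions are then re-prepared against one compaction point so their live data can share destinations. A toy version of the hand-off, using invented container types:

    #include <cstdio>
    #include <vector>

    int main() {
      // Each inner vector is one worker's compaction queue of region ids;
      // the last entry is that worker's partially filled tail region.
      std::vector<std::vector<int>> worker_queues = { {0, 1}, {2, 3}, {4} };
      std::vector<int> serial_queue;

      for (std::vector<int>& q : worker_queues) {
        if (!q.empty()) {                    // cp->has_regions()
          serial_queue.push_back(q.back());  // serial_compaction_point()->add(...)
          q.pop_back();                      // ...(cp->remove_last())
        }
      }
      // Regions 1, 3 and 4 now share one compaction point, so their live
      // data can be packed together, hopefully freeing whole regions.
      std::printf("serial queue holds %zu tail regions\n", serial_queue.size());
      return 0;
    }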
366 
367 void G1FullCollector::phase3_adjust_pointers() {
368   // Adjust the pointers to reflect the new locations
369   GCTraceTime(Info, gc, phases) info("Phase 3: Adjust pointers", scope()->timer());
370 
371   G1FullGCAdjustTask task(this);
372   run_task(&task);
373 }
374 
375 void G1FullCollector::phase4_do_compaction() {
376   // Compact the heap using the compaction queues created in phase 2.
377   GCTraceTime(Info, gc, phases) info("Phase 4: Compact heap", scope()->timer());
378   G1FullGCCompactTask task(this);
379   run_task(&task);
380 
381   // Compact serially to avoid OOM when there are very few free regions.
382   if (serial_compaction_point()->has_regions()) {
383     task.serial_compaction();
384   }

src/hotspot/share/gc/g1/g1FullCollector.cpp (new version)

 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "classfile/systemDictionary.hpp"
 27 #include "code/codeCache.hpp"
 28 #include "compiler/oopMap.hpp"
 29 #include "gc/g1/g1CollectedHeap.hpp"
 30 #include "gc/g1/g1FullCollector.inline.hpp"
 31 #include "gc/g1/g1FullGCAdjustTask.hpp"
 32 #include "gc/g1/g1FullGCCompactTask.hpp"
 33 #include "gc/g1/g1FullGCMarker.inline.hpp"
 34 #include "gc/g1/g1FullGCMarkTask.hpp"
 35 #include "gc/g1/g1FullGCPrepareTask.inline.hpp"
 36 #include "gc/g1/g1FullGCScope.hpp"
 37 #include "gc/g1/g1OopClosures.hpp"
 38 #include "gc/g1/g1Policy.hpp"
 39 #include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
 40 #include "gc/shared/gcTraceTime.inline.hpp"
 41 #include "gc/shared/preservedMarks.hpp"
 42 #include "gc/shared/referenceProcessor.hpp"
 43 #include "gc/shared/slidingForwarding.hpp"
 44 #include "gc/shared/verifyOption.hpp"
 45 #include "gc/shared/weakProcessor.inline.hpp"
 46 #include "gc/shared/workerPolicy.hpp"
 47 #include "logging/log.hpp"
 48 #include "runtime/handles.inline.hpp"
 49 #include "utilities/debug.hpp"
 50 
 51 static void clear_and_activate_derived_pointers() {
 52 #if COMPILER2_OR_JVMCI
 53   DerivedPointerTable::clear();
 54 #endif
 55 }
 56 
 57 static void deactivate_derived_pointers() {
 58 #if COMPILER2_OR_JVMCI
 59   DerivedPointerTable::set_active(false);
 60 #endif
 61 }
 62 
 63 static void update_derived_pointers() {

287   }
288 
289   // Class unloading and cleanup.
290   if (ClassUnloading) {
291     GCTraceTime(Debug, gc, phases) debug("Phase 1: Class Unloading and Cleanup", scope()->timer());
292     // Unload classes and purge the SystemDictionary.
293     bool purged_class = SystemDictionary::do_unloading(scope()->timer());
294     _heap->complete_cleaning(&_is_alive, purged_class);
295   }
296 
297   scope()->tracer()->report_object_count_after_gc(&_is_alive);
298 #if TASKQUEUE_STATS
299   oop_queue_set()->print_and_reset_taskqueue_stats("Oop Queue");
300   array_queue_set()->print_and_reset_taskqueue_stats("ObjArrayOop Queue");
301 #endif
302 }
303 
304 void G1FullCollector::phase2_prepare_compaction() {
305   GCTraceTime(Info, gc, phases) info("Phase 2: Prepare compaction", scope()->timer());
306 
307   _heap->forwarding()->clear();
308 
309   phase2a_determine_worklists();
310 
311   bool has_free_compaction_targets = phase2b_forward_oops();
312 
313   // Try to avoid OOM immediately after Full GC in case there are no free regions
314   // left after determining the result locations (i.e. this phase). Prepare to
315   // maximally compact the tail regions of the compaction queues serially.
316   // TODO: Disabled for now because it violates the sliding-forwarding assumption.
317   // if (!has_free_compaction_targets) {
318   //   phase2c_prepare_serial_compaction();
319   // }
320 }
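The cleared forwarding table and the TODO refer to Lilliput's compact forwarding scheme. As I understand that design, sliding compaction guarantees that each source region forwards into at most two target regions, so a forwarding entry can be a one-bit target selector plus an in-region offset instead of a full pointer; the serial re-preparation removed above can introduce a third target for a region, which is the violated assumption. A hypothetical sketch of such an encoding, not the actual SlidingForwarding layout:

    #include <cassert>
    #include <cstdint>

    const unsigned kOffsetBits = 31;  // low bits: offset, top bit: target select

    // Per-source-region forwarding state under the two-target assumption.
    struct RegionForwarding {
      uintptr_t targets[2] = {0, 0};  // base addresses of target regions
      int       num_targets = 0;

      uint32_t encode(uintptr_t forwardee, uintptr_t region_size) {
        uintptr_t base = forwardee - (forwardee % region_size);
        int sel = -1;
        for (int i = 0; i < num_targets; i++)
          if (targets[i] == base) sel = i;
        if (sel < 0) {
          // Serial re-preparation could add a third target and fire this.
          assert(num_targets < 2 && "sliding assumption: at most two targets");
          sel = num_targets;
          targets[num_targets++] = base;
        }
        return ((uint32_t)sel << kOffsetBits) | (uint32_t)(forwardee - base);
      }

      uintptr_t decode(uint32_t entry) const {
        return targets[entry >> kOffsetBits] + (entry & ((1u << kOffsetBits) - 1));
      }
    };

    int main() {
      RegionForwarding rf;
      const uintptr_t region_size = uintptr_t(1) << 20;  // 1 MiB, aligned regions
      uint32_t e = rf.encode(3 * region_size + 64, region_size);
      assert(rf.decode(e) == 3 * region_size + 64);      // round-trips
      return 0;
    }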
321 
322 void G1FullCollector::phase2a_determine_worklists() {
323   GCTraceTime(Debug, gc, phases) debug("Phase 2: Determine work lists", scope()->timer());
324 
325   G1DetermineCompactionQueueClosure cl(this);
326   _heap->heap_region_iterate(&cl);
327 }
328 
329 bool G1FullCollector::phase2b_forward_oops() {
330   GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare parallel compaction", scope()->timer());
331 
332   G1FullGCPrepareTask task(this);
333   run_task(&task);
334 
335   return task.has_free_compaction_targets();
336 }
337 
338 void G1FullCollector::phase2c_prepare_serial_compaction() {
339   ShouldNotReachHere(); // Disabled in Lilliput.
340 //  GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare serial compaction", scope()->timer());
341 //  // At this point we know that after parallel compaction there will be no
342 //  // completely free regions. That means that the last region of each
343 //  // compaction queue still has data in it. We compact these regions
344 //  // serially to avoid a premature OOM when the mutator wants to
345 //  // allocate the first eden region after GC.
346 //  for (uint i = 0; i < workers(); i++) {
347 //    G1FullGCCompactionPoint* cp = compaction_point(i);
348 //    if (cp->has_regions()) {
349 //      serial_compaction_point()->add(cp->remove_last());
350 //    }
351 //  }
352 //
353 //  // Update the forwarding information for the regions in the serial
354 //  // compaction point.
355 //  G1FullGCCompactionPoint* cp = serial_compaction_point();
356 //  for (GrowableArrayIterator<HeapRegion*> it = cp->regions()->begin(); it != cp->regions()->end(); ++it) {
357 //    HeapRegion* current = *it;
358 //    if (!cp->is_initialized()) {
359 //      // Initialize the compaction point. Nothing more is needed for the first heap region
360 //      // since it is already prepared for compaction.
361 //      cp->initialize(current);
362 //    } else {
363 //      assert(!current->is_humongous(), "Should be no humongous regions in compaction queue");
364 //      G1SerialRePrepareClosure re_prepare(cp, current);
365 //      current->set_compaction_top(current->bottom());
366 //      current->apply_to_marked_objects(mark_bitmap(), &re_prepare);
367 //    }
368 //  }
369 //  cp->update();
370 }
371 
372 void G1FullCollector::phase3_adjust_pointers() {
373   // Adjust the pointers to reflect the new locations
374   GCTraceTime(Info, gc, phases) info("Phase 3: Adjust pointers", scope()->timer());
375 
376   G1FullGCAdjustTask task(this);
377   run_task(&task);
378 }
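The adjust task rewrites every reference, in roots and in live objects, to point at the referent's forwardee; it has to run after all forwardees exist and before anything moves, which is why it is its own phase between prepare and compact. A standalone toy of the pass, with invented types:

    #include <vector>

    struct ToyObject {
      ToyObject* forwardee = nullptr;  // installed by the prepare phase
      std::vector<ToyObject*> fields;  // reference fields of this object
    };

    // Redirect every reference field to the referent's new location.
    void adjust_pointers(const std::vector<ToyObject*>& live) {
      for (ToyObject* obj : live)
        for (ToyObject*& field : obj->fields)
          if (field != nullptr)
            field = field->forwardee;
    }

    int main() {
      ToyObject a, b, a_new, b_new;
      a.forwardee = &a_new;
      b.forwardee = &b_new;
      a.fields = { &b };               // a references b at its old location
      std::vector<ToyObject*> live = { &a, &b };
      adjust_pointers(live);
      return a.fields[0] == &b_new ? 0 : 1;  // now references b's new location
    }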
379 
380 void G1FullCollector::phase4_do_compaction() {
381   // Compact the heap using the compaction queues created in phase 2.
382   GCTraceTime(Info, gc, phases) info("Phase 4: Compact heap", scope()->timer());
383   G1FullGCCompactTask task(this);
384   run_task(&task);
385 
386   // Compact serially to avoid OOM when there are very few free regions.
387   if (serial_compaction_point()->has_regions()) {
388     task.serial_compaction();
389   }
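With pointers already adjusted, phase 4 only has to copy each live object to its forwardee. Within a compaction queue destinations never lie ahead of their sources, so copying in address order cannot overwrite live data that has not yet moved. A minimal standalone illustration of that final slide:

    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    int main() {
      //              |--A---|  |-dead-|  |--B---|
      char heap[16] = { 'A','A','A',0, 'x','x',0,0, 'B','B',0,0, 0,0,0,0 };

      // Moves planned in phase 2: (from, to, size); 'B' slides over the gap.
      struct Move { size_t from, to, size; };
      Move plan[] = { {0, 0, 4}, {8, 4, 4} };

      for (const Move& m : plan)
        std::memmove(heap + m.to, heap + m.from, m.size);  // slide one object

      std::printf("%s %s\n", heap, heap + 4);              // prints "AAA BB"
      return 0;
    }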