src/hotspot/share/gc/g1/g1FullCollector.cpp

 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "classfile/systemDictionary.hpp"
 27 #include "code/codeCache.hpp"
 28 #include "compiler/oopMap.hpp"
 29 #include "gc/g1/g1CollectedHeap.hpp"
 30 #include "gc/g1/g1FullCollector.inline.hpp"
 31 #include "gc/g1/g1FullGCAdjustTask.hpp"
 32 #include "gc/g1/g1FullGCCompactTask.hpp"
 33 #include "gc/g1/g1FullGCMarker.inline.hpp"
 34 #include "gc/g1/g1FullGCMarkTask.hpp"
 35 #include "gc/g1/g1FullGCPrepareTask.inline.hpp"
 36 #include "gc/g1/g1FullGCScope.hpp"
 37 #include "gc/g1/g1OopClosures.hpp"
 38 #include "gc/g1/g1Policy.hpp"
 39 #include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
 40 #include "gc/shared/gcTraceTime.inline.hpp"
 41 #include "gc/shared/preservedMarks.hpp"
 42 #include "gc/shared/referenceProcessor.hpp"

 43 #include "gc/shared/verifyOption.hpp"
 44 #include "gc/shared/weakProcessor.inline.hpp"
 45 #include "gc/shared/workerPolicy.hpp"
 46 #include "logging/log.hpp"
 47 #include "runtime/continuation.hpp"
 48 #include "runtime/handles.inline.hpp"
 49 #include "utilities/debug.hpp"
 50 
 51 static void clear_and_activate_derived_pointers() {
 52 #if COMPILER2_OR_JVMCI
 53   DerivedPointerTable::clear();
 54 #endif
 55 }
 56 
 57 static void deactivate_derived_pointers() {
 58 #if COMPILER2_OR_JVMCI
 59   DerivedPointerTable::set_active(false);
 60 #endif
 61 }
 62 
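
For context: derived pointers are interior pointers (base + offset) that C2/JVMCI-compiled frames keep in registers or spill slots. They cannot be updated on their own, so during root scanning each (base, derived) pair is recorded in the DerivedPointerTable and re-based once the base objects have moved. A minimal sketch of how these two helpers bracket a full collection (the surrounding call sites are illustrative; only the DerivedPointerTable methods are the real API):

  clear_and_activate_derived_pointers();    // before marking: empty the table
                                            // and start recording new pairs
  // ... phase 1: marking, which scans compiled frames ...
  deactivate_derived_pointers();            // no new entries past this point
  // ... phases 2-4: forward, adjust, compact ...
  #if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();   // re-base derived pointers from
                                            // their (now relocated) bases
  #endif
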

296   }
297 
298   // Class unloading and cleanup.
299   if (ClassUnloading) {
300     GCTraceTime(Debug, gc, phases) debug("Phase 1: Class Unloading and Cleanup", scope()->timer());
301     // Unload classes and purge the SystemDictionary.
302     bool purged_class = SystemDictionary::do_unloading(scope()->timer());
303     _heap->complete_cleaning(&_is_alive, purged_class);
304   }
305 
306   scope()->tracer()->report_object_count_after_gc(&_is_alive);
307 #if TASKQUEUE_STATS
308   oop_queue_set()->print_and_reset_taskqueue_stats("Oop Queue");
309   array_queue_set()->print_and_reset_taskqueue_stats("ObjArrayOop Queue");
310 #endif
311 }
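
_is_alive, passed to both complete_cleaning() and report_object_count_after_gc(), is a liveness predicate over the full-GC mark bitmap. A sketch of what such a closure boils down to (illustrative, not the actual G1 type; assumes MarkBitMap::is_marked(oop)):

  #include "gc/shared/markBitMap.hpp"
  #include "memory/iterator.hpp"

  // Sketch: an object is alive iff phase 1 marking set its bitmap bit.
  class IsAliveSketch : public BoolObjectClosure {
    const MarkBitMap* _bitmap;
  public:
    explicit IsAliveSketch(const MarkBitMap* bitmap) : _bitmap(bitmap) {}
    bool do_object_b(oop obj) { return _bitmap->is_marked(obj); }
  };
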
312 
313 void G1FullCollector::phase2_prepare_compaction() {
314   GCTraceTime(Info, gc, phases) info("Phase 2: Prepare compaction", scope()->timer());
315 


316   phase2a_determine_worklists();
317 
318   bool has_free_compaction_targets = phase2b_forward_oops();
319 
320   // Try to avoid OOM immediately after Full GC in case there are no free regions
321   // left after determining the result locations (i.e. this phase). Prepare to
322   // maximally compact the tail regions of the compaction queues serially.
323   if (!has_free_compaction_targets) {
324     phase2c_prepare_serial_compaction();
325   }

326 }
327 
328 void G1FullCollector::phase2a_determine_worklists() {
329   GCTraceTime(Debug, gc, phases) debug("Phase 2: Determine work lists", scope()->timer());
330 
331   G1DetermineCompactionQueueClosure cl(this);
332   _heap->heap_region_iterate(&cl);
333 }
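
Phase 2a walks every heap region once and sorts it into a worklist: regular regions become compaction sources on some worker's queue, while free and humongous regions are handled specially. A sketch of the closure shape (illustrative; the real G1DetermineCompactionQueueClosure also decides which regions to skip and distributes sources across workers):

  // Sketch of a region-classifying closure for phase 2a.
  class DetermineWorklistsSketch : public HeapRegionClosure {
    bool do_heap_region(HeapRegion* r) {
      if (r->is_free()) {
        // a candidate compaction target, not a source
      } else if (r->is_humongous()) {
        // never moved: reclaimed whole if dead, skipped if live
      } else {
        // regular region: becomes a source on some worker's queue
      }
      return false; // false = keep iterating over all regions
    }
  };
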
334 
335 bool G1FullCollector::phase2b_forward_oops() {
336   GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare parallel compaction", scope()->timer());
337 
338   G1FullGCPrepareTask task(this);
339   run_task(&task);
340 
341   return task.has_free_compaction_targets();
342 }
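
"Forwarding oops" means choosing a destination for every live object and recording it, classically by installing the destination address in the object's mark word. Per source region the work amounts to roughly the following (a sketch, not the task's actual code; it assumes G1FullGCCompactionPoint::forward(oop, size_t) as the recording entry point):

  // Sketch: walk the live objects of a region in address order and let
  // the worker's compaction point pick and record a destination for each.
  void forward_region_sketch(HeapRegion* r, G1FullGCCompactionPoint* cp,
                             const G1CMBitMap* bitmap) {
    HeapWord* cur = bitmap->get_next_marked_addr(r->bottom(), r->top());
    while (cur < r->top()) {
      oop obj = cast_to_oop(cur);
      cp->forward(obj, obj->size());  // records the destination (pre-Lilliput:
                                      // a forwarding pointer in the mark word)
      cur = bitmap->get_next_marked_addr(cur + obj->size(), r->top());
    }
  }
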
343 
344 void G1FullCollector::phase2c_prepare_serial_compaction() {
345   GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare serial compaction", scope()->timer());
346   // At this point we know that after parallel compaction there will be no
347   // completely free regions. That means that the last region of
348   // each compaction queue still has data in it. We try to compact
349   // these regions in serial to avoid a premature OOM when the mutator wants
350   // to allocate the first eden region after gc.
351   for (uint i = 0; i < workers(); i++) {
352     G1FullGCCompactionPoint* cp = compaction_point(i);
353     if (cp->has_regions()) {
354       serial_compaction_point()->add(cp->remove_last());
355     }
356   }
357 
358   // Update the forwarding information for the regions in the serial
359   // compaction point.
360   G1FullGCCompactionPoint* cp = serial_compaction_point();
361   for (GrowableArrayIterator<HeapRegion*> it = cp->regions()->begin(); it != cp->regions()->end(); ++it) {
362     HeapRegion* current = *it;
363     if (!cp->is_initialized()) {
364       // Initialize the compaction point. Nothing more is needed for the first heap region
365       // since it is already prepared for compaction.
366       cp->initialize(current);
367     } else {
368       assert(!current->is_humongous(), "Should be no humongous regions in compaction queue");
369       G1SerialRePrepareClosure re_prepare(cp, current);
370       current->set_compaction_top(current->bottom());
371       current->apply_to_marked_objects(mark_bitmap(), &re_prepare);
372     }
373   }
374   cp->update();

375 }
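
The serial pass re-forwards objects that the parallel pass had already placed: resetting compaction_top() discards the tail region's earlier role as a target, and the re-prepare closure then runs forwarding again against the shared serial compaction point. In essence (sketch; the real G1SerialRePrepareClosure is what apply_to_marked_objects() drives above):

  // Sketch of the re-prepare step applied to each marked object in a
  // tail region: overwrite the parallel phase's destination with one
  // allocated from the serial compaction point.
  size_t re_prepare_sketch(G1FullGCCompactionPoint* serial_cp, oop obj) {
    size_t size = obj->size();
    serial_cp->forward(obj, size);  // replaces the destination chosen in 2b
    return size;                    // caller advances by the object's size
  }
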
376 
377 void G1FullCollector::phase3_adjust_pointers() {
378   // Adjust the pointers to reflect the new locations
379   GCTraceTime(Info, gc, phases) info("Phase 3: Adjust pointers", scope()->timer());
380 
381   G1FullGCAdjustTask task(this);
382   run_task(&task);
383 }
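
Phase 3 visits every reference field in roots and live objects and rewrites it to the destination recorded in phase 2. The per-field core looks roughly like this (sketch; the real closure also handles compressed oops via the Access API, and the Lilliput version below looks the forwardee up in the sliding-forwarding table rather than the mark word):

  // Sketch: rewrite one reference field to its referent's new location.
  template <typename T>
  void adjust_field_sketch(T* p) {
    oop obj = RawAccess<>::oop_load(p);
    if (obj != nullptr && obj->is_forwarded()) {
      RawAccess<>::oop_store(p, obj->forwardee());
    }
  }
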
384 
385 void G1FullCollector::phase4_do_compaction() {
386   // Compact the heap using the compaction queues created in phase 2.
387   GCTraceTime(Info, gc, phases) info("Phase 4: Compact heap", scope()->timer());
388   G1FullGCCompactTask task(this);
389   run_task(&task);
390 
391   // Serial compact to avoid OOM when only very few free regions remain.
392   if (serial_compaction_point()->has_regions()) {
393     task.serial_compaction();
394   }
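
Compaction itself is then mechanical: each worker replays its queue and copies every live object to the forwardee chosen in phase 2, reinitializing the mark word at the destination. Per object (sketch; simplified from what the compact task's region closure does):

  // Sketch: move one live object to its recorded destination.
  size_t compact_object_sketch(oop obj) {
    size_t size = obj->size();
    HeapWord* dst = cast_from_oop<HeapWord*>(obj->forwardee());
    if (dst != cast_from_oop<HeapWord*>(obj)) {
      Copy::aligned_conjoint_words(cast_from_oop<HeapWord*>(obj), dst, size);
      cast_to_oop(dst)->init_mark();  // destination needs a fresh mark word
    }
    return size;
  }
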

 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "classfile/systemDictionary.hpp"
 27 #include "code/codeCache.hpp"
 28 #include "compiler/oopMap.hpp"
 29 #include "gc/g1/g1CollectedHeap.hpp"
 30 #include "gc/g1/g1FullCollector.inline.hpp"
 31 #include "gc/g1/g1FullGCAdjustTask.hpp"
 32 #include "gc/g1/g1FullGCCompactTask.hpp"
 33 #include "gc/g1/g1FullGCMarker.inline.hpp"
 34 #include "gc/g1/g1FullGCMarkTask.hpp"
 35 #include "gc/g1/g1FullGCPrepareTask.inline.hpp"
 36 #include "gc/g1/g1FullGCScope.hpp"
 37 #include "gc/g1/g1OopClosures.hpp"
 38 #include "gc/g1/g1Policy.hpp"
 39 #include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
 40 #include "gc/shared/gcTraceTime.inline.hpp"
 41 #include "gc/shared/preservedMarks.hpp"
 42 #include "gc/shared/referenceProcessor.hpp"
 43 #include "gc/shared/slidingForwarding.hpp"
 44 #include "gc/shared/verifyOption.hpp"
 45 #include "gc/shared/weakProcessor.inline.hpp"
 46 #include "gc/shared/workerPolicy.hpp"
 47 #include "logging/log.hpp"
 48 #include "runtime/continuation.hpp"
 49 #include "runtime/handles.inline.hpp"
 50 #include "utilities/debug.hpp"
 51 
 52 static void clear_and_activate_derived_pointers() {
 53 #if COMPILER2_OR_JVMCI
 54   DerivedPointerTable::clear();
 55 #endif
 56 }
 57 
 58 static void deactivate_derived_pointers() {
 59 #if COMPILER2_OR_JVMCI
 60   DerivedPointerTable::set_active(false);
 61 #endif
 62 }
 63 

297   }
298 
299   // Class unloading and cleanup.
300   if (ClassUnloading) {
301     GCTraceTime(Debug, gc, phases) debug("Phase 1: Class Unloading and Cleanup", scope()->timer());
302     // Unload classes and purge the SystemDictionary.
303     bool purged_class = SystemDictionary::do_unloading(scope()->timer());
304     _heap->complete_cleaning(&_is_alive, purged_class);
305   }
306 
307   scope()->tracer()->report_object_count_after_gc(&_is_alive);
308 #if TASKQUEUE_STATS
309   oop_queue_set()->print_and_reset_taskqueue_stats("Oop Queue");
310   array_queue_set()->print_and_reset_taskqueue_stats("ObjArrayOop Queue");
311 #endif
312 }
313 
314 void G1FullCollector::phase2_prepare_compaction() {
315   GCTraceTime(Info, gc, phases) info("Phase 2: Prepare compaction", scope()->timer());
316 
317   _heap->forwarding()->clear();
318 
319   phase2a_determine_worklists();
320 
321   bool has_free_compaction_targets = phase2b_forward_oops();
322 
323   // Try to avoid OOM immediately after Full GC in case there are no free regions
324   // left after determining the result locations (i.e. this phase). Prepare to
325   // maximally compact the tail regions of the compaction queues serially.
326   // TODO: Disabled for now because it violates the sliding-forwarding assumption.
327   // if (!has_free_compaction_targets) {
328   //   phase2c_prepare_serial_compaction();
329   // }
330 }
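
The two changes in this hunk, the forwarding()->clear() call and the disabled serial pass, both stem from sliding forwarding: with Lilliput's compact object headers the mark word no longer has room for a full forwarding pointer, so the forwardee is encoded in a few mark-word bits as a (target-region index, in-region offset) pair, exploiting the invariant that sliding compaction moves each source region's objects into at most two target regions. The serial re-preparation can forward parts of a region into a third target, which is presumably why it is disabled here. A self-contained sketch of the encoding idea (illustrative only, not the actual SlidingForwarding layout):

  #include <cassert>
  #include <cstdint>

  // Sketch: per-source-region forwarding for sliding compaction. Because
  // a source region compacts into at most two target regions, a forwardee
  // can be encoded as 1 bit of target index plus an in-region offset,
  // small enough to fit alongside the bits a compact header still needs.
  struct RegionForwardingSketch {
    static const uintptr_t REGION_SIZE = 1u << 20;  // assumed power of two
    uintptr_t targets[2] = {0, 0};  // base addresses of the target regions
    int num_targets = 0;

    uint32_t encode(uintptr_t to) {
      for (int i = 0; i < num_targets; i++) {
        if (to - targets[i] < REGION_SIZE) {        // 'to' is inside target i
          return (uint32_t)(((to - targets[i]) << 1) | i);
        }
      }
      assert(num_targets < 2 && "sliding invariant: at most two targets");
      targets[num_targets++] = to & ~(REGION_SIZE - 1);
      return encode(to);                            // retry; it now fits
    }

    uintptr_t decode(uint32_t enc) const {
      return targets[enc & 1] + (enc >> 1);
    }
  };

In the real patch the encoded value lives in the object's mark word and the per-region target table is owned by the forwarding object, which is why it must be clear()ed above before each full GC's forwarding phase.
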
331 
332 void G1FullCollector::phase2a_determine_worklists() {
333   GCTraceTime(Debug, gc, phases) debug("Phase 2: Determine work lists", scope()->timer());
334 
335   G1DetermineCompactionQueueClosure cl(this);
336   _heap->heap_region_iterate(&cl);
337 }
338 
339 bool G1FullCollector::phase2b_forward_oops() {
340   GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare parallel compaction", scope()->timer());
341 
342   G1FullGCPrepareTask task(this);
343   run_task(&task);
344 
345   return task.has_free_compaction_targets();
346 }
347 
348 void G1FullCollector::phase2c_prepare_serial_compaction() {
349   ShouldNotReachHere(); // Disabled in Lilliput.
350 //  GCTraceTime(Debug, gc, phases) debug("Phase 2: Prepare serial compaction", scope()->timer());
351 //  // At this point we know that after parallel compaction there will be no
352 //  // completely free regions. That means that the last region of
353 //  // each compaction queue still has data in it. We try to compact
354 //  // these regions in serial to avoid a premature OOM when the mutator wants
355 //  // to allocate the first eden region after gc.
356 //  for (uint i = 0; i < workers(); i++) {
357 //    G1FullGCCompactionPoint* cp = compaction_point(i);
358 //    if (cp->has_regions()) {
359 //      serial_compaction_point()->add(cp->remove_last());
360 //    }
361 //  }
362 //
363 //  // Update the forwarding information for the regions in the serial
364 //  // compaction point.
365 //  G1FullGCCompactionPoint* cp = serial_compaction_point();
366 //  for (GrowableArrayIterator<HeapRegion*> it = cp->regions()->begin(); it != cp->regions()->end(); ++it) {
367 //    HeapRegion* current = *it;
368 //    if (!cp->is_initialized()) {
369 //      // Initialize the compaction point. Nothing more is needed for the first heap region
370 //      // since it is already prepared for compaction.
371 //      cp->initialize(current);
372 //    } else {
373 //      assert(!current->is_humongous(), "Should be no humongous regions in compaction queue");
374 //      G1SerialRePrepareClosure re_prepare(cp, current);
375 //      current->set_compaction_top(current->bottom());
376 //      current->apply_to_marked_objects(mark_bitmap(), &re_prepare);
377 //    }
378 //  }
379 //  cp->update();
380 }
381 
382 void G1FullCollector::phase3_adjust_pointers() {
383   // Adjust the pointers to reflect the new locations
384   GCTraceTime(Info, gc, phases) info("Phase 3: Adjust pointers", scope()->timer());
385 
386   G1FullGCAdjustTask task(this);
387   run_task(&task);
388 }
389 
390 void G1FullCollector::phase4_do_compaction() {
391   // Compact the heap using the compaction queues created in phase 2.
392   GCTraceTime(Info, gc, phases) info("Phase 4: Compact heap", scope()->timer());
393   G1FullGCCompactTask task(this);
394   run_task(&task);
395 
396   // Serial compact to avoid OOM when only very few free regions remain.
397   if (serial_compaction_point()->has_regions()) {
398     task.serial_compaction();
399   }