src/hotspot/share/gc/g1/g1CollectedHeap.hpp

  68 // Forward declarations
  69 class G1Allocator;
  70 class G1BatchedTask;
  71 class G1CardTableEntryClosure;
  72 class G1ConcurrentMark;
  73 class G1ConcurrentMarkThread;
  74 class G1ConcurrentRefine;
  75 class G1GCCounters;
  76 class G1GCPhaseTimes;
  77 class G1HeapSizingPolicy;
  78 class G1NewTracer;
  79 class G1RemSet;
  80 class G1ServiceTask;
  81 class G1ServiceThread;
  82 class GCMemoryManager;
  83 class HeapRegion;
  84 class MemoryPool;
  85 class nmethod;
  86 class ReferenceProcessor;
  87 class STWGCTimer;
  88 class WorkerThreads;
  89 
  90 typedef OverflowTaskQueue<ScannerTask, mtGC>           G1ScannerTasksQueue;
  91 typedef GenericTaskQueueSet<G1ScannerTasksQueue, mtGC> G1ScannerTasksQueueSet;
  92 
  93 typedef int RegionIdx_t;   // needs to hold [ 0..max_reserved_regions() )
  94 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
  95 
  96 // The G1 STW is-alive closure.
  97 // An instance is embedded into the G1CH and used as the
  98 // (optional) _is_alive_non_header closure in the STW
  99 // reference processor. It is also extensively used during
 100 // reference processing during STW evacuation pauses.
 101 class G1STWIsAliveClosure : public BoolObjectClosure {
 102   G1CollectedHeap* _g1h;
 103 public:
 104   G1STWIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
 105   bool do_object_b(oop p) override;
 106 };
 107 

 235 
 236   void set_used(size_t bytes);
 237 
 238   // Number of bytes used in all regions during GC. Typically changed when
 239   // retiring a GC alloc region.
 240   size_t _bytes_used_during_gc;
 241 
 242 public:
 243   size_t bytes_used_during_gc() const { return _bytes_used_during_gc; }
 244 
 245 private:
 246   // GC allocation statistics policy for survivors.
 247   G1EvacStats _survivor_evac_stats;
 248 
 249   // GC allocation statistics policy for tenured objects.
 250   G1EvacStats _old_evac_stats;
 251 
 252   // Helper for monitoring and management support.
 253   G1MonitoringSupport* _monitoring_support;
 254 
 255   uint _num_humongous_objects; // Current number of (all) humongous objects found in the heap.
 256   uint _num_humongous_reclaim_candidates; // Number of humongous object eager reclaim candidates.
 257 public:
 258   uint num_humongous_objects() const { return _num_humongous_objects; }
 259   uint num_humongous_reclaim_candidates() const { return _num_humongous_reclaim_candidates; }
 260   bool has_humongous_reclaim_candidates() const { return _num_humongous_reclaim_candidates > 0; }
 261 
 262   void set_humongous_stats(uint num_humongous_total, uint num_humongous_candidates);
 263 
 264   bool should_sample_collection_set_candidates() const;
 265   void set_collection_set_candidates_stats(G1MonotonicArenaMemoryStats& stats);
 266   void set_young_gen_card_set_stats(const G1MonotonicArenaMemoryStats& stats);
 267 
 268 private:
 269 
 270   G1HRPrinter _hr_printer;
 271 
 272   // Return true if an explicit GC should start a concurrent cycle instead
 273   // of doing a STW full GC. A concurrent cycle should be started if:
 274   // (a) cause == _g1_humongous_allocation,
 275   // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent,
 276   // (c) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent,
 277   // (d) cause == _wb_breakpoint,
 278   // (e) cause == _g1_periodic_collection and +G1PeriodicGCInvokesConcurrent.
 279   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 280 
 281   // Attempt to start a concurrent cycle with the indicated cause.
 282   // precondition: should_do_concurrent_full_gc(cause)
 283   bool try_collect_concurrently(GCCause::Cause cause,
 284                                 uint gc_counter,
 285                                 uint old_marking_started_before);
 286 
 287   // indicates whether we are in young or mixed GC mode

  68 // Forward declarations
  69 class G1Allocator;
  70 class G1BatchedTask;
  71 class G1CardTableEntryClosure;
  72 class G1ConcurrentMark;
  73 class G1ConcurrentMarkThread;
  74 class G1ConcurrentRefine;
  75 class G1GCCounters;
  76 class G1GCPhaseTimes;
  77 class G1HeapSizingPolicy;
  78 class G1NewTracer;
  79 class G1RemSet;
  80 class G1ServiceTask;
  81 class G1ServiceThread;
  82 class GCMemoryManager;
  83 class HeapRegion;
  84 class MemoryPool;
  85 class nmethod;
  86 class ReferenceProcessor;
  87 class STWGCTimer;
  88 class SlidingForwarding;
  89 class WorkerThreads;
  90 
  91 typedef OverflowTaskQueue<ScannerTask, mtGC>           G1ScannerTasksQueue;
  92 typedef GenericTaskQueueSet<G1ScannerTasksQueue, mtGC> G1ScannerTasksQueueSet;
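As a point of reference, these queues follow the standard HotSpot work-stealing task-queue pattern: each worker thread owns one G1ScannerTasksQueue registered in the G1ScannerTasksQueueSet, pops from its own queue and steals from the others when it runs dry. A minimal sketch, assuming the usual GenericTaskQueue/GenericTaskQueueSet interface (pop_local()/steal()); it is not code from this file:

  // Hypothetical worker drain loop over the scanner task queues.
  static void drain_scanner_queues(G1ScannerTasksQueueSet* queues, uint worker_id) {
    G1ScannerTasksQueue* q = queues->queue(worker_id);
    ScannerTask task;
    while (q->pop_local(task) || queues->steal(worker_id, task)) {
      // ... process the ScannerTask (an oop*, narrowOop* or PartialArrayScanTask) ...
    }
  }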
  93 
  94 typedef int RegionIdx_t;   // needs to hold [ 0..max_reserved_regions() )
  95 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
  96 
  97 // The G1 STW is-alive closure.
  98 // An instance is embedded into the G1CH and used as the
  99 // (optional) _is_alive_non_header closure in the STW
 100 // reference processor. It is also extensively used during
 101 // reference processing during STW evacuation pauses.
 102 class G1STWIsAliveClosure : public BoolObjectClosure {
 103   G1CollectedHeap* _g1h;
 104 public:
 105   G1STWIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
 106   bool do_object_b(oop p) override;
 107 };
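For context, the do_object_b() definition lives in g1CollectedHeap.cpp; a minimal sketch of its usual shape, assuming the familiar G1CollectedHeap::is_in_cset() and oopDesc::is_forwarded() queries (the exact code may differ):

  // Sketch only: during an evacuation pause an object is considered alive
  // if it is outside the collection set, or inside it but already copied.
  bool G1STWIsAliveClosure::do_object_b(oop p) {
    return !_g1h->is_in_cset(p) || p->is_forwarded();
  }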
 108 

 236 
 237   void set_used(size_t bytes);
 238 
 239   // Number of bytes used in all regions during GC. Typically changed when
 240   // retiring a GC alloc region.
 241   size_t _bytes_used_during_gc;
 242 
 243 public:
 244   size_t bytes_used_during_gc() const { return _bytes_used_during_gc; }
 245 
 246 private:
 247   // GC allocation statistics policy for survivors.
 248   G1EvacStats _survivor_evac_stats;
 249 
 250   // GC allocation statistics policy for tenured objects.
 251   G1EvacStats _old_evac_stats;
 252 
 253   // Helper for monitoring and management support.
 254   G1MonitoringSupport* _monitoring_support;
 255 
 256   SlidingForwarding* _forwarding;
 257 
 258   uint _num_humongous_objects; // Current number of (all) humongous objects found in the heap.
 259   uint _num_humongous_reclaim_candidates; // Number of humongous object eager reclaim candidates.
 260 public:
 261   uint num_humongous_objects() const { return _num_humongous_objects; }
 262   uint num_humongous_reclaim_candidates() const { return _num_humongous_reclaim_candidates; }
 263   bool has_humongous_reclaim_candidates() const { return _num_humongous_reclaim_candidates > 0; }
 264 
 265   void set_humongous_stats(uint num_humongous_total, uint num_humongous_candidates);
 266 
 267   bool should_sample_collection_set_candidates() const;
 268   void set_collection_set_candidates_stats(G1MonotonicArenaMemoryStats& stats);
 269   void set_young_gen_card_set_stats(const G1MonotonicArenaMemoryStats& stats);
 270 
 271   SlidingForwarding* forwarding() const {
 272     return _forwarding;
 273   }
 274 
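The new _forwarding field and its forwarding() accessor are the heart of this change: other G1 code can now reach the SlidingForwarding table through the heap. Nothing in this header shows how the table is used, so the sketch below is purely illustrative, and the SlidingForwarding member functions mentioned in the comments are assumptions rather than declarations taken from this patch:

  // Hypothetical caller sketch: a full-GC phase fetching the table.
  void example_compaction_step(G1CollectedHeap* g1h) {
    SlidingForwarding* const fwd = g1h->forwarding();
    // The table would then record and later resolve new object locations,
    // e.g. (assumed interface):
    //   fwd->forward_to(obj, new_location);
    //   oop dest = fwd->forwardee(obj);
  }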
 275 private:
 276 
 277   G1HRPrinter _hr_printer;
 278 
 279   // Return true if an explicit GC should start a concurrent cycle instead
 280   // of doing a STW full GC. A concurrent cycle should be started if:
 281   // (a) cause == _g1_humongous_allocation,
 282   // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent,
 283   // (c) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent,
 284   // (d) cause == _wb_breakpoint,
 285   // (e) cause == _g1_periodic_collection and +G1PeriodicGCInvokesConcurrent.
 286   bool should_do_concurrent_full_gc(GCCause::Cause cause);
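The comment above pins the predicate down completely; a minimal sketch derived from cases (a)-(e) (the actual definition in g1CollectedHeap.cpp may structure the cases differently, e.g. through a helper for the user-requested causes):

  bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
    switch (cause) {
      case GCCause::_g1_humongous_allocation: return true;                          // (a)
      case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;   // (b)
      case GCCause::_dcmd_gc_run:             return ExplicitGCInvokesConcurrent;   // (c)
      case GCCause::_wb_breakpoint:           return true;                          // (d)
      case GCCause::_g1_periodic_collection:  return G1PeriodicGCInvokesConcurrent; // (e)
      default:                                return false;
    }
  }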
 287 
 288   // Attempt to start a concurrent cycle with the indicated cause.
 289   // precondition: should_do_concurrent_full_gc(cause)
 290   bool try_collect_concurrently(GCCause::Cause cause,
 291                                 uint gc_counter,
 292                                 uint old_marking_started_before);
 293 
 294   // indicates whether we are in young or mixed GC mode