< prev index next >

src/hotspot/share/gc/g1/g1CollectedHeap.hpp

Print this page

  68 class G1ArchiveAllocator;
  69 class G1BatchedTask;
  70 class G1CardTableEntryClosure;
  71 class G1ConcurrentMark;
  72 class G1ConcurrentMarkThread;
  73 class G1ConcurrentRefine;
  74 class G1GCCounters;
  75 class G1GCPhaseTimes;
  76 class G1HeapSizingPolicy;
  77 class G1HotCardCache;
  78 class G1NewTracer;
  79 class G1RemSet;
  80 class G1ServiceTask;
  81 class G1ServiceThread;
  82 class GCMemoryManager;
  83 class HeapRegion;
  84 class MemoryPool;
  85 class nmethod;
  86 class ReferenceProcessor;
  87 class STWGCTimer;

  88 class WorkerThreads;
  89 
  90 typedef OverflowTaskQueue<ScannerTask, mtGC>           G1ScannerTasksQueue;
  91 typedef GenericTaskQueueSet<G1ScannerTasksQueue, mtGC> G1ScannerTasksQueueSet;
  92 
  93 typedef int RegionIdx_t;   // needs to hold [ 0..max_reserved_regions() )
  94 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
  95 
  96 // The G1 STW is alive closure.
  97 // An instance is embedded into the G1CH and used as the
  98 // (optional) _is_alive_non_header closure in the STW
  99 // reference processor. It is also extensively used during
 100 // reference processing during STW evacuation pauses.
 101 class G1STWIsAliveClosure : public BoolObjectClosure {
 102   G1CollectedHeap* _g1h; // Backing heap, supplied at construction; never changed afterwards.
 103 public:
 104   G1STWIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
 105   bool do_object_b(oop p) override; // Liveness query for p; defined out of line (not visible in this chunk).
 106 };
 107 

 212   // Number of bytes used in all regions during GC. Typically changed when
 213   // retiring a GC alloc region.
 214   size_t _bytes_used_during_gc;
 215 
 216 public:
 217   size_t bytes_used_during_gc() const { return _bytes_used_during_gc; } // Read-only accessor for _bytes_used_during_gc.
 218 
 219 private:
 220   // Class that handles archive allocation ranges.
 221   G1ArchiveAllocator* _archive_allocator;
 222 
 223   // GC allocation statistics policy for survivors.
 224   G1EvacStats _survivor_evac_stats;
 225 
 226   // GC allocation statistics policy for tenured objects.
 227   G1EvacStats _old_evac_stats;
 228 
 229   // Helper for monitoring and management support.
 230   G1MonitoringSupport* _monitoring_support;
 231 


 232   uint _num_humongous_objects; // Current amount of (all) humongous objects found in the heap.
 233   uint _num_humongous_reclaim_candidates; // Number of humongous object eager reclaim candidates.
 234 public:
 235   uint num_humongous_objects() const { return _num_humongous_objects; } // Accessor: current number of (all) humongous objects found in the heap.
 236   uint num_humongous_reclaim_candidates() const { return _num_humongous_reclaim_candidates; } // Accessor: number of humongous eager-reclaim candidates.
 237   bool has_humongous_reclaim_candidates() const { return _num_humongous_reclaim_candidates > 0; } // True iff at least one eager-reclaim candidate exists.
 238 
 239   void set_humongous_stats(uint num_humongous_total, uint num_humongous_candidates);
 240 
 241   bool should_sample_collection_set_candidates() const;
 242   void set_collection_set_candidates_stats(G1SegmentedArrayMemoryStats& stats);
 243   void set_young_gen_card_set_stats(const G1SegmentedArrayMemoryStats& stats);
 244 




 245 private:
 246 
 247   G1HRPrinter _hr_printer;
 248 
 249   // Return true if an explicit GC should start a concurrent cycle instead
 250   // of doing a STW full GC. A concurrent cycle should be started if:
 251   // (a) cause == _g1_humongous_allocation,
 252   // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent,
 253   // (c) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent,
 254   // (d) cause == _wb_conc_mark or _wb_breakpoint,
 255   // (e) cause == _g1_periodic_collection and +G1PeriodicGCInvokesConcurrent.
 256   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 257 
 258   // Attempt to start a concurrent cycle with the indicated cause.
 259   // precondition: should_do_concurrent_full_gc(cause)
 260   bool try_collect_concurrently(GCCause::Cause cause,
 261                                 uint gc_counter,
 262                                 uint old_marking_started_before);
 263 
 264   // indicates whether we are in young or mixed GC mode

  68 class G1ArchiveAllocator;
  69 class G1BatchedTask;
  70 class G1CardTableEntryClosure;
  71 class G1ConcurrentMark;
  72 class G1ConcurrentMarkThread;
  73 class G1ConcurrentRefine;
  74 class G1GCCounters;
  75 class G1GCPhaseTimes;
  76 class G1HeapSizingPolicy;
  77 class G1HotCardCache;
  78 class G1NewTracer;
  79 class G1RemSet;
  80 class G1ServiceTask;
  81 class G1ServiceThread;
  82 class GCMemoryManager;
  83 class HeapRegion;
  84 class MemoryPool;
  85 class nmethod;
  86 class ReferenceProcessor;
  87 class STWGCTimer;
  88 class SlidingForwarding;
  89 class WorkerThreads;
  90 
  91 typedef OverflowTaskQueue<ScannerTask, mtGC>           G1ScannerTasksQueue;
  92 typedef GenericTaskQueueSet<G1ScannerTasksQueue, mtGC> G1ScannerTasksQueueSet;
  93 
  94 typedef int RegionIdx_t;   // needs to hold [ 0..max_reserved_regions() )
  95 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
  96 
  97 // The G1 STW is alive closure.
  98 // An instance is embedded into the G1CH and used as the
  99 // (optional) _is_alive_non_header closure in the STW
 100 // reference processor. It is also extensively used during
 101 // reference processing during STW evacuation pauses.
 102 class G1STWIsAliveClosure : public BoolObjectClosure {
 103   G1CollectedHeap* _g1h; // Backing heap, supplied at construction; never changed afterwards.
 104 public:
 105   G1STWIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
 106   bool do_object_b(oop p) override; // Liveness query for p; defined out of line (not visible in this chunk).
 107 };
 108 

 213   // Number of bytes used in all regions during GC. Typically changed when
 214   // retiring a GC alloc region.
 215   size_t _bytes_used_during_gc;
 216 
 217 public:
 218   size_t bytes_used_during_gc() const { return _bytes_used_during_gc; } // Read-only accessor for _bytes_used_during_gc.
 219 
 220 private:
 221   // Class that handles archive allocation ranges.
 222   G1ArchiveAllocator* _archive_allocator;
 223 
 224   // GC allocation statistics policy for survivors.
 225   G1EvacStats _survivor_evac_stats;
 226 
 227   // GC allocation statistics policy for tenured objects.
 228   G1EvacStats _old_evac_stats;
 229 
 230   // Helper for monitoring and management support.
 231   G1MonitoringSupport* _monitoring_support;
 232 
 233   SlidingForwarding* _forwarding;
 234 
 235   uint _num_humongous_objects; // Current amount of (all) humongous objects found in the heap.
 236   uint _num_humongous_reclaim_candidates; // Number of humongous object eager reclaim candidates.
 237 public:
 238   uint num_humongous_objects() const { return _num_humongous_objects; } // Accessor: current number of (all) humongous objects found in the heap.
 239   uint num_humongous_reclaim_candidates() const { return _num_humongous_reclaim_candidates; } // Accessor: number of humongous eager-reclaim candidates.
 240   bool has_humongous_reclaim_candidates() const { return _num_humongous_reclaim_candidates > 0; } // True iff at least one eager-reclaim candidate exists.
 241 
 242   void set_humongous_stats(uint num_humongous_total, uint num_humongous_candidates);
 243 
 244   bool should_sample_collection_set_candidates() const;
 245   void set_collection_set_candidates_stats(G1SegmentedArrayMemoryStats& stats);
 246   void set_young_gen_card_set_stats(const G1SegmentedArrayMemoryStats& stats);
 247 
 248   SlidingForwarding* forwarding() const { // Accessor for _forwarding. NOTE(review): where _forwarding is initialized is not visible in this chunk — confirm it is set before use.
 249     return _forwarding;
 250   }
 251 
 252 private:
 253 
 254   G1HRPrinter _hr_printer;
 255 
 256   // Return true if an explicit GC should start a concurrent cycle instead
 257   // of doing a STW full GC. A concurrent cycle should be started if:
 258   // (a) cause == _g1_humongous_allocation,
 259   // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent,
 260   // (c) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent,
 261   // (d) cause == _wb_conc_mark or _wb_breakpoint,
 262   // (e) cause == _g1_periodic_collection and +G1PeriodicGCInvokesConcurrent.
 263   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 264 
 265   // Attempt to start a concurrent cycle with the indicated cause.
 266   // precondition: should_do_concurrent_full_gc(cause)
 267   bool try_collect_concurrently(GCCause::Cause cause,
 268                                 uint gc_counter,
 269                                 uint old_marking_started_before);
 270 
 271   // indicates whether we are in young or mixed GC mode
< prev index next >