< prev index next >

src/hotspot/share/gc/g1/g1CollectedHeap.hpp

Print this page

  66 class G1ArchiveAllocator;
  67 class G1BatchedTask;
  68 class G1CardTableEntryClosure;
  69 class G1ConcurrentMark;
  70 class G1ConcurrentMarkThread;
  71 class G1ConcurrentRefine;
  72 class G1GCCounters;
  73 class G1GCPhaseTimes;
  74 class G1HeapSizingPolicy;
  75 class G1HotCardCache;
  76 class G1NewTracer;
  77 class G1RemSet;
  78 class G1ServiceTask;
  79 class G1ServiceThread;
  80 class GCMemoryManager;
  81 class HeapRegion;
  82 class MemoryPool;
  83 class nmethod;
  84 class ReferenceProcessor;
  85 class STWGCTimer;

  86 class WorkerThreads;
  87 
  88 typedef OverflowTaskQueue<ScannerTask, mtGC>           G1ScannerTasksQueue;
  89 typedef GenericTaskQueueSet<G1ScannerTasksQueue, mtGC> G1ScannerTasksQueueSet;
  90 
  91 typedef int RegionIdx_t;   // needs to hold [ 0..max_reserved_regions() )
  92 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
  93 
  94 // The G1 STW is alive closure.
  95 // An instance is embedded into the G1CH and used as the
  96 // (optional) _is_alive_non_header closure in the STW
  97 // reference processor. It is also extensively used during
  98 // reference processing during STW evacuation pauses.
  99 class G1STWIsAliveClosure : public BoolObjectClosure {
 100   G1CollectedHeap* _g1h;  // Back-pointer to the owning heap (non-owning).
 101 public:
 102   G1STWIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
 103   // Liveness predicate for oop p; body not shown in this listing.
 104   bool do_object_b(oop p) override;
 105 };
 105 

 210   // Number of bytes used in all regions during GC. Typically changed when
 211   // retiring a GC alloc region.
 212   size_t _bytes_used_during_gc;
 213 
 214 public:
 215   size_t bytes_used_during_gc() const { return _bytes_used_during_gc; }
 216 
 217 private:
 218   // Class that handles archive allocation ranges.
 219   G1ArchiveAllocator* _archive_allocator;
 220 
 221   // GC allocation statistics policy for survivors.
 222   G1EvacStats _survivor_evac_stats;
 223 
 224   // GC allocation statistics policy for tenured objects.
 225   G1EvacStats _old_evac_stats;
 226 
 227   // Helper for monitoring and management support.
 228   G1MonitoringSupport* _monitoring_support;
 229 


 230   // Records whether the region at the given index is (still) a
 231   // candidate for eager reclaim.  Only valid for humongous start
 232   // regions; other regions have unspecified values.  Humongous start
 233   // regions are initialized at start of collection pause, with
 234   // candidates removed from the set as they are found reachable from
 235   // roots or the young generation.
 236   class HumongousReclaimCandidates : public G1BiasedMappedArray<bool> {
 237   protected:
 238     // New/cleared entries default to "not a candidate".
 239     bool default_value() const override { return false; }
 240   public:
 241     // Reset every entry back to default_value().
 242     void clear() { G1BiasedMappedArray<bool>::clear(); }
 243     void set_candidate(uint region, bool value) {
 244       set_by_index(region, value);
 245     }
 246     bool is_candidate(uint region) {
 247       return get_by_index(region);
 248     }
 249   };
 248 
 249   HumongousReclaimCandidates _humongous_reclaim_candidates;
 250   uint _num_humongous_objects; // Current amount of (all) humongous objects found in the heap.
 251   uint _num_humongous_reclaim_candidates; // Number of humongous object eager reclaim candidates.
 252 public:
 253   uint num_humongous_objects() const { return _num_humongous_objects; }
 254   uint num_humongous_reclaim_candidates() const { return _num_humongous_reclaim_candidates; }
 255   bool has_humongous_reclaim_candidates() const { return _num_humongous_reclaim_candidates > 0; }
 256 
 257   bool should_do_eager_reclaim() const;
 258 
 259   void set_humongous_stats(uint num_humongous_total, uint num_humongous_candidates);
 260 
 261   bool should_sample_collection_set_candidates() const;
 262   void set_collection_set_candidates_stats(G1CardSetMemoryStats& stats);
 263   void set_young_gen_card_set_stats(const G1CardSetMemoryStats& stats);
 264 




 265 private:
 266 
 267   G1HRPrinter _hr_printer;
 268 
 269   // Return true if an explicit GC should start a concurrent cycle instead
 270   // of doing a STW full GC. A concurrent cycle should be started if:
 271   // (a) cause == _g1_humongous_allocation,
 272   // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent,
 273   // (c) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent,
 274   // (d) cause == _wb_conc_mark or _wb_breakpoint,
 275   // (e) cause == _g1_periodic_collection and +G1PeriodicGCInvokesConcurrent.
 276   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 277 
 278   // Attempt to start a concurrent cycle with the indicated cause.
 279   // precondition: should_do_concurrent_full_gc(cause)
 280   bool try_collect_concurrently(GCCause::Cause cause,
 281                                 uint gc_counter,
 282                                 uint old_marking_started_before);
 283 
 284   // indicates whether we are in young or mixed GC mode

  66 class G1ArchiveAllocator;
  67 class G1BatchedTask;
  68 class G1CardTableEntryClosure;
  69 class G1ConcurrentMark;
  70 class G1ConcurrentMarkThread;
  71 class G1ConcurrentRefine;
  72 class G1GCCounters;
  73 class G1GCPhaseTimes;
  74 class G1HeapSizingPolicy;
  75 class G1HotCardCache;
  76 class G1NewTracer;
  77 class G1RemSet;
  78 class G1ServiceTask;
  79 class G1ServiceThread;
  80 class GCMemoryManager;
  81 class HeapRegion;
  82 class MemoryPool;
  83 class nmethod;
  84 class ReferenceProcessor;
  85 class STWGCTimer;
  86 class SlidingForwarding;
  87 class WorkerThreads;
  88 
  89 typedef OverflowTaskQueue<ScannerTask, mtGC>           G1ScannerTasksQueue;
  90 typedef GenericTaskQueueSet<G1ScannerTasksQueue, mtGC> G1ScannerTasksQueueSet;
  91 
  92 typedef int RegionIdx_t;   // needs to hold [ 0..max_reserved_regions() )
  93 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
  94 
  95 // The G1 STW is alive closure.
  96 // An instance is embedded into the G1CH and used as the
  97 // (optional) _is_alive_non_header closure in the STW
  98 // reference processor. It is also extensively used during
  99 // reference processing during STW evacuation pauses.
 100 class G1STWIsAliveClosure : public BoolObjectClosure {
 101   G1CollectedHeap* _g1h;  // Back-pointer to the owning heap (non-owning).
 102 public:
 103   G1STWIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
 104   // Liveness predicate for oop p; body not shown in this listing.
 105   bool do_object_b(oop p) override;
 106 };
 106 

 211   // Number of bytes used in all regions during GC. Typically changed when
 212   // retiring a GC alloc region.
 213   size_t _bytes_used_during_gc;
 214 
 215 public:
 216   size_t bytes_used_during_gc() const { return _bytes_used_during_gc; }
 217 
 218 private:
 219   // Class that handles archive allocation ranges.
 220   G1ArchiveAllocator* _archive_allocator;
 221 
 222   // GC allocation statistics policy for survivors.
 223   G1EvacStats _survivor_evac_stats;
 224 
 225   // GC allocation statistics policy for tenured objects.
 226   G1EvacStats _old_evac_stats;
 227 
 228   // Helper for monitoring and management support.
 229   G1MonitoringSupport* _monitoring_support;
 230 
  // NOTE(review): presumably the forwarding table used during full-GC
  // compaction; ownership/lifetime not visible in this listing -- confirm.
 231   SlidingForwarding* _forwarding;
 232 
 233   // Records whether the region at the given index is (still) a
 234   // candidate for eager reclaim.  Only valid for humongous start
 235   // regions; other regions have unspecified values.  Humongous start
 236   // regions are initialized at start of collection pause, with
 237   // candidates removed from the set as they are found reachable from
 238   // roots or the young generation.
 239   class HumongousReclaimCandidates : public G1BiasedMappedArray<bool> {
 240   protected:
 241     // New/cleared entries default to "not a candidate".
 242     bool default_value() const override { return false; }
 243   public:
 244     // Reset every entry back to default_value().
 245     void clear() { G1BiasedMappedArray<bool>::clear(); }
 246     void set_candidate(uint region, bool value) {
 247       set_by_index(region, value);
 248     }
 249     bool is_candidate(uint region) {
 250       return get_by_index(region);
 251     }
 252   };
 251 
 252   HumongousReclaimCandidates _humongous_reclaim_candidates;
 253   uint _num_humongous_objects; // Current amount of (all) humongous objects found in the heap.
 254   uint _num_humongous_reclaim_candidates; // Number of humongous object eager reclaim candidates.
 255 public:
 256   uint num_humongous_objects() const { return _num_humongous_objects; }
 257   uint num_humongous_reclaim_candidates() const { return _num_humongous_reclaim_candidates; }
 258   bool has_humongous_reclaim_candidates() const { return _num_humongous_reclaim_candidates > 0; }
 259 
 260   bool should_do_eager_reclaim() const;
 261 
 262   void set_humongous_stats(uint num_humongous_total, uint num_humongous_candidates);
 263 
 264   bool should_sample_collection_set_candidates() const;
 265   void set_collection_set_candidates_stats(G1CardSetMemoryStats& stats);
 266   void set_young_gen_card_set_stats(const G1CardSetMemoryStats& stats);
 267 
  // Accessor for the _forwarding member; returns a raw (non-owning) pointer.
  // NOTE(review): caller must not outlive the heap's table -- confirm lifetime.
 268   SlidingForwarding* forwarding() const {
 269     return _forwarding;
 270   }
 271 
 272 private:
 273 
 274   G1HRPrinter _hr_printer;
 275 
 276   // Return true if an explicit GC should start a concurrent cycle instead
 277   // of doing a STW full GC. A concurrent cycle should be started if:
 278   // (a) cause == _g1_humongous_allocation,
 279   // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent,
 280   // (c) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent,
 281   // (d) cause == _wb_conc_mark or _wb_breakpoint,
 282   // (e) cause == _g1_periodic_collection and +G1PeriodicGCInvokesConcurrent.
 283   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 284 
 285   // Attempt to start a concurrent cycle with the indicated cause.
 286   // precondition: should_do_concurrent_full_gc(cause)
 287   bool try_collect_concurrently(GCCause::Cause cause,
 288                                 uint gc_counter,
 289                                 uint old_marking_started_before);
 290 
 291   // indicates whether we are in young or mixed GC mode
< prev index next >