class G1ArchiveAllocator;
class G1BatchedTask;
class G1CardTableEntryClosure;
class G1ConcurrentMark;
class G1ConcurrentMarkThread;
class G1ConcurrentRefine;
class G1GCCounters;
class G1GCPhaseTimes;
class G1HeapSizingPolicy;
class G1HotCardCache;
class G1NewTracer;
class G1RemSet;
class G1ServiceTask;
class G1ServiceThread;
class GCMemoryManager;
class HeapRegion;
class MemoryPool;
class nmethod;
class ReferenceProcessor;
class STWGCTimer;
class SlidingForwarding;
class WorkerThreads;

typedef OverflowTaskQueue<ScannerTask, mtGC> G1ScannerTasksQueue;
typedef GenericTaskQueueSet<G1ScannerTasksQueue, mtGC> G1ScannerTasksQueueSet;

typedef int RegionIdx_t;   // needs to hold [ 0..max_reserved_regions() )
typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )

// The G1 STW is-alive closure.
// An instance is embedded into the G1CollectedHeap and used as the
// (optional) _is_alive_non_header closure in the STW
// reference processor. It is also used extensively during
// reference processing in STW evacuation pauses.
class G1STWIsAliveClosure : public BoolObjectClosure {
  G1CollectedHeap* _g1h;
public:
  G1STWIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
  bool do_object_b(oop p) override;
};
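
// A plausible sketch of do_object_b (illustrative only; the real definition
// lives in g1CollectedHeap.cpp): an object is considered alive iff it is
// outside the collection set, or inside it and already forwarded (i.e.
// copied) during evacuation.
//
//   bool G1STWIsAliveClosure::do_object_b(oop p) {
//     return !_g1h->is_in_cset(p) || p->is_forwarded();
//   }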

// ...

  // Number of bytes used in all regions during GC. Typically changed when
  // retiring a GC alloc region.
  size_t _bytes_used_during_gc;

public:
  size_t bytes_used_during_gc() const { return _bytes_used_during_gc; }

private:
  // Class that handles archive allocation ranges.
  G1ArchiveAllocator* _archive_allocator;

  // GC allocation statistics policy for survivors.
  G1EvacStats _survivor_evac_stats;

  // GC allocation statistics policy for tenured objects.
  G1EvacStats _old_evac_stats;

  // Helper for monitoring and management support.
  G1MonitoringSupport* _monitoring_support;

  // Forwarding table used to record new object locations during full GC
  // compaction.
  SlidingForwarding* _forwarding;

  // Records whether the region at the given index is (still) a
  // candidate for eager reclaim. Only valid for humongous start
  // regions; other regions have unspecified values. Humongous start
  // regions are initialized at the start of a collection pause, with
  // candidates removed from the set as they are found reachable from
  // roots or the young generation.
  class HumongousReclaimCandidates : public G1BiasedMappedArray<bool> {
  protected:
    bool default_value() const override { return false; }
  public:
    void clear() { G1BiasedMappedArray<bool>::clear(); }
    void set_candidate(uint region, bool value) {
      set_by_index(region, value);
    }
    bool is_candidate(uint region) {
      return get_by_index(region);
    }
  };
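
  // Typical (illustrative) use of the candidate set during a pause; the
  // actual call sites are in the eager-reclaim code, not shown here:
  //
  //   // At pause start, register each humongous start region:
  //   _humongous_reclaim_candidates.set_candidate(region_idx, true);
  //
  //   // When a reference into a humongous object is found from roots or
  //   // the young gen, the object is no longer eagerly reclaimable:
  //   _humongous_reclaim_candidates.set_candidate(region_idx, false);
  //
  //   // At pause end, regions still marked as candidates are reclaimed:
  //   if (_humongous_reclaim_candidates.is_candidate(region_idx)) { ... }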

  HumongousReclaimCandidates _humongous_reclaim_candidates;
  uint _num_humongous_objects; // Current number of (all) humongous objects found in the heap.
  uint _num_humongous_reclaim_candidates; // Number of humongous object eager reclaim candidates.

public:
  uint num_humongous_objects() const { return _num_humongous_objects; }
  uint num_humongous_reclaim_candidates() const { return _num_humongous_reclaim_candidates; }
  bool has_humongous_reclaim_candidates() const { return _num_humongous_reclaim_candidates > 0; }

  bool should_do_eager_reclaim() const;

  void set_humongous_stats(uint num_humongous_total, uint num_humongous_candidates);

  bool should_sample_collection_set_candidates() const;
  void set_collection_set_candidates_stats(G1SegmentedArrayMemoryStats& stats);
  void set_young_gen_card_set_stats(const G1SegmentedArrayMemoryStats& stats);

  SlidingForwarding* forwarding() const { return _forwarding; }

private:

  G1HRPrinter _hr_printer;

  // Return true if an explicit GC should start a concurrent cycle instead
  // of doing a STW full GC. A concurrent cycle should be started if:
  // (a) cause == _g1_humongous_allocation,
  // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent,
  // (c) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent,
  // (d) cause == _wb_conc_mark or _wb_breakpoint,
  // (e) cause == _g1_periodic_collection and +G1PeriodicGCInvokesConcurrent.
  bool should_do_concurrent_full_gc(GCCause::Cause cause);
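
  // A plausible shape for the predicate above (illustrative sketch; the
  // actual definition is in g1CollectedHeap.cpp):
  //
  //   switch (cause) {
  //     case GCCause::_g1_humongous_allocation: return true;
  //     case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
  //     case GCCause::_dcmd_gc_run:             return ExplicitGCInvokesConcurrent;
  //     case GCCause::_wb_conc_mark:            // fall through
  //     case GCCause::_wb_breakpoint:           return true;
  //     case GCCause::_g1_periodic_collection:  return G1PeriodicGCInvokesConcurrent;
  //     default:                                return false;
  //   }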

  // Attempt to start a concurrent cycle with the indicated cause.
  // gc_counter and old_marking_started_before let the caller detect whether
  // another thread has already performed the requested collection work.
  // precondition: should_do_concurrent_full_gc(cause)
  bool try_collect_concurrently(GCCause::Cause cause,
                                uint gc_counter,
                                uint old_marking_started_before);

  // Indicates whether we are in young-only or mixed GC mode.