class G1ParScanThreadState;
class MemoryPool;
class MemoryManager;
class ObjectClosure;
class SpaceClosure;
class CompactibleSpaceClosure;
class Space;
class G1BatchedGangTask;
class G1CardTableEntryClosure;
class G1CollectionSet;
class G1Policy;
class G1HotCardCache;
class G1RemSet;
class G1ServiceTask;
class G1ServiceThread;
class G1ConcurrentMark;
class G1ConcurrentMarkThread;
class G1ConcurrentRefine;
class GenerationCounters;
class STWGCTimer;
class SlidingForwarding;
class G1NewTracer;
class EvacuationFailedInfo;
class nmethod;
class WorkGang;
class G1Allocator;
class G1ArchiveAllocator;
class G1FullGCScope;
class G1HeapVerifier;
class G1HeapSizingPolicy;
class G1HeapSummary;
class G1EvacSummary;

typedef OverflowTaskQueue<ScannerTask, mtGC> G1ScannerTasksQueue;
typedef GenericTaskQueueSet<G1ScannerTasksQueue, mtGC> G1ScannerTasksQueueSet;
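
// A minimal usage sketch for these queue types (illustrative only;
// `queues`, `worker_id` and `task` are hypothetical locals, not part of
// this header). Each worker pushes and pops its own queue, and steals
// from peers once its own queue is empty:
//
//   G1ScannerTasksQueueSet* queues = ...;              // one queue per worker
//   G1ScannerTasksQueue* q = queues->queue(worker_id); // this worker's queue
//   ScannerTask task;
//   q->push(task);                                     // publish work
//   while (q->pop_local(task)) { /* process task */ }
//   while (queues->steal(worker_id, task)) { /* process stolen task */ }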

typedef int RegionIdx_t;   // needs to hold [ 0..max_reserved_regions() )
typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
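
// For illustration, a (region, card) index pair can be derived from a heap
// address along these lines (hypothetical sketch; `region` and `addr` are
// not part of this header):
//
//   RegionIdx_t region_idx = (RegionIdx_t)region->hrm_index();
//   CardIdx_t   card_idx   = (CardIdx_t)(pointer_delta(addr, region->bottom(), 1)
//                                        >> CardTable::card_shift);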

// The G1 STW is alive closure.
// An instance is embedded into the G1CH and used as the
// ...

  // Class that handles archive allocation ranges.
  G1ArchiveAllocator* _archive_allocator;

  // GC allocation statistics policy for survivors.
  G1EvacStats _survivor_evac_stats;

  // GC allocation statistics policy for tenured objects.
  G1EvacStats _old_evac_stats;

  // Specifies whether we should attempt to expand the heap after a
  // region allocation failure. If heap expansion fails we set this to
  // false, so that we don't re-attempt expansion (if one attempt fails,
  // subsequent attempts are likely to fail as well).
  // Currently it is only consulted during GC, and it is reset at the
  // start of each GC.
  bool _expand_heap_after_alloc_failure;
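
  // The intended pattern is roughly the following (hypothetical sketch of
  // an allocation slow path, not the actual code; `result` and `word_size`
  // are illustrative locals):
  //
  //   if (result == NULL && _expand_heap_after_alloc_failure) {
  //     if (!expand(word_size * HeapWordSize)) {
  //       _expand_heap_after_alloc_failure = false; // don't retry this GC
  //     }
  //   }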

  // Helper for monitoring and management support.
  G1MonitoringSupport* _g1mm;

  // Forwarding-pointer bookkeeping used by full GC compaction.
  SlidingForwarding* _forwarding;

  // Records whether the region at the given index is (still) a
  // candidate for eager reclaim. Only valid for humongous start
  // regions; other regions have unspecified values. Humongous start
  // regions are initialized at start of collection pause, with
  // candidates removed from the set as they are found reachable from
  // roots or the young generation.
  class HumongousReclaimCandidates : public G1BiasedMappedArray<bool> {
   protected:
    bool default_value() const { return false; }
   public:
    void clear() { G1BiasedMappedArray<bool>::clear(); }
    void set_candidate(uint region, bool value) {
      set_by_index(region, value);
    }
    bool is_candidate(uint region) {
      return get_by_index(region);
    }
  };
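
  // Typical lifecycle of this set over a collection pause (hypothetical
  // sketch using only the class's own API; `region_idx` is illustrative):
  //
  //   _humongous_reclaim_candidates.clear();
  //   _humongous_reclaim_candidates.set_candidate(region_idx, true);  // at pause start
  //   ...
  //   _humongous_reclaim_candidates.set_candidate(region_idx, false); // found reachable
  //   ...
  //   if (_humongous_reclaim_candidates.is_candidate(region_idx)) {
  //     // eagerly reclaim the humongous object
  //   }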

  HumongousReclaimCandidates _humongous_reclaim_candidates;
  uint _num_humongous_objects; // Current number of (all) humongous objects found in the heap.
  uint _num_humongous_reclaim_candidates; // Number of humongous object eager reclaim candidates.
public:
  uint num_humongous_objects() const { return _num_humongous_objects; }
  uint num_humongous_reclaim_candidates() const { return _num_humongous_reclaim_candidates; }
  bool has_humongous_reclaim_candidates() const { return _num_humongous_reclaim_candidates > 0; }

  bool should_do_eager_reclaim() const;

  SlidingForwarding* forwarding() const {
    return _forwarding;
  }

private:

  G1HRPrinter _hr_printer;

  // Return true if an explicit GC should start a concurrent cycle instead
  // of doing a STW full GC. A concurrent cycle should be started if:
  // (a) cause == _g1_humongous_allocation,
  // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent,
  // (c) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent,
  // (d) cause == _wb_conc_mark or _wb_breakpoint,
  // (e) cause == _g1_periodic_collection and +G1PeriodicGCInvokesConcurrent.
  // (See the sketch below.)
  bool should_do_concurrent_full_gc(GCCause::Cause cause);
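
  // The conditions above roughly amount to the following mapping
  // (illustrative paraphrase of the cases, not the actual implementation):
  //
  //   switch (cause) {
  //     case GCCause::_g1_humongous_allocation: return true;
  //     case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
  //     case GCCause::_dcmd_gc_run:             return ExplicitGCInvokesConcurrent;
  //     case GCCause::_wb_conc_mark:            return true;
  //     case GCCause::_wb_breakpoint:           return true;
  //     case GCCause::_g1_periodic_collection:  return G1PeriodicGCInvokesConcurrent;
  //     default:                                return false;
  //   }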

  // Attempt to start a concurrent cycle with the indicated cause.
  // precondition: should_do_concurrent_full_gc(cause)
  bool try_collect_concurrently(GCCause::Cause cause,
                                uint gc_counter,
                                uint old_marking_started_before);
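
  // Callers typically snapshot the collection counters under the Heap_lock
  // and pass them in, along these lines (hypothetical sketch of the calling
  // convention, not the actual collect() code):
  //
  //   uint gc_count_before;
  //   uint old_marking_started_before;
  //   {
  //     MutexLocker ml(Heap_lock);
  //     gc_count_before = total_collections();
  //     old_marking_started_before = old_marking_cycles_started();
  //   }
  //   bool started = try_collect_concurrently(cause,
  //                                           gc_count_before,
  //                                           old_marking_started_before);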

  // Indicates whether we are in young or mixed GC mode.