class ShenandoahCollectorPolicy;
class ShenandoahControlThread;
class ShenandoahGCSession;
class ShenandoahGCStateResetter;
class ShenandoahHeuristics;
class ShenandoahMarkingContext;
class ShenandoahMode;
class ShenandoahPhaseTimings;
class ShenandoahHeap;
class ShenandoahHeapRegion;
class ShenandoahHeapRegionClosure;
class ShenandoahCollectionSet;
class ShenandoahFreeSet;
class ShenandoahConcurrentMark;
class ShenandoahFullGC;
class ShenandoahMonitoringSupport;
class ShenandoahPacer;
class ShenandoahReferenceProcessor;
class ShenandoahVerifier;
class ShenandoahWorkGang;
class SlidingForwarding;
class VMStructs;

// Used for buffering per-region liveness data.
// Needed since ShenandoahHeapRegion uses atomics to update liveness.
// The buffer in ShenandoahHeap has max-workers elements, each of which is an
// array of uint16_t with one entry per region. The choice of uint16_t is not
// accidental: it trades static/dynamic footprint, which translates into cache
// pressure (already high during marking), against the number of atomic updates
// needed when entries overflow. uint32_t is too large, uint8_t is too small.
typedef uint16_t ShenandoahLiveData;
#define SHENANDOAH_LIVEDATA_MAX ((ShenandoahLiveData)-1)
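// Illustrative sketch (an assumption, not code from this file): a marking
// worker would accumulate live words into its private ShenandoahLiveData
// buffer and only fall back to the region-side atomic counter when the 16-bit
// entry would overflow; flush_to_region() is a hypothetical stand-in for that
// atomic update.
//
//   ShenandoahLiveData* ld = ...;                  // this worker's buffer
//   size_t new_live = (size_t) ld[region_idx] + live_words;
//   if (new_live >= SHENANDOAH_LIVEDATA_MAX) {
//     flush_to_region(region_idx, new_live);       // hypothetical atomic flush
//     ld[region_idx] = 0;
//   } else {
//     ld[region_idx] = (ShenandoahLiveData) new_live;
//   }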

class ShenandoahRegionIterator : public StackObj {
private:
  ShenandoahHeap* _heap;

  shenandoah_padding(0);
  volatile size_t _index;
  shenandoah_padding(1);

// ... (intervening declarations elided; the members below are part of class ShenandoahHeap) ...

  ShenandoahWorkGang* _workers;
  ShenandoahWorkGang* _safepoint_workers;

public:
  uint max_workers();
  void assert_gc_workers(uint nworker) NOT_DEBUG_RETURN;

  WorkGang* workers() const;
  WorkGang* safepoint_workers();

  void gc_threads_do(ThreadClosure* tcl) const;
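
  // Illustrative sketch (an assumption, not part of this header): visiting the
  // GC threads through gc_threads_do() with a ThreadClosure, whose standard
  // HotSpot callback is do_thread(Thread*).
  //
  //   class CountGCThreadsClosure : public ThreadClosure {
  //   public:
  //     uint _count;
  //     CountGCThreadsClosure() : _count(0) {}
  //     void do_thread(Thread* t) { _count++; }
  //   };
  //
  //   CountGCThreadsClosure cl;
  //   ShenandoahHeap::heap()->gc_threads_do(&cl);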

// ---------- Heap regions handling machinery
//
private:
  MemRegion _heap_region;
  bool _heap_region_special;
  size_t _num_regions;
  ShenandoahHeapRegion** _regions;
  ShenandoahRegionIterator _update_refs_iterator;
  SlidingForwarding* _forwarding;

public:

  inline HeapWord* base() const { return _heap_region.start(); }

  inline size_t num_regions() const { return _num_regions; }
  inline bool is_heap_region_special() { return _heap_region_special; }

  inline ShenandoahHeapRegion* const heap_region_containing(const void* addr) const;
  inline size_t heap_region_index_containing(const void* addr) const;

  inline ShenandoahHeapRegion* const get_region(size_t region_idx) const;

  void heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
  void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
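
  // Illustrative sketch (assumption: ShenandoahHeapRegionClosure exposes a
  // heap_region_do(ShenandoahHeapRegion*) callback, as is conventional for
  // these region closures). Counting regions with the serial iterator:
  //
  //   class CountRegionsClosure : public ShenandoahHeapRegionClosure {
  //   public:
  //     size_t _count;
  //     CountRegionsClosure() : _count(0) {}
  //     void heap_region_do(ShenandoahHeapRegion* r) { _count++; }
  //   };
  //
  //   CountRegionsClosure cl;
  //   ShenandoahHeap::heap()->heap_region_iterate(&cl);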

  SlidingForwarding* forwarding() const { return _forwarding; }

// ---------- GC state machinery
//
// GC state describes the important parts of the collector state that may be
// used to make barrier selection decisions in native and generated code.
// Multiple bits can be set at once.
//
// Important invariant: when GC state is zero, the heap is stable, and no
// barriers are required.
//
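// Illustrative sketch (an assumption for this comment, not code from this
// file): given the bit positions declared below, a reader of the GC state
// byte would select barriers roughly like this.
//
//   char state = ...;   // e.g. a thread-local copy of the heap's GC state
//   bool needs_lrb  = (state & (1 << HAS_FORWARDED_BITPOS)) != 0;
//   bool needs_satb = (state & (1 << MARKING_BITPOS))       != 0;
//   if (state == 0) {
//     // Heap is stable: no barriers are required.
//   }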
public:
  enum GCStateBitPos {
    // Heap has forwarded objects: needs LRB barriers.
    HAS_FORWARDED_BITPOS = 0,

    // Heap is under marking: needs SATB barriers.
    MARKING_BITPOS = 1,

    // Heap is under evacuation: needs LRB barriers. (Set together with HAS_FORWARDED)
    EVACUATION_BITPOS = 2,