 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP

#include "gc/shared/markBitMap.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahAllocRequest.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "gc/shenandoah/shenandoahSharedVariables.hpp"
#include "gc/shenandoah/shenandoahUnload.hpp"
#include "memory/metaspace.hpp"
#include "services/memoryManager.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.hpp"

class ConcurrentGCTimer;
class ObjectIterateScanRootClosure;
class ShenandoahCollectorPolicy;
class ShenandoahControlThread;
class ShenandoahGCSession;
class ShenandoahGCStateResetter;
class ShenandoahHeuristics;
class ShenandoahMarkingContext;
class ShenandoahMode;
class ShenandoahPhaseTimings;
class ShenandoahHeap;
class ShenandoahHeapRegion;
class ShenandoahHeapRegionClosure;
class ShenandoahCollectionSet;
class ShenandoahFreeSet;
class ShenandoahConcurrentMark;
class ShenandoahFullGC;
class ShenandoahMonitoringSupport;
class ShenandoahPacer;
class ShenandoahReferenceProcessor;
class ShenandoahVerifier;
class ShenandoahWorkerThreads;
class VMStructs;

// Used for buffering per-region liveness data.
// Needed since ShenandoahHeapRegion uses atomics to update liveness.
// The ShenandoahHeap array has max-workers elements, each of which is an array of
// uint16_t * max_regions. The choice of uint16_t is not accidental:
// there is a tradeoff between static/dynamic footprint that translates
// into cache pressure (which is already high during marking), and
// too many atomic updates. size_t/jint is too large, uint8_t is too small.
typedef uint16_t ShenandoahLiveData;
#define SHENANDOAH_LIVEDATA_MAX ((ShenandoahLiveData)-1)

class ShenandoahRegionIterator : public StackObj {
private:
  ShenandoahHeap* _heap;

  shenandoah_padding(0);
  volatile size_t _index;
  shenandoah_padding(1);

  // No implicit copying: iterators should be passed by reference to capture the state
  NONCOPYABLE(ShenandoahRegionIterator);

public:
  ShenandoahRegionIterator();
  ShenandoahRegionIterator(ShenandoahHeap* heap);

  // Reset iterator to default state
  void reset();

  // Returns next region, or null if there are no more regions.
  // This is multi-thread-safe.
  inline ShenandoahHeapRegion* next();

  // This is *not* MT safe. However, in the absence of multithreaded access, it
  // can be used to determine if there is more work to do.
  bool has_next() const;
};
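
// Illustrative sketch (not part of this header's API): a typical worker loop
// drains the iterator by calling next() until it returns null; next() may be
// called from multiple workers concurrently. process_region() is hypothetical.
//
//   ShenandoahRegionIterator it(heap);
//   for (ShenandoahHeapRegion* r = it.next(); r != nullptr; r = it.next()) {
//     process_region(r);
//   }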

class ShenandoahHeapRegionClosure : public StackObj {
public:
  virtual void heap_region_do(ShenandoahHeapRegion* r) = 0;
  virtual bool is_thread_safe() { return false; }
};
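
// Illustrative sketch: subclasses override heap_region_do() and may opt in to
// parallel_heap_region_iterate() by returning true from is_thread_safe().
// CountRegionsClosure is a hypothetical example, not part of this header:
//
//   class CountRegionsClosure : public ShenandoahHeapRegionClosure {
//   public:
//     size_t _count = 0;
//     void heap_region_do(ShenandoahHeapRegion* r) override { _count++; }
//     // default is_thread_safe() == false: use with serial heap_region_iterate()
//   };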

typedef ShenandoahLock ShenandoahHeapLock;
typedef ShenandoahLocker ShenandoahHeapLocker;
typedef Stack<oop, mtGC> ShenandoahScanObjectStack;

// Shenandoah GC is a low-pause concurrent GC that uses Brooks forwarding pointers
// to encode forwarding data. See BrooksPointer for details on forwarding data encoding.
// See ShenandoahControlThread for GC cycle structure.
//
class ShenandoahHeap : public CollectedHeap {
  friend class ShenandoahAsserts;
  friend class VMStructs;
  friend class ShenandoahGCSession;
  friend class ShenandoahGCStateResetter;
  friend class ShenandoahParallelObjectIterator;
  friend class ShenandoahSafepoint;
  // Supported GC
  friend class ShenandoahConcurrentGC;
  friend class ShenandoahDegenGC;
  friend class ShenandoahFullGC;
  friend class ShenandoahUnload;

// ---------- Locks that guard important data structures in Heap
//
private:
  ShenandoahHeapLock _lock;

public:
  ShenandoahHeapLock* lock() {
    return &_lock;
  }

// ---------- Initialization, termination, identification, printing routines
//
public:
  static ShenandoahHeap* heap();

  const char* name() const override { return "Shenandoah"; }
  ShenandoahHeap::Name kind() const override { return CollectedHeap::Shenandoah; }

  ShenandoahHeap(ShenandoahCollectorPolicy* policy);
  jint initialize() override;
  void post_initialize() override;
  void initialize_mode();
  void initialize_heuristics();

  void initialize_serviceability() override;

  void print_on(outputStream* st) const override;
  void print_extended_on(outputStream *st) const override;
  void print_tracing_info() const override;
  void print_heap_regions_on(outputStream* st) const;

  void stop() override;

  void prepare_for_verify() override;
  void verify(VerifyOption vo) override;

  // WhiteBox testing support.
  bool supports_concurrent_gc_breakpoints() const override {
    return true;
  }

// ---------- Heap counters and metrics
//
private:
  size_t _initial_size;
  size_t _minimum_size;
  volatile size_t _soft_max_size;
  shenandoah_padding(0);
  volatile size_t _used;
  volatile size_t _committed;
  volatile size_t _bytes_allocated_since_gc_start;
  shenandoah_padding(1);

public:
  void increase_used(size_t bytes);
  void decrease_used(size_t bytes);
  void set_used(size_t bytes);

  void increase_committed(size_t bytes);
  void decrease_committed(size_t bytes);
  void increase_allocated(size_t bytes);

  size_t bytes_allocated_since_gc_start();
  void reset_bytes_allocated_since_gc_start();

  size_t min_capacity() const;
  size_t max_capacity() const override;
  size_t soft_max_capacity() const;
  size_t initial_capacity() const;
  size_t capacity() const override;
  size_t used() const override;
  size_t committed() const;

  void set_soft_max_capacity(size_t v);

// ---------- Workers handling
//
private:
  uint _max_workers;
  ShenandoahWorkerThreads* _workers;
  ShenandoahWorkerThreads* _safepoint_workers;

public:
  uint max_workers();
  void assert_gc_workers(uint nworker) NOT_DEBUG_RETURN;

  WorkerThreads* workers() const;
  WorkerThreads* safepoint_workers() override;

  void gc_threads_do(ThreadClosure* tcl) const override;

// ---------- Heap regions handling machinery
//
private:
  MemRegion _heap_region;
  bool      _heap_region_special;
  size_t    _num_regions;
  ShenandoahHeapRegion** _regions;
  ShenandoahRegionIterator _update_refs_iterator;

public:

  inline HeapWord* base() const { return _heap_region.start(); }

  inline size_t num_regions() const { return _num_regions; }
  inline bool is_heap_region_special() { return _heap_region_special; }

  inline ShenandoahHeapRegion* const heap_region_containing(const void* addr) const;
  inline size_t heap_region_index_containing(const void* addr) const;

  inline ShenandoahHeapRegion* const get_region(size_t region_idx) const;

  void heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
  void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;

// ---------- GC state machinery
//
// GC state describes the important parts of collector state that may be
// used to make barrier selection decisions in the native and generated code.
// Multiple bits can be set at once.
//
// Important invariant: when GC state is zero, the heap is stable, and no barriers
// are required.
//
public:
  enum GCStateBitPos {
    // Heap has forwarded objects: needs LRB barriers.
    HAS_FORWARDED_BITPOS = 0,

    // Heap is under marking: needs SATB barriers.
    MARKING_BITPOS = 1,

    // Heap is under evacuation: needs LRB barriers. (Set together with HAS_FORWARDED)
    EVACUATION_BITPOS = 2,

    // Heap is under updating: needs no additional barriers.
    UPDATEREFS_BITPOS = 3,

    // Heap is under weak-reference/roots processing: needs weak-LRB barriers.
    WEAK_ROOTS_BITPOS = 4,
  };

  enum GCState {
    STABLE        = 0,
    HAS_FORWARDED = 1 << HAS_FORWARDED_BITPOS,
    MARKING       = 1 << MARKING_BITPOS,
    EVACUATION    = 1 << EVACUATION_BITPOS,
    UPDATEREFS    = 1 << UPDATEREFS_BITPOS,
    WEAK_ROOTS    = 1 << WEAK_ROOTS_BITPOS,
  };
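
  // Illustrative sketch: barrier selection tests these bits in the gc-state
  // byte. During concurrent evacuation, for example, the state is
  // (HAS_FORWARDED | EVACUATION), and a load-reference barrier is required
  // whenever the HAS_FORWARDED bit is set; a zero state means no barriers:
  //
  //   char s = ShenandoahHeap::heap()->gc_state();
  //   bool needs_lrb = (s & ShenandoahHeap::HAS_FORWARDED) != 0;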

private:
  ShenandoahSharedBitmap _gc_state;
  ShenandoahSharedFlag   _degenerated_gc_in_progress;
  ShenandoahSharedFlag   _full_gc_in_progress;
  ShenandoahSharedFlag   _full_gc_move_in_progress;
  ShenandoahSharedFlag   _progress_last_gc;
  ShenandoahSharedFlag   _concurrent_strong_root_in_progress;

  void set_gc_state_all_threads(char state);
  void set_gc_state_mask(uint mask, bool value);

public:
  char gc_state() const;
  static address gc_state_addr();

  void set_concurrent_mark_in_progress(bool in_progress);
  void set_evacuation_in_progress(bool in_progress);
  void set_update_refs_in_progress(bool in_progress);
  void set_degenerated_gc_in_progress(bool in_progress);
  void set_full_gc_in_progress(bool in_progress);
  void set_full_gc_move_in_progress(bool in_progress);
  void set_has_forwarded_objects(bool cond);
  void set_concurrent_strong_root_in_progress(bool cond);
  void set_concurrent_weak_root_in_progress(bool cond);

  inline bool is_stable() const;
  inline bool is_idle() const;
  inline bool is_concurrent_mark_in_progress() const;
  inline bool is_update_refs_in_progress() const;
  inline bool is_evacuation_in_progress() const;
  inline bool is_degenerated_gc_in_progress() const;
  inline bool is_full_gc_in_progress() const;
  inline bool is_full_gc_move_in_progress() const;
  inline bool has_forwarded_objects() const;
  inline bool is_gc_in_progress_mask(uint mask) const;
  inline bool is_stw_gc_in_progress() const;
  inline bool is_concurrent_strong_root_in_progress() const;
  inline bool is_concurrent_weak_root_in_progress() const;

private:
  enum CancelState {
    // Normal state. GC has not been cancelled and is open for cancellation.
    // Worker threads can suspend for safepoint.
    CANCELLABLE,

    // GC has been cancelled. Worker threads cannot suspend for
    // safepoint but must finish their work as soon as possible.
    CANCELLED,

    // GC has not been cancelled and must not be cancelled. At least
    // one worker thread checks for pending safepoint and may suspend
    // if a safepoint is pending.
    NOT_CANCELLED
  };
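
  // Illustrative sketch: workers poll for cancellation inside their main loop
  // and bail out when GC has been cancelled. has_work() and do_work_step()
  // are hypothetical placeholders:
  //
  //   while (has_work()) {
  //     if (heap->check_cancelled_gc_and_yield()) return;  // also yields to safepoints
  //     do_work_step();
  //   }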

  ShenandoahSharedEnumFlag<CancelState> _cancelled_gc;
  bool try_cancel_gc();

public:
  static address cancelled_gc_addr();

  inline bool cancelled_gc() const;
  inline bool check_cancelled_gc_and_yield(bool sts_active = true);

  inline void clear_cancelled_gc();

  void cancel_gc(GCCause::Cause cause);

public:
  // Elastic heap support
  void entry_uncommit(double shrink_before, size_t shrink_until);
  void op_uncommit(double shrink_before, size_t shrink_until);

private:
  // GC support
  // Reset bitmap, prepare regions for new GC cycle
  void prepare_gc();
  void prepare_regions_and_collection_set(bool concurrent);
  // Evacuation
  void prepare_evacuation(bool concurrent);
  void evacuate_collection_set(bool concurrent);
  // Concurrent root processing
  void prepare_concurrent_roots();
  void finish_concurrent_roots();
  // Concurrent class unloading support
  void do_class_unloading();
  // Reference updating
  void prepare_update_heap_references(bool concurrent);
  void update_heap_references(bool concurrent);
  // Final update region states
  void update_heap_region_states(bool concurrent);
  void rebuild_free_set(bool concurrent);

  void rendezvous_threads();
  void recycle_trash();
public:
  void notify_gc_progress()    { _progress_last_gc.set();   }
  void notify_gc_no_progress() { _progress_last_gc.unset(); }

//
// Mark support
private:
  ShenandoahControlThread*   _control_thread;
  ShenandoahCollectorPolicy* _shenandoah_policy;
  ShenandoahMode*            _gc_mode;
  ShenandoahHeuristics*      _heuristics;
  ShenandoahFreeSet*         _free_set;
  ShenandoahPacer*           _pacer;
  ShenandoahVerifier*        _verifier;

  ShenandoahPhaseTimings*    _phase_timings;

  ShenandoahControlThread* control_thread() { return _control_thread; }

public:
  ShenandoahCollectorPolicy* shenandoah_policy() const { return _shenandoah_policy; }
  ShenandoahMode*            mode()              const { return _gc_mode;           }
  ShenandoahHeuristics*      heuristics()        const { return _heuristics;        }
  ShenandoahFreeSet*         free_set()          const { return _free_set;          }
  ShenandoahPacer*           pacer()             const { return _pacer;             }

  ShenandoahPhaseTimings*    phase_timings()     const { return _phase_timings;     }

  ShenandoahVerifier* verifier();

// ---------- VM subsystem bindings
//
private:
  ShenandoahMonitoringSupport* _monitoring_support;
  MemoryPool*                  _memory_pool;
  GCMemoryManager              _stw_memory_manager;
  GCMemoryManager              _cycle_memory_manager;
  ConcurrentGCTimer*           _gc_timer;
  SoftRefPolicy                _soft_ref_policy;

  // For exporting to SA
  int                          _log_min_obj_alignment_in_bytes;
public:
  ShenandoahMonitoringSupport* monitoring_support() { return _monitoring_support; }
  GCMemoryManager* cycle_memory_manager()           { return &_cycle_memory_manager; }
  GCMemoryManager* stw_memory_manager()             { return &_stw_memory_manager; }
  SoftRefPolicy* soft_ref_policy() override         { return &_soft_ref_policy; }

  GrowableArray<GCMemoryManager*> memory_managers() override;
  GrowableArray<MemoryPool*> memory_pools() override;
  MemoryUsage memory_usage() override;
  GCTracer* tracer();
  ConcurrentGCTimer* gc_timer() const;

// ---------- Reference processing
//
private:
  ShenandoahReferenceProcessor* const _ref_processor;

public:
  ShenandoahReferenceProcessor* ref_processor() { return _ref_processor; }

// ---------- Class Unloading
//
private:
  ShenandoahSharedFlag _unload_classes;
  ShenandoahUnload     _unloader;

public:
  void set_unload_classes(bool uc);
  bool unload_classes() const;

  // Perform STW class unloading and weak root cleaning
  void parallel_cleaning(bool full_gc);

private:
  void stw_unload_classes(bool full_gc);
  void stw_process_weak_roots(bool full_gc);
  void stw_weak_refs(bool full_gc);

  // Heap iteration support
  void scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops);
  bool prepare_aux_bitmap_for_iteration();
  void reclaim_aux_bitmap_for_iteration();

// ---------- Generic interface hooks
// Minor things that the super-interface expects us to implement to play nice with
// the rest of the runtime. Some of the things here are not required to be implemented,
// and can be stubbed out.
//
public:
  bool is_maximal_no_gc() const override shenandoah_not_implemented_return(false);

  bool is_in(const void* p) const override;

  bool requires_barriers(stackChunkOop obj) const override;

  MemRegion reserved_region() const { return _reserved; }
  bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); }

  void collect(GCCause::Cause cause) override;
  void do_full_collection(bool clear_all_soft_refs) override;

  // Used for parsing heap during error printing
  HeapWord* block_start(const void* addr) const;
  bool block_is_obj(const HeapWord* addr) const;
  bool print_location(outputStream* st, void* addr) const override;

  // Used for native heap walkers: heap dumpers, mostly
  void object_iterate(ObjectClosure* cl) override;
  // Parallel heap iteration support
  ParallelObjectIteratorImpl* parallel_object_iterator(uint workers) override;

  // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
  void keep_alive(oop obj) override;

// ---------- Safepoint interface hooks
//
public:
  void safepoint_synchronize_begin() override;
  void safepoint_synchronize_end() override;

// ---------- Code roots handling hooks
//
public:
  void register_nmethod(nmethod* nm) override;
  void unregister_nmethod(nmethod* nm) override;
  void verify_nmethod(nmethod* nm) override {}

// ---------- Pinning hooks
//
public:
  // Shenandoah supports per-object (per-region) pinning
  void pin_object(JavaThread* thread, oop obj) override;
  void unpin_object(JavaThread* thread, oop obj) override;

  void sync_pinned_region_status();
  void assert_pinned_region_status() NOT_DEBUG_RETURN;

// ---------- Concurrent Stack Processing support
//
public:
  bool uses_stack_watermark_barrier() const override { return true; }

// ---------- Allocation support
//
private:
  HeapWord* allocate_memory_under_lock(ShenandoahAllocRequest& request, bool& in_new_region);
  inline HeapWord* allocate_from_gclab(Thread* thread, size_t size);
  HeapWord* allocate_from_gclab_slow(Thread* thread, size_t size);
  HeapWord* allocate_new_gclab(size_t min_size, size_t word_size, size_t* actual_size);

public:
  HeapWord* allocate_memory(ShenandoahAllocRequest& request);
  HeapWord* mem_allocate(size_t size, bool* what) override;
  MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                               size_t size,
                                               Metaspace::MetadataType mdtype) override;

  void notify_mutator_alloc_words(size_t words, bool waste);

  HeapWord* allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) override;
  size_t tlab_capacity(Thread *thr) const override;
  size_t unsafe_max_tlab_alloc(Thread *thread) const override;
  size_t max_tlab_size() const override;
  size_t tlab_used(Thread* ignored) const override;

  void ensure_parsability(bool retire_labs) override;

  void labs_make_parsable();
  void tlabs_retire(bool resize);
  void gclabs_retire(bool resize);

// ---------- Marking support
//
private:
  ShenandoahMarkingContext* _marking_context;
  MemRegion  _bitmap_region;
  MemRegion  _aux_bitmap_region;
  MarkBitMap _verification_bit_map;
  MarkBitMap _aux_bit_map;

  size_t _bitmap_size;
  size_t _bitmap_regions_per_slice;
  size_t _bitmap_bytes_per_slice;

  size_t _pretouch_heap_page_size;
  size_t _pretouch_bitmap_page_size;

  bool _bitmap_region_special;
  bool _aux_bitmap_region_special;

  ShenandoahLiveData** _liveness_cache;

public:
  inline ShenandoahMarkingContext* complete_marking_context() const;
  inline ShenandoahMarkingContext* marking_context() const;
  inline void mark_complete_marking_context();
  inline void mark_incomplete_marking_context();

  template<class T>
  inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl);

  template<class T>
  inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);

  template<class T>
  inline void marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);

  void reset_mark_bitmap();

  // SATB barriers hooks
  inline bool requires_marking(const void* entry) const;

  // Support for bitmap uncommits
  bool commit_bitmap_slice(ShenandoahHeapRegion *r);
  bool uncommit_bitmap_slice(ShenandoahHeapRegion *r);
  bool is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self = false);

  // Liveness caching support
  ShenandoahLiveData* get_liveness_cache(uint worker_id);
  void flush_liveness_cache(uint worker_id);
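
  // Illustrative sketch: a marking worker accumulates live data in its
  // per-worker cache (one ShenandoahLiveData slot per region) and flushes it
  // back to the regions later, avoiding an atomic update per marked object
  // (bounds and overflow handling elided):
  //
  //   ShenandoahLiveData* cache = heap->get_liveness_cache(worker_id);
  //   cache[region_index] += (ShenandoahLiveData)live_words;
  //   heap->flush_liveness_cache(worker_id);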

  size_t pretouch_heap_page_size() { return _pretouch_heap_page_size; }

// ---------- Evacuation support
//
private:
  ShenandoahCollectionSet* _collection_set;
  ShenandoahEvacOOMHandler _oom_evac_handler;

public:
  static address in_cset_fast_test_addr();

  ShenandoahCollectionSet* collection_set() const { return _collection_set; }

  // Checks if object is in the collection set.
  inline bool in_collection_set(oop obj) const;

  // Checks if location is in the collection set. Can be interior pointer, not the oop itself.
  inline bool in_collection_set_loc(void* loc) const;

  // Evacuates object src. Returns the evacuated object, either evacuated
  // by this thread, or by some other thread.
  inline oop evacuate_object(oop src, Thread* thread);

  // Call before/after evacuation.
  inline void enter_evacuation(Thread* t);
  inline void leave_evacuation(Thread* t);
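
  // Illustrative sketch: evacuation is bracketed by enter/leave so the
  // OOM-during-evacuation protocol can track this thread; the returned oop
  // may be a copy made by some other racing thread:
  //
  //   heap->enter_evacuation(thread);
  //   oop fwd = heap->evacuate_object(src, thread);
  //   heap->leave_evacuation(thread);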

// ---------- Helper functions
//
public:
  template <class T>
  inline void conc_update_with_forwarded(T* p);

  template <class T>
  inline void update_with_forwarded(T* p);

  static inline void atomic_update_oop(oop update,       oop* addr,       oop compare);
  static inline void atomic_update_oop(oop update, narrowOop* addr,       oop compare);
  static inline void atomic_update_oop(oop update, narrowOop* addr, narrowOop compare);

  static inline bool atomic_update_oop_check(oop update,       oop* addr,       oop compare);
  static inline bool atomic_update_oop_check(oop update, narrowOop* addr,       oop compare);
  static inline bool atomic_update_oop_check(oop update, narrowOop* addr, narrowOop compare);

  static inline void atomic_clear_oop(      oop* addr,       oop compare);
  static inline void atomic_clear_oop(narrowOop* addr,       oop compare);
  static inline void atomic_clear_oop(narrowOop* addr, narrowOop compare);

  void trash_humongous_region_at(ShenandoahHeapRegion *r);

private:
  void trash_cset_regions();

// ---------- Testing helpers functions
//
private:
  ShenandoahSharedFlag _inject_alloc_failure;

  void try_inject_alloc_failure();
  bool should_inject_alloc_failure();
};

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP

#include "gc/shared/markBitMap.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shenandoah/mode/shenandoahGenerationalMode.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahAllocRequest.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
#include "gc/shenandoah/shenandoahEvacTracker.hpp"
#include "gc/shenandoah/shenandoahMmuTracker.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "gc/shenandoah/shenandoahSharedVariables.hpp"
#include "gc/shenandoah/shenandoahUnload.hpp"
#include "gc/shenandoah/shenandoahScanRemembered.hpp"
#include "memory/metaspace.hpp"
#include "services/memoryManager.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.hpp"

class ConcurrentGCTimer;
class ObjectIterateScanRootClosure;
class PLAB;
class ShenandoahCollectorPolicy;
class ShenandoahControlThread;
class ShenandoahRegulatorThread;
class ShenandoahGCSession;
class ShenandoahGCStateResetter;
class ShenandoahGeneration;
class ShenandoahYoungGeneration;
class ShenandoahOldGeneration;
class ShenandoahHeuristics;
class ShenandoahOldHeuristics;
class ShenandoahMarkingContext;
class ShenandoahPhaseTimings;
class ShenandoahHeap;
class ShenandoahHeapRegion;
class ShenandoahHeapRegionClosure;
class ShenandoahCollectionSet;
class ShenandoahFreeSet;
class ShenandoahConcurrentMark;
class ShenandoahFullGC;
class ShenandoahMonitoringSupport;
class ShenandoahPacer;
class ShenandoahReferenceProcessor;
class ShenandoahVerifier;
class ShenandoahWorkerThreads;
class VMStructs;

// Used for buffering per-region liveness data.
// Needed since ShenandoahHeapRegion uses atomics to update liveness.
// The ShenandoahHeap array has max-workers elements, each of which is an array of
// uint16_t * max_regions. The choice of uint16_t is not accidental:
// there is a tradeoff between static/dynamic footprint that translates
// into cache pressure (which is already high during marking), and
// too many atomic updates. size_t/jint is too large, uint8_t is too small.
typedef uint16_t ShenandoahLiveData;
#define SHENANDOAH_LIVEDATA_MAX ((ShenandoahLiveData)-1)

class ShenandoahRegionIterator : public StackObj {
private:
  ShenandoahHeap* _heap;

  shenandoah_padding(0);
  volatile size_t _index;
  shenandoah_padding(1);

  // No implicit copying: iterators should be passed by reference to capture the state
  NONCOPYABLE(ShenandoahRegionIterator);

public:
  ShenandoahRegionIterator();
  ShenandoahRegionIterator(ShenandoahHeap* heap);

  // Reset iterator to default state
  void reset();

  // Returns next region, or null if there are no more regions.
  // This is multi-thread-safe.
  inline ShenandoahHeapRegion* next();

  // This is *not* MT safe. However, in the absence of multithreaded access, it
  // can be used to determine if there is more work to do.
  bool has_next() const;
};

class ShenandoahHeapRegionClosure : public StackObj {
public:
  virtual void heap_region_do(ShenandoahHeapRegion* r) = 0;
  virtual bool is_thread_safe() { return false; }
};

template<GenerationMode GENERATION>
class ShenandoahGenerationRegionClosure : public ShenandoahHeapRegionClosure {
public:
  explicit ShenandoahGenerationRegionClosure(ShenandoahHeapRegionClosure* cl) : _cl(cl) {}
  void heap_region_do(ShenandoahHeapRegion* r);
  virtual bool is_thread_safe() { return _cl->is_thread_safe(); }
private:
  ShenandoahHeapRegionClosure* _cl;
};
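
// Illustrative sketch: wrapping an existing closure restricts it to regions of
// one generation while preserving the wrapped closure's thread-safety answer.
// YOUNG is assumed to be a GenerationMode enumerator; my_closure is hypothetical:
//
//   ShenandoahGenerationRegionClosure<YOUNG> young_only(&my_closure);
//   heap->heap_region_iterate(&young_only);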

typedef ShenandoahLock ShenandoahHeapLock;
typedef ShenandoahLocker ShenandoahHeapLocker;
typedef Stack<oop, mtGC> ShenandoahScanObjectStack;

// Shenandoah GC is a low-pause concurrent GC that uses Brooks forwarding pointers
// to encode forwarding data. See BrooksPointer for details on forwarding data encoding.
// See ShenandoahControlThread for GC cycle structure.
//
class ShenandoahHeap : public CollectedHeap {
  friend class ShenandoahAsserts;
  friend class VMStructs;
  friend class ShenandoahGCSession;
  friend class ShenandoahGCStateResetter;
  friend class ShenandoahParallelObjectIterator;
  friend class ShenandoahSafepoint;
  // Supported GC
  friend class ShenandoahConcurrentGC;
  friend class ShenandoahOldGC;
  friend class ShenandoahDegenGC;
  friend class ShenandoahFullGC;
  friend class ShenandoahUnload;

// ---------- Locks that guard important data structures in Heap
//
private:
  ShenandoahHeapLock _lock;
  ShenandoahGeneration* _gc_generation;

  // true iff we are concurrently coalescing and filling old-gen HeapRegions
  bool _prepare_for_old_mark;

public:
  ShenandoahHeapLock* lock() {
    return &_lock;
  }

  ShenandoahGeneration* active_generation() const {
    // last or latest generation might be a better name here.
    return _gc_generation;
  }

  void set_gc_generation(ShenandoahGeneration* generation) {
    _gc_generation = generation;
  }

  ShenandoahOldHeuristics* old_heuristics();

  bool doing_mixed_evacuations();
  bool is_old_bitmap_stable() const;
  bool is_gc_generation_young() const;

// ---------- Initialization, termination, identification, printing routines
//
public:
  static ShenandoahHeap* heap();

  const char* name() const override { return "Shenandoah"; }
  ShenandoahHeap::Name kind() const override { return CollectedHeap::Shenandoah; }

  ShenandoahHeap(ShenandoahCollectorPolicy* policy);
  jint initialize() override;
  void post_initialize() override;
  void initialize_mode();
  void initialize_heuristics();
  void initialize_generations();

  void initialize_serviceability() override;

  void print_on(outputStream* st) const override;
  void print_extended_on(outputStream *st) const override;
  void print_tracing_info() const override;
  void print_heap_regions_on(outputStream* st) const;

  void stop() override;

  void prepare_for_verify() override;
  void verify(VerifyOption vo) override;
  void verify_rem_set_at_mark();
  void verify_rem_set_at_update_ref();
  void verify_rem_set_after_full_gc();

  // WhiteBox testing support.
  bool supports_concurrent_gc_breakpoints() const override {
    return true;
  }

// ---------- Heap counters and metrics
//
private:
  size_t _initial_size;
  size_t _minimum_size;
  volatile size_t _soft_max_size;
  shenandoah_padding(0);
  volatile size_t _used;
  volatile size_t _committed;
  shenandoah_padding(1);

  void help_verify_region_rem_set(ShenandoahHeapRegion* r, ShenandoahMarkingContext* ctx,
                                  HeapWord* from, HeapWord* top, HeapWord* update_watermark, const char* message);

public:
  void increase_used(size_t bytes);
  void decrease_used(size_t bytes);
  void set_used(size_t bytes);

  void increase_committed(size_t bytes);
  void decrease_committed(size_t bytes);

  void reset_bytes_allocated_since_gc_start();

  size_t min_capacity() const;
  size_t max_capacity() const override;
  size_t soft_max_capacity() const;
  size_t initial_capacity() const;
  size_t capacity() const override;
  size_t used() const override;
  size_t committed() const;

  void set_soft_max_capacity(size_t v);

// ---------- Workers handling
//
private:
  uint _max_workers;
  ShenandoahWorkerThreads* _workers;
  ShenandoahWorkerThreads* _safepoint_workers;

public:
  uint max_workers();
  void assert_gc_workers(uint nworker) NOT_DEBUG_RETURN;

  WorkerThreads* workers() const;
  WorkerThreads* safepoint_workers() override;

  void gc_threads_do(ThreadClosure* tcl) const override;

// ---------- Heap regions handling machinery
//
private:
  MemRegion _heap_region;
  bool      _heap_region_special;
  size_t    _num_regions;
  ShenandoahHeapRegion** _regions;
  uint8_t*  _affiliations;  // Holds array of enum ShenandoahRegionAffiliation, including FREE status in non-generational mode
  ShenandoahRegionIterator _update_refs_iterator;

public:

  inline HeapWord* base() const { return _heap_region.start(); }

  inline size_t num_regions() const { return _num_regions; }
  inline bool is_heap_region_special() { return _heap_region_special; }

  inline ShenandoahHeapRegion* const heap_region_containing(const void* addr) const;
  inline size_t heap_region_index_containing(const void* addr) const;

  inline ShenandoahHeapRegion* const get_region(size_t region_idx) const;

  void heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
  void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;

// ---------- GC state machinery
//
// GC state describes the important parts of collector state that may be
// used to make barrier selection decisions in the native and generated code.
// Multiple bits can be set at once.
//
// Important invariant: when GC state is zero, the heap is stable, and no barriers
// are required.
//
public:
  enum GCStateBitPos {
    // Heap has forwarded objects: needs LRB barriers.
    HAS_FORWARDED_BITPOS = 0,

    // Young regions are under marking: needs SATB barriers.
    YOUNG_MARKING_BITPOS = 1,

    // Heap is under evacuation: needs LRB barriers. (Set together with HAS_FORWARDED)
    EVACUATION_BITPOS = 2,

    // Heap is under updating: needs no additional barriers.
    UPDATEREFS_BITPOS = 3,

    // Heap is under weak-reference/roots processing: needs weak-LRB barriers.
    WEAK_ROOTS_BITPOS = 4,

    // Old regions are under marking, still need SATB barriers.
    OLD_MARKING_BITPOS = 5
  };

  enum GCState {
    STABLE        = 0,
    HAS_FORWARDED = 1 << HAS_FORWARDED_BITPOS,
    YOUNG_MARKING = 1 << YOUNG_MARKING_BITPOS,
    EVACUATION    = 1 << EVACUATION_BITPOS,
    UPDATEREFS    = 1 << UPDATEREFS_BITPOS,
    WEAK_ROOTS    = 1 << WEAK_ROOTS_BITPOS,
    OLD_MARKING   = 1 << OLD_MARKING_BITPOS
  };
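
  // Illustrative sketch: in generational mode "marking in progress" means
  // either young or old marking, so SATB-related checks test both bits at once:
  //
  //   char s = gc_state();
  //   bool any_marking = (s & (YOUNG_MARKING | OLD_MARKING)) != 0;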

private:
  ShenandoahSharedBitmap _gc_state;
  ShenandoahSharedFlag   _degenerated_gc_in_progress;
  ShenandoahSharedFlag   _full_gc_in_progress;
  ShenandoahSharedFlag   _full_gc_move_in_progress;
  ShenandoahSharedFlag   _progress_last_gc;
  ShenandoahSharedFlag   _concurrent_strong_root_in_progress;

  // _alloc_supplement_reserve is a supplemental budget for new memory allocations. During evacuation and update-references,
  // mutator allocation requests are "authorized" iff young_gen->available() plus _alloc_supplement_reserve minus
  // _young_evac_reserve is greater than the request size. The values of _alloc_supplement_reserve and _young_evac_reserve
  // are zero except during the evacuation and update-reference phases of GC. Both of these values are established at
  // the start of evacuation, and they remain constant throughout the duration of these two phases of GC. Since these
  // two values are constant throughout each GC phase, we introduce a new service into ShenandoahGeneration. This service
  // provides adjusted_available() based on an adjusted capacity. At the start of evacuation, we adjust young capacity by
  // adding the amount to be borrowed from old-gen and subtracting the _young_evac_reserve; we adjust old capacity by
  // subtracting the amount to be loaned to young-gen.
  //
  // We always use adjusted capacities to determine permission to allocate within young and to promote into old. Note
  // that adjusted capacities equal traditional capacities except during evacuation and update refs.
  //
  // During evacuation, we assure that _old_evac_expended does not exceed _old_evac_reserve.
  //
  // At the end of update references, we perform the following bookkeeping activities:
  //
  // 1. Unadjust the capacity within young-gen and old-gen to undo the effects of borrowing memory from old-gen. Note that
  //    the entirety of the collection set is now available, so allocation capacity naturally increases at this time.
  // 2. Clear (reset to zero) _alloc_supplement_reserve, _young_evac_reserve, _old_evac_reserve, and _promoted_reserve.
  //
  // _young_evac_reserve and _old_evac_reserve are only non-zero during evacuation and update-references.
  //
  // Allocation of old GCLABs assures that _old_evac_expended + request-size < _old_evac_reserve. If the allocation
  // is authorized, increment _old_evac_expended by the request size. This allocation ignores old_gen->available().
  //
  // Note that the typical total expenditure on evacuation is less than the associated evacuation reserve because we generally
  // reserve ShenandoahEvacWaste (> 1.0) times the anticipated evacuation need. In the case that there is an excessive amount
  // of waste, it may be that one thread fails to grab a new GCLAB; this does not necessarily doom the associated evacuation
  // effort. If this happens, the requesting thread blocks until some other thread manages to evacuate the offending object.
  // Only after "all" threads fail to evacuate an object do we consider the evacuation effort to have failed.

  intptr_t _alloc_supplement_reserve;  // Bytes reserved for young allocations during evac and update refs
  size_t _promoted_reserve;            // Bytes reserved within old-gen to hold the results of promotion
  volatile size_t _promoted_expended;  // Bytes of old-gen memory expended on promotions

  size_t _old_evac_reserve;            // Bytes reserved within old-gen to hold evacuated objects from old-gen collection set
  volatile size_t _old_evac_expended;  // Bytes of old-gen memory expended on old-gen evacuations

  size_t _young_evac_reserve;          // Bytes reserved within young-gen to hold evacuated objects from young-gen collection set

  size_t _captured_old_usage;          // What was old usage (bytes) when last captured?

  size_t _previous_promotion;          // Bytes promoted during previous evacuation

  bool _upgraded_to_full;

  void set_gc_state_all_threads(char state);
  void set_gc_state_mask(uint mask, bool value);

public:
  char gc_state() const;
  static address gc_state_addr();

  void set_concurrent_young_mark_in_progress(bool in_progress);
  void set_concurrent_old_mark_in_progress(bool in_progress);
  void set_evacuation_in_progress(bool in_progress);
  void set_update_refs_in_progress(bool in_progress);
  void set_degenerated_gc_in_progress(bool in_progress);
  void set_full_gc_in_progress(bool in_progress);
  void set_full_gc_move_in_progress(bool in_progress);
  void set_has_forwarded_objects(bool cond);
  void set_concurrent_strong_root_in_progress(bool cond);
  void set_concurrent_weak_root_in_progress(bool cond);
  void set_prepare_for_old_mark_in_progress(bool cond);
  void set_aging_cycle(bool cond);

  inline bool is_stable() const;
  inline bool is_idle() const;
  inline bool is_concurrent_mark_in_progress() const;
  inline bool is_concurrent_young_mark_in_progress() const;
  inline bool is_concurrent_old_mark_in_progress() const;
  inline bool is_update_refs_in_progress() const;
  inline bool is_evacuation_in_progress() const;
  inline bool is_degenerated_gc_in_progress() const;
  inline bool is_full_gc_in_progress() const;
  inline bool is_full_gc_move_in_progress() const;
  inline bool has_forwarded_objects() const;
  inline bool is_gc_in_progress_mask(uint mask) const;
  inline bool is_stw_gc_in_progress() const;
  inline bool is_concurrent_strong_root_in_progress() const;
  inline bool is_concurrent_weak_root_in_progress() const;
  inline bool is_prepare_for_old_mark_in_progress() const;
  inline bool is_aging_cycle() const;
  inline bool upgraded_to_full() { return _upgraded_to_full; }
  inline void start_conc_gc() { _upgraded_to_full = false; }
  inline void record_upgrade_to_full() { _upgraded_to_full = true; }

  inline size_t capture_old_usage(size_t usage);
  inline void set_previous_promotion(size_t promoted_bytes);
  inline size_t get_previous_promotion() const;

  // Returns previous value
  inline size_t set_promoted_reserve(size_t new_val);
  inline size_t get_promoted_reserve() const;

  inline void reset_promoted_expended();
  inline size_t expend_promoted(size_t increment);
  inline size_t unexpend_promoted(size_t decrement);
  inline size_t get_promoted_expended();

  // Returns previous value
  inline size_t set_old_evac_reserve(size_t new_val);
  inline size_t get_old_evac_reserve() const;

  inline void reset_old_evac_expended();
  inline size_t expend_old_evac(size_t increment);
  inline size_t get_old_evac_expended();
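
  // Illustrative sketch of the old-evacuation budget check described in the
  // comment above (assumed shape; the real check lives in the allocation path):
  //
  //   if (get_old_evac_expended() + request_bytes < get_old_evac_reserve()) {
  //     expend_old_evac(request_bytes);  // authorized; old_gen->available() is ignored
  //   }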

  // Returns previous value
  inline size_t set_young_evac_reserve(size_t new_val);
  inline size_t get_young_evac_reserve() const;

  // Returns previous value. This is a signed value because it is the amount borrowed minus the amount reserved for
  // young-gen evacuation. In case we cannot borrow much, this value might be negative.
  inline intptr_t set_alloc_supplement_reserve(intptr_t new_val);
  inline intptr_t get_alloc_supplement_reserve() const;
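
  // Illustrative sketch of the mutator-allocation authorization rule described
  // above (assumed shape; young stands for the young generation):
  //
  //   intptr_t budget = (intptr_t)young->available()
  //                   + get_alloc_supplement_reserve()
  //                   - (intptr_t)get_young_evac_reserve();
  //   bool authorized = budget > (intptr_t)request_size;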

private:
  void manage_satb_barrier(bool active);

  enum CancelState {
    // Normal state. GC has not been cancelled and is open for cancellation.
    // Worker threads can suspend for safepoint.
    CANCELLABLE,

    // GC has been cancelled. Worker threads cannot suspend for
    // safepoint but must finish their work as soon as possible.
    CANCELLED,

    // GC has not been cancelled and must not be cancelled. At least
    // one worker thread checks for pending safepoint and may suspend
    // if a safepoint is pending.
    NOT_CANCELLED
  };

  double _cancel_requested_time;
  ShenandoahSharedEnumFlag<CancelState> _cancelled_gc;

  // Returns true if cancel request was successfully communicated.
  // Returns false if some other thread already communicated cancel
  // request. A true return value does not mean GC has been
  // cancelled, only that the process of cancelling GC has begun.
  bool try_cancel_gc();

public:
  inline bool cancelled_gc() const;
  inline bool check_cancelled_gc_and_yield(bool sts_active = true);

  inline void clear_cancelled_gc(bool clear_oom_handler = true);

  void cancel_concurrent_mark();
  void cancel_gc(GCCause::Cause cause);

public:
  // Elastic heap support
  void entry_uncommit(double shrink_before, size_t shrink_until);
  void op_uncommit(double shrink_before, size_t shrink_until);

private:
  // GC support
  // Evacuation
  void evacuate_collection_set(bool concurrent);
  // Concurrent root processing
  void prepare_concurrent_roots();
  void finish_concurrent_roots();
  // Concurrent class unloading support
  void do_class_unloading();
  // Reference updating
  void prepare_update_heap_references(bool concurrent);
  void update_heap_references(bool concurrent);
  // Final update region states
  void update_heap_region_states(bool concurrent);
  void rebuild_free_set(bool concurrent);

  void rendezvous_threads();
  void recycle_trash();
public:
  void notify_gc_progress()    { _progress_last_gc.set();   }
  void notify_gc_no_progress() { _progress_last_gc.unset(); }

//
// Mark support
private:
  ShenandoahYoungGeneration* _young_generation;
  ShenandoahGeneration*      _global_generation;
  ShenandoahOldGeneration*   _old_generation;

  ShenandoahControlThread*   _control_thread;
  ShenandoahRegulatorThread* _regulator_thread;
  ShenandoahCollectorPolicy* _shenandoah_policy;
  ShenandoahMode*            _gc_mode;
  ShenandoahFreeSet*         _free_set;
  ShenandoahPacer*           _pacer;
  ShenandoahVerifier*        _verifier;

  ShenandoahPhaseTimings*      _phase_timings;
  ShenandoahEvacuationTracker* _evac_tracker;
  ShenandoahMmuTracker         _mmu_tracker;
  ShenandoahGenerationSizer    _generation_sizer;

  ShenandoahRegulatorThread* regulator_thread() { return _regulator_thread; }

public:
  ShenandoahControlThread*   control_thread() { return _control_thread; }
  ShenandoahYoungGeneration* young_generation()  const { return _young_generation;  }
  ShenandoahGeneration*      global_generation() const { return _global_generation; }
  ShenandoahOldGeneration*   old_generation()    const { return _old_generation;    }
  ShenandoahGeneration*      generation_for(ShenandoahRegionAffiliation affiliation) const;
  const ShenandoahGenerationSizer* generation_sizer() const { return &_generation_sizer; }

  size_t max_size_for(ShenandoahGeneration* generation) const;
  size_t min_size_for(ShenandoahGeneration* generation) const;

  ShenandoahCollectorPolicy* shenandoah_policy() const { return _shenandoah_policy; }
  ShenandoahMode*            mode()              const { return _gc_mode;           }
  ShenandoahFreeSet*         free_set()          const { return _free_set;          }
  ShenandoahPacer*           pacer()             const { return _pacer;             }

  ShenandoahPhaseTimings*      phase_timings() const { return _phase_timings; }
  ShenandoahEvacuationTracker* evac_tracker()  const { return _evac_tracker;  }

  void on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation);
  void on_cycle_end(ShenandoahGeneration* generation);

  ShenandoahVerifier* verifier();

// ---------- VM subsystem bindings
//
private:
  ShenandoahMonitoringSupport* _monitoring_support;
  MemoryPool*                  _memory_pool;
  MemoryPool*                  _young_gen_memory_pool;
  MemoryPool*                  _old_gen_memory_pool;

  GCMemoryManager    _stw_memory_manager;
  GCMemoryManager    _cycle_memory_manager;
  ConcurrentGCTimer* _gc_timer;
  SoftRefPolicy      _soft_ref_policy;

  // For exporting to SA
  int                _log_min_obj_alignment_in_bytes;
public:
  ShenandoahMonitoringSupport* monitoring_support() const { return _monitoring_support; }
  GCMemoryManager* cycle_memory_manager()   { return &_cycle_memory_manager; }
  GCMemoryManager* stw_memory_manager()     { return &_stw_memory_manager; }
  SoftRefPolicy* soft_ref_policy() override { return &_soft_ref_policy; }

  GrowableArray<GCMemoryManager*> memory_managers() override;
  GrowableArray<MemoryPool*> memory_pools() override;
  MemoryUsage memory_usage() override;
  GCTracer* tracer();
  ConcurrentGCTimer* gc_timer() const;

// ---------- Class Unloading
//
private:
  ShenandoahSharedFlag _is_aging_cycle;
  ShenandoahSharedFlag _unload_classes;
  ShenandoahUnload     _unloader;

public:
  void set_unload_classes(bool uc);
  bool unload_classes() const;

  // Perform STW class unloading and weak root cleaning
  void parallel_cleaning(bool full_gc);

private:
  void stw_unload_classes(bool full_gc);
  void stw_process_weak_roots(bool full_gc);
  void stw_weak_refs(bool full_gc);

  inline void assert_lock_for_affiliation(ShenandoahRegionAffiliation orig_affiliation,
                                          ShenandoahRegionAffiliation new_affiliation);

  // Heap iteration support
  void scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops);
  bool prepare_aux_bitmap_for_iteration();
  void reclaim_aux_bitmap_for_iteration();

// ---------- Generic interface hooks
// Minor things that the super-interface expects us to implement to play nice with
// the rest of the runtime. Some of the things here are not required to be implemented,
// and can be stubbed out.
//
public:
  bool is_maximal_no_gc() const override shenandoah_not_implemented_return(false);

  inline bool is_in(const void* p) const override;

  inline bool is_in_active_generation(oop obj) const;
  inline bool is_in_young(const void* p) const;
  inline bool is_in_old(const void* p) const;
  inline bool is_old(oop pobj) const;

  inline ShenandoahRegionAffiliation region_affiliation(const ShenandoahHeapRegion* r);
  inline void set_affiliation(ShenandoahHeapRegion* r, ShenandoahRegionAffiliation new_affiliation);

  inline ShenandoahRegionAffiliation region_affiliation(size_t index);
  inline void set_affiliation(size_t index, ShenandoahRegionAffiliation new_affiliation);

  bool requires_barriers(stackChunkOop obj) const override;

  MemRegion reserved_region() const { return _reserved; }
  bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); }

  void collect(GCCause::Cause cause) override;
  void do_full_collection(bool clear_all_soft_refs) override;

  // Used for parsing heap during error printing
  HeapWord* block_start(const void* addr) const;
  bool block_is_obj(const HeapWord* addr) const;
  bool print_location(outputStream* st, void* addr) const override;

  // Used for native heap walkers: heap dumpers, mostly
  void object_iterate(ObjectClosure* cl) override;
  // Parallel heap iteration support
  ParallelObjectIteratorImpl* parallel_object_iterator(uint workers) override;

  // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
  void keep_alive(oop obj) override;

// ---------- Safepoint interface hooks
//
public:
  void safepoint_synchronize_begin() override;
  void safepoint_synchronize_end() override;

// ---------- Code roots handling hooks
//
public:
  void register_nmethod(nmethod* nm) override;
  void unregister_nmethod(nmethod* nm) override;
  void verify_nmethod(nmethod* nm) override {}

// ---------- Pinning hooks
//
public:
  // Shenandoah supports per-object (per-region) pinning
  void pin_object(JavaThread* thread, oop obj) override;
  void unpin_object(JavaThread* thread, oop obj) override;

  void sync_pinned_region_status();
  void assert_pinned_region_status() NOT_DEBUG_RETURN;

// ---------- Concurrent Stack Processing support
//
public:
  bool uses_stack_watermark_barrier() const override { return true; }

// ---------- Allocation support
//
private:
  HeapWord* allocate_memory_under_lock(ShenandoahAllocRequest& request, bool& in_new_region, bool is_promotion);

  inline HeapWord* allocate_from_gclab(Thread* thread, size_t size);
  HeapWord* allocate_from_gclab_slow(Thread* thread, size_t size);
  HeapWord* allocate_new_gclab(size_t min_size, size_t word_size, size_t* actual_size);

  inline HeapWord* allocate_from_plab(Thread* thread, size_t size, bool is_promotion);
  HeapWord* allocate_from_plab_slow(Thread* thread, size_t size, bool is_promotion);
  HeapWord* allocate_new_plab(size_t min_size, size_t word_size, size_t* actual_size);

public:
  HeapWord* allocate_memory(ShenandoahAllocRequest& request, bool is_promotion);
  HeapWord* mem_allocate(size_t size, bool* what) override;
  MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                               size_t size,
                                               Metaspace::MetadataType mdtype) override;

  void notify_mutator_alloc_words(size_t words, bool waste);

  HeapWord* allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) override;
  size_t tlab_capacity(Thread *thr) const override;
  size_t unsafe_max_tlab_alloc(Thread *thread) const override;
  size_t max_tlab_size() const override;
  size_t tlab_used(Thread* ignored) const override;

  void ensure_parsability(bool retire_labs) override;

  void labs_make_parsable();
  void tlabs_retire(bool resize);
  void gclabs_retire(bool resize);

  void set_young_lab_region_flags();

// ---------- Marking support
//
private:
  ShenandoahMarkingContext* _marking_context;
  MemRegion  _bitmap_region;
  MemRegion  _aux_bitmap_region;
  MarkBitMap _verification_bit_map;
  MarkBitMap _aux_bit_map;

  size_t _bitmap_size;
  size_t _bitmap_regions_per_slice;
  size_t _bitmap_bytes_per_slice;

  size_t _pretouch_heap_page_size;
  size_t _pretouch_bitmap_page_size;

  bool _bitmap_region_special;
  bool _aux_bitmap_region_special;

  ShenandoahLiveData** _liveness_cache;

public:
  inline ShenandoahMarkingContext* complete_marking_context() const;
  inline ShenandoahMarkingContext* marking_context() const;

  template<class T>
  inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl);

  template<class T>
  inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);

  template<class T>
  inline void marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);

  // SATB barriers hooks
  inline bool requires_marking(const void* entry) const;

  // Support for bitmap uncommits
  bool commit_bitmap_slice(ShenandoahHeapRegion *r);
  bool uncommit_bitmap_slice(ShenandoahHeapRegion *r);
  bool is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self = false);

  // Liveness caching support
  ShenandoahLiveData* get_liveness_cache(uint worker_id);
  void flush_liveness_cache(uint worker_id);

  size_t pretouch_heap_page_size() { return _pretouch_heap_page_size; }

// ---------- Evacuation support
//
private:
  ShenandoahCollectionSet* _collection_set;
  ShenandoahEvacOOMHandler _oom_evac_handler;
  ShenandoahSharedFlag     _old_gen_oom_evac;

  inline oop try_evacuate_object(oop src, Thread* thread, ShenandoahHeapRegion* from_region, ShenandoahRegionAffiliation target_gen);
  void handle_old_evacuation(HeapWord* obj, size_t words, bool promotion);
  void handle_old_evacuation_failure();

public:
  void handle_promotion_failure();
  void report_promotion_failure(Thread* thread, size_t size);

  static address in_cset_fast_test_addr();

  ShenandoahCollectionSet* collection_set() const { return _collection_set; }

  // Checks if object is in the collection set.
  inline bool in_collection_set(oop obj) const;

  // Checks if location is in the collection set. Can be interior pointer, not the oop itself.
  inline bool in_collection_set_loc(void* loc) const;

  // Evacuates or promotes object src. Returns the evacuated object, either evacuated
  // by this thread, or by some other thread.
  inline oop evacuate_object(oop src, Thread* thread);

  // Call before/after evacuation.
  inline void enter_evacuation(Thread* t);
  inline void leave_evacuation(Thread* t);
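
  // Illustrative sketch: in generational mode evacuate_object() may promote a
  // sufficiently aged young object into old-gen rather than copying it within
  // young; as before, the returned oop may be a copy made by a racing thread:
  //
  //   heap->enter_evacuation(thread);
  //   oop fwd = heap->evacuate_object(src, thread);  // young copy or promotion
  //   heap->leave_evacuation(thread);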

  inline bool clear_old_evacuation_failure();

// ---------- Generational support
//
private:
  RememberedScanner* _card_scan;

public:
  inline RememberedScanner* card_scan() { return _card_scan; }
  void clear_cards_for(ShenandoahHeapRegion* region);
  void dirty_cards(HeapWord* start, HeapWord* end);
  void clear_cards(HeapWord* start, HeapWord* end);
  void mark_card_as_dirty(void* location);
  void retire_plab(PLAB* plab);
  void retire_plab(PLAB* plab, Thread* thread);
  void cancel_old_gc();
  bool is_old_gc_active();
  void coalesce_and_fill_old_regions();
  bool adjust_generation_sizes();
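
  // Illustrative sketch: stores that create old-to-young pointers dirty the
  // covering card so a later young collection can find them via card_scan();
  // field_addr is a hypothetical location inside an old-gen object:
  //
  //   heap->mark_card_as_dirty((void*)field_addr);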

// ---------- Helper functions
//
public:
  template <class T>
  inline void conc_update_with_forwarded(T* p);

  template <class T>
  inline void update_with_forwarded(T* p);

  static inline void atomic_update_oop(oop update,       oop* addr,       oop compare);
  static inline void atomic_update_oop(oop update, narrowOop* addr,       oop compare);
  static inline void atomic_update_oop(oop update, narrowOop* addr, narrowOop compare);

  static inline bool atomic_update_oop_check(oop update,       oop* addr,       oop compare);
  static inline bool atomic_update_oop_check(oop update, narrowOop* addr,       oop compare);
  static inline bool atomic_update_oop_check(oop update, narrowOop* addr, narrowOop compare);

  static inline void atomic_clear_oop(      oop* addr,       oop compare);
  static inline void atomic_clear_oop(narrowOop* addr,       oop compare);
  static inline void atomic_clear_oop(narrowOop* addr, narrowOop compare);

  size_t trash_humongous_region_at(ShenandoahHeapRegion *r);

  static inline void increase_object_age(oop obj, uint additional_age);
  static inline uint get_object_age(oop obj);

  void transfer_old_pointers_from_satb();

  void log_heap_status(const char* msg) const;

private:
  void trash_cset_regions();

// ---------- Testing helpers functions
//
private:
  ShenandoahSharedFlag _inject_alloc_failure;

  void try_inject_alloc_failure();
  bool should_inject_alloc_failure();
};

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP