1 /*
2 * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
27 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
28
29 #include "gc/shared/markBitMap.hpp"
30 #include "gc/shared/softRefPolicy.hpp"
31 #include "gc/shared/collectedHeap.hpp"
32 #include "gc/shenandoah/heuristics/shenandoahSpaceInfo.hpp"
33 #include "gc/shenandoah/shenandoahAsserts.hpp"
34 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
35 #include "gc/shenandoah/shenandoahLock.hpp"
36 #include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
37 #include "gc/shenandoah/shenandoahPadding.hpp"
38 #include "gc/shenandoah/shenandoahSharedVariables.hpp"
39 #include "gc/shenandoah/shenandoahUnload.hpp"
40 #include "memory/metaspace.hpp"
41 #include "services/memoryManager.hpp"
42 #include "utilities/globalDefinitions.hpp"
43 #include "utilities/stack.hpp"
44
45 class ConcurrentGCTimer;
46 class ObjectIterateScanRootClosure;
47 class ShenandoahCollectorPolicy;
48 class ShenandoahControlThread;
49 class ShenandoahGCSession;
50 class ShenandoahGCStateResetter;
51 class ShenandoahHeuristics;
52 class ShenandoahMarkingContext;
53 class ShenandoahMode;
54 class ShenandoahPhaseTimings;
55 class ShenandoahHeap;
56 class ShenandoahHeapRegion;
57 class ShenandoahHeapRegionClosure;
58 class ShenandoahCollectionSet;
59 class ShenandoahFreeSet;
60 class ShenandoahConcurrentMark;
61 class ShenandoahFullGC;
62 class ShenandoahMonitoringSupport;
63 class ShenandoahPacer;
64 class ShenandoahReferenceProcessor;
65 class ShenandoahVerifier;
66 class ShenandoahWorkerThreads;
67 class VMStructs;
68
69 // Used for buffering per-region liveness data.
70 // Needed since ShenandoahHeapRegion uses atomics to update liveness.
71 // The ShenandoahHeap array has max_workers elements, each of which is an array of
72 // max_regions uint16_t entries. The choice of uint16_t is not accidental:
73 // there is a tradeoff between static/dynamic footprint that translates
74 // into cache pressure (which is already high during marking), and
75 // too many atomic updates. uint32_t is too large, uint8_t is too small.
76 typedef uint16_t ShenandoahLiveData;
77 #define SHENANDOAH_LIVEDATA_MAX ((ShenandoahLiveData)-1)
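
// Illustrative sketch (not part of this header) of how a marking worker can
// buffer per-region liveness in this cache, saturating rather than overflowing
// the 16-bit counter; the flush publishes totals with region atomics. The loop
// context and the exact overflow handling here are simplified assumptions:
//
//   ShenandoahLiveData* cache = heap->get_liveness_cache(worker_id);
//   size_t idx  = heap->heap_region_index_containing(obj);
//   size_t live = (size_t)cache[idx] + obj->size();
//   cache[idx]  = (ShenandoahLiveData)MIN2<size_t>(live, SHENANDOAH_LIVEDATA_MAX);
//   ...
//   heap->flush_liveness_cache(worker_id);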
78
79 class ShenandoahRegionIterator : public StackObj {
80 private:
81 ShenandoahHeap* _heap;
82
92 ShenandoahRegionIterator(ShenandoahHeap* heap);
93
94 // Reset iterator to default state
95 void reset();
96
97 // Returns next region, or null if there are no more regions.
98 // This is multi-thread-safe.
99 inline ShenandoahHeapRegion* next();
100
101 // This is *not* MT safe. However, in the absence of multithreaded access, it
102 // can be used to determine if there is more work to do.
103 bool has_next() const;
104 };
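
// Usage sketch (illustrative): because next() is multi-thread-safe, several
// workers may claim regions from one shared iterator; each region is handed
// out exactly once. The per-region work below is a hypothetical placeholder:
//
//   ShenandoahRegionIterator it(heap);
//   for (ShenandoahHeapRegion* r = it.next(); r != nullptr; r = it.next()) {
//     do_region_work(r);
//   }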
105
106 class ShenandoahHeapRegionClosure : public StackObj {
107 public:
108 virtual void heap_region_do(ShenandoahHeapRegion* r) = 0;
109 virtual bool is_thread_safe() { return false; }
110 };
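
// Example closure (illustrative): count trash regions via heap_region_iterate().
// Because the counter below is unsynchronized, is_thread_safe() correctly stays
// false; a parallel-safe variant would need atomic or per-worker counting:
//
//   class CountTrashClosure : public ShenandoahHeapRegionClosure {
//   public:
//     size_t _trash = 0;
//     void heap_region_do(ShenandoahHeapRegion* r) override {
//       if (r->is_trash()) _trash++;
//     }
//   };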
111
112 typedef ShenandoahLock ShenandoahHeapLock;
113 typedef ShenandoahLocker ShenandoahHeapLocker;
114 typedef Stack<oop, mtGC> ShenandoahScanObjectStack;
115
116 // Shenandoah GC is a low-pause concurrent GC that uses Brooks forwarding pointers
117 // to encode forwarding data. See BrooksPointer for details on forwarding data encoding.
118 // See ShenandoahControlThread for GC cycle structure.
119 //
120 class ShenandoahHeap : public CollectedHeap, public ShenandoahSpaceInfo {
121 friend class ShenandoahAsserts;
122 friend class VMStructs;
123 friend class ShenandoahGCSession;
124 friend class ShenandoahGCStateResetter;
125 friend class ShenandoahParallelObjectIterator;
126 friend class ShenandoahSafepoint;
127 // Supported GC
128 friend class ShenandoahConcurrentGC;
129 friend class ShenandoahDegenGC;
130 friend class ShenandoahFullGC;
131 friend class ShenandoahUnload;
132
133 // ---------- Locks that guard important data structures in Heap
134 //
135 private:
136 ShenandoahHeapLock _lock;
137
138 public:
139 ShenandoahHeapLock* lock() {
140 return &_lock;
141 }
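
// Typical pattern (illustrative): take the heap lock with the scoped locker
// around mutations of lock-guarded structures, e.g. the free set:
//
//   {
//     ShenandoahHeapLocker locker(heap->lock());
//     // ... mutate heap-lock-guarded state ...
//   }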
142
143 // ---------- Initialization, termination, identification, printing routines
144 //
145 public:
146 static ShenandoahHeap* heap();
147
148 const char* name() const override { return "Shenandoah"; }
149 ShenandoahHeap::Name kind() const override { return CollectedHeap::Shenandoah; }
150
151 ShenandoahHeap(ShenandoahCollectorPolicy* policy);
152 jint initialize() override;
153 void post_initialize() override;
154 void initialize_mode();
155 void initialize_heuristics();
156
157 void initialize_serviceability() override;
158
159 void print_on(outputStream* st) const override;
160 void print_extended_on(outputStream *st) const override;
161 void print_tracing_info() const override;
162 void print_heap_regions_on(outputStream* st) const;
163
164 void stop() override;
165
166 void prepare_for_verify() override;
167 void verify(VerifyOption vo) override;
168
169 // WhiteBox testing support.
170 bool supports_concurrent_gc_breakpoints() const override {
171 return true;
172 }
173
174 // ---------- Heap counters and metrics
175 //
176 private:
177 size_t _initial_size;
178 size_t _minimum_size;
179 volatile size_t _soft_max_size;
180 shenandoah_padding(0);
181 volatile size_t _used;
182 volatile size_t _committed;
183 volatile size_t _bytes_allocated_since_gc_start;
184 shenandoah_padding(1);
185
186 public:
187 void increase_used(size_t bytes);
188 void decrease_used(size_t bytes);
189 void set_used(size_t bytes);
190
191 void increase_committed(size_t bytes);
192 void decrease_committed(size_t bytes);
193 void increase_allocated(size_t bytes);
194
195 size_t bytes_allocated_since_gc_start() const override;
196 void reset_bytes_allocated_since_gc_start();
197
198 size_t min_capacity() const;
199 size_t max_capacity() const override;
200 size_t soft_max_capacity() const override;
201 size_t initial_capacity() const;
202 size_t capacity() const override;
203 size_t used() const override;
204 size_t committed() const;
205 size_t available() const override;
206
207 void set_soft_max_capacity(size_t v);
208
209 // ---------- Workers handling
210 //
211 private:
212 uint _max_workers;
213 ShenandoahWorkerThreads* _workers;
214 ShenandoahWorkerThreads* _safepoint_workers;
215
216 public:
217 uint max_workers();
218 void assert_gc_workers(uint nworker) NOT_DEBUG_RETURN;
219
220 WorkerThreads* workers() const;
221 WorkerThreads* safepoint_workers() override;
222
223 void gc_threads_do(ThreadClosure* tcl) const override;
224
225 // ---------- Heap regions handling machinery
226 //
227 private:
228 MemRegion _heap_region;
229 bool _heap_region_special;
230 size_t _num_regions;
231 ShenandoahHeapRegion** _regions;
232 ShenandoahRegionIterator _update_refs_iterator;
233
234 public:
235
236 inline HeapWord* base() const { return _heap_region.start(); }
237
238 inline size_t num_regions() const { return _num_regions; }
239 inline bool is_heap_region_special() { return _heap_region_special; }
240
241 inline ShenandoahHeapRegion* heap_region_containing(const void* addr) const;
242 inline size_t heap_region_index_containing(const void* addr) const;
243
244 inline ShenandoahHeapRegion* get_region(size_t region_idx) const;
245
246 void heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
247 void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
248
249 // ---------- GC state machinery
250 //
251 // GC state describes the important parts of the collector state that may be
252 // used to make barrier selection decisions in the native and generated code.
253 // Multiple bits can be set at once.
254 //
255 // Important invariant: when GC state is zero, the heap is stable, and no barriers
256 // are required.
257 //
258 public:
259 enum GCStateBitPos {
260 // Heap has forwarded objects: needs LRB barriers.
261 HAS_FORWARDED_BITPOS = 0,
262
263 // Heap is under marking: needs SATB barriers.
264 MARKING_BITPOS = 1,
265
266 // Heap is under evacuation: needs LRB barriers. (Set together with HAS_FORWARDED)
267 EVACUATION_BITPOS = 2,
268
269 // Heap is under updating: needs no additional barriers.
270 UPDATEREFS_BITPOS = 3,
271
272 // Heap is under weak-reference/roots processing: needs weak-LRB barriers.
273 WEAK_ROOTS_BITPOS = 4,
274 };
275
276 enum GCState {
277 STABLE = 0,
278 HAS_FORWARDED = 1 << HAS_FORWARDED_BITPOS,
279 MARKING = 1 << MARKING_BITPOS,
280 EVACUATION = 1 << EVACUATION_BITPOS,
281 UPDATEREFS = 1 << UPDATEREFS_BITPOS,
282 WEAK_ROOTS = 1 << WEAK_ROOTS_BITPOS,
283 };
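
// Barrier-selection sketch (illustrative): readers test bits of the packed
// state, mirroring what the generated barrier stubs do:
//
//   char state = ShenandoahHeap::heap()->gc_state();
//   if (state & ShenandoahHeap::MARKING) {
//     // SATB path: enqueue the previous value on reference stores
//   }
//   if (state & ShenandoahHeap::HAS_FORWARDED) {
//     // LRB path: loaded references may point to forwarded objects
//   }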
284
285 private:
286 ShenandoahSharedBitmap _gc_state;
287 ShenandoahSharedFlag _degenerated_gc_in_progress;
288 ShenandoahSharedFlag _full_gc_in_progress;
289 ShenandoahSharedFlag _full_gc_move_in_progress;
290 ShenandoahSharedFlag _concurrent_strong_root_in_progress;
291
292 size_t _gc_no_progress_count;
293
294 void set_gc_state_all_threads(char state);
295 void set_gc_state_mask(uint mask, bool value);
296
297 public:
298 char gc_state() const;
299
300 void set_concurrent_mark_in_progress(bool in_progress);
301 void set_evacuation_in_progress(bool in_progress);
302 void set_update_refs_in_progress(bool in_progress);
303 void set_degenerated_gc_in_progress(bool in_progress);
304 void set_full_gc_in_progress(bool in_progress);
305 void set_full_gc_move_in_progress(bool in_progress);
306 void set_has_forwarded_objects(bool cond);
307 void set_concurrent_strong_root_in_progress(bool cond);
308 void set_concurrent_weak_root_in_progress(bool cond);
309
310 inline bool is_stable() const;
311 inline bool is_idle() const;
312 inline bool is_concurrent_mark_in_progress() const;
313 inline bool is_update_refs_in_progress() const;
314 inline bool is_evacuation_in_progress() const;
315 inline bool is_degenerated_gc_in_progress() const;
316 inline bool is_full_gc_in_progress() const;
317 inline bool is_full_gc_move_in_progress() const;
318 inline bool has_forwarded_objects() const;
319
320 inline bool is_stw_gc_in_progress() const;
321 inline bool is_concurrent_strong_root_in_progress() const;
322 inline bool is_concurrent_weak_root_in_progress() const;
323
324 private:
325 enum CancelState {
326 // Normal state. GC has not been cancelled and is open for cancellation.
327 // Worker threads can suspend for safepoint.
328 CANCELLABLE,
329
330 // GC has been cancelled. Worker threads can not suspend for
331 // safepoint but must finish their work as soon as possible.
332 CANCELLED
333 };
334
335 ShenandoahSharedEnumFlag<CancelState> _cancelled_gc;
336 bool try_cancel_gc();
337
338 public:
339
340 inline bool cancelled_gc() const;
341 inline bool check_cancelled_gc_and_yield(bool sts_active = true);
342
343 inline void clear_cancelled_gc();
344
345 void cancel_gc(GCCause::Cause cause);
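
// Worker-loop sketch (illustrative): concurrent phases poll cancellation and
// yield to safepoints while the state is still CANCELLABLE. has_work() and
// do_step() are hypothetical placeholders:
//
//   while (has_work()) {
//     if (heap->check_cancelled_gc_and_yield()) {
//       return;  // cancelled: finish up as soon as possible
//     }
//     do_step();
//   }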
346
347 public:
348 // Elastic heap support
349 void entry_uncommit(double shrink_before, size_t shrink_until);
350 void op_uncommit(double shrink_before, size_t shrink_until);
351
352 private:
353 // GC support
354 // Reset bitmap, prepare regions for new GC cycle
355 void prepare_gc();
356 void prepare_regions_and_collection_set(bool concurrent);
357 // Evacuation
358 void evacuate_collection_set(bool concurrent);
359 // Concurrent root processing
360 void prepare_concurrent_roots();
361 void finish_concurrent_roots();
362 // Concurrent class unloading support
363 void do_class_unloading();
364 // Reference updating
365 void prepare_update_heap_references(bool concurrent);
366 void update_heap_references(bool concurrent);
367 // Final update region states
368 void update_heap_region_states(bool concurrent);
369 void rebuild_free_set(bool concurrent);
370
371 void rendezvous_threads();
372 void recycle_trash();
373 public:
374 void notify_gc_progress();
375 void notify_gc_no_progress();
376 size_t get_gc_no_progress_count() const;
377
378 //
379 // Mark support
380 private:
381 ShenandoahControlThread* _control_thread;
382 ShenandoahCollectorPolicy* _shenandoah_policy;
383 ShenandoahMode* _gc_mode;
384 ShenandoahHeuristics* _heuristics;
385 ShenandoahFreeSet* _free_set;
386 ShenandoahPacer* _pacer;
387 ShenandoahVerifier* _verifier;
388
389 ShenandoahPhaseTimings* _phase_timings;
390
391 ShenandoahControlThread* control_thread() { return _control_thread; }
392
393 public:
394 ShenandoahCollectorPolicy* shenandoah_policy() const { return _shenandoah_policy; }
395 ShenandoahMode* mode() const { return _gc_mode; }
396 ShenandoahHeuristics* heuristics() const { return _heuristics; }
397 ShenandoahFreeSet* free_set() const { return _free_set; }
398 ShenandoahPacer* pacer() const { return _pacer; }
399
400 ShenandoahPhaseTimings* phase_timings() const { return _phase_timings; }
401
402 ShenandoahVerifier* verifier();
403
404 // ---------- VM subsystem bindings
405 //
406 private:
407 ShenandoahMonitoringSupport* _monitoring_support;
408 MemoryPool* _memory_pool;
409 GCMemoryManager _stw_memory_manager;
410 GCMemoryManager _cycle_memory_manager;
411 ConcurrentGCTimer* _gc_timer;
412 SoftRefPolicy _soft_ref_policy;
413
414 // For exporting to SA
415 int _log_min_obj_alignment_in_bytes;
416 public:
417 ShenandoahMonitoringSupport* monitoring_support() { return _monitoring_support; }
418 GCMemoryManager* cycle_memory_manager() { return &_cycle_memory_manager; }
419 GCMemoryManager* stw_memory_manager() { return &_stw_memory_manager; }
420 SoftRefPolicy* soft_ref_policy() override { return &_soft_ref_policy; }
421
422 GrowableArray<GCMemoryManager*> memory_managers() override;
423 GrowableArray<MemoryPool*> memory_pools() override;
424 MemoryUsage memory_usage() override;
425 GCTracer* tracer();
426 ConcurrentGCTimer* gc_timer() const;
427
428 // ---------- Reference processing
429 //
430 private:
431 ShenandoahReferenceProcessor* const _ref_processor;
432
433 public:
434 ShenandoahReferenceProcessor* ref_processor() { return _ref_processor; }
435
436 // ---------- Class Unloading
437 //
438 private:
439 ShenandoahSharedFlag _unload_classes;
440 ShenandoahUnload _unloader;
441
442 public:
443 void set_unload_classes(bool uc);
444 bool unload_classes() const;
445
446 // Perform STW class unloading and weak root cleaning
447 void parallel_cleaning(bool full_gc);
448
449 private:
450 void stw_unload_classes(bool full_gc);
451 void stw_process_weak_roots(bool full_gc);
452 void stw_weak_refs(bool full_gc);
453
454 // Heap iteration support
455 void scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops);
456 bool prepare_aux_bitmap_for_iteration();
457 void reclaim_aux_bitmap_for_iteration();
458
459 // ---------- Generic interface hooks
460 // Minor things that the super-interface expects us to implement to play nice with
461 // the rest of the runtime. Some of the things here are not required to be implemented,
462 // and can be stubbed out.
463 //
464 public:
465 bool is_maximal_no_gc() const override shenandoah_not_implemented_return(false);
466
467 bool is_in(const void* p) const override;
468
469 bool requires_barriers(stackChunkOop obj) const override;
470
471 MemRegion reserved_region() const { return _reserved; }
472 bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); }
473
474 void collect(GCCause::Cause cause) override;
475 void do_full_collection(bool clear_all_soft_refs) override;
476
477 // Used for parsing heap during error printing
478 HeapWord* block_start(const void* addr) const;
479 bool block_is_obj(const HeapWord* addr) const;
480 bool print_location(outputStream* st, void* addr) const override;
481
482 // Used for native heap walkers: heap dumpers, mostly
483 void object_iterate(ObjectClosure* cl) override;
484 // Parallel heap iteration support
485 ParallelObjectIteratorImpl* parallel_object_iterator(uint workers) override;
486
487 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
501 void verify_nmethod(nmethod* nm) override {}
502
503 // ---------- Pinning hooks
504 //
505 public:
506 // Shenandoah supports per-object (per-region) pinning
507 void pin_object(JavaThread* thread, oop obj) override;
508 void unpin_object(JavaThread* thread, oop obj) override;
509
510 void sync_pinned_region_status();
511 void assert_pinned_region_status() NOT_DEBUG_RETURN;
512
513 // ---------- Concurrent Stack Processing support
514 //
515 public:
516 bool uses_stack_watermark_barrier() const override { return true; }
517
518 // ---------- Allocation support
519 //
520 private:
521 HeapWord* allocate_memory_under_lock(ShenandoahAllocRequest& request, bool& in_new_region);
522 inline HeapWord* allocate_from_gclab(Thread* thread, size_t size);
523 HeapWord* allocate_from_gclab_slow(Thread* thread, size_t size);
524 HeapWord* allocate_new_gclab(size_t min_size, size_t word_size, size_t* actual_size);
525
526 public:
527 HeapWord* allocate_memory(ShenandoahAllocRequest& request);
528 HeapWord* mem_allocate(size_t size, bool* what) override;
529 MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
530 size_t size,
531 Metaspace::MetadataType mdtype) override;
532
533 void notify_mutator_alloc_words(size_t words, bool waste);
534
535 HeapWord* allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) override;
536 size_t tlab_capacity(Thread *thr) const override;
537 size_t unsafe_max_tlab_alloc(Thread *thread) const override;
538 size_t max_tlab_size() const override;
539 size_t tlab_used(Thread* ignored) const override;
540
541 void ensure_parsability(bool retire_labs) override;
542
543 void labs_make_parsable();
544 void tlabs_retire(bool resize);
545 void gclabs_retire(bool resize);
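
// GCLAB sketch (illustrative of the fast/slow split; the real logic lives in
// the .inline.hpp and .cpp files): try the thread-local bump allocation first,
// then fall back to the slow path that retires and refills the GCLAB:
//
//   HeapWord* copy = allocate_from_gclab(thread, size);
//   if (copy == nullptr) {
//     copy = allocate_from_gclab_slow(thread, size);
//   }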
546
547 // ---------- Marking support
548 //
549 private:
550 ShenandoahMarkingContext* _marking_context;
551 MemRegion _bitmap_region;
552 MemRegion _aux_bitmap_region;
553 MarkBitMap _verification_bit_map;
554 MarkBitMap _aux_bit_map;
555
556 size_t _bitmap_size;
557 size_t _bitmap_regions_per_slice;
558 size_t _bitmap_bytes_per_slice;
559
560 size_t _pretouch_heap_page_size;
561 size_t _pretouch_bitmap_page_size;
562
563 bool _bitmap_region_special;
564 bool _aux_bitmap_region_special;
565
566 ShenandoahLiveData** _liveness_cache;
567
568 public:
569 inline ShenandoahMarkingContext* complete_marking_context() const;
570 inline ShenandoahMarkingContext* marking_context() const;
571 inline void mark_complete_marking_context();
572 inline void mark_incomplete_marking_context();
573
574 template<class T>
575 inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl);
576
577 template<class T>
578 inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);
579
580 template<class T>
581 inline void marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);
582
583 void reset_mark_bitmap();
584
585 // SATB barriers hooks
586 inline bool requires_marking(const void* entry) const;
587
588 // Support for bitmap uncommits
589 bool commit_bitmap_slice(ShenandoahHeapRegion *r);
590 bool uncommit_bitmap_slice(ShenandoahHeapRegion *r);
591 bool is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self = false);
592
593 // Liveness caching support
594 ShenandoahLiveData* get_liveness_cache(uint worker_id);
595 void flush_liveness_cache(uint worker_id);
596
597 size_t pretouch_heap_page_size() { return _pretouch_heap_page_size; }
598
599 // ---------- Evacuation support
600 //
601 private:
602 ShenandoahCollectionSet* _collection_set;
603 ShenandoahEvacOOMHandler _oom_evac_handler;
604
605 public:
606 static address in_cset_fast_test_addr();
607
608 ShenandoahCollectionSet* collection_set() const { return _collection_set; }
609
610 // Checks if object is in the collection set.
611 inline bool in_collection_set(oop obj) const;
612
613 // Checks if location is in the collection set. Can be interior pointer, not the oop itself.
614 inline bool in_collection_set_loc(void* loc) const;
615
616 // Evacuates object src. Returns the evacuated object, either evacuated
617 // by this thread, or by some other thread.
618 inline oop evacuate_object(oop src, Thread* thread);
619
620 // Call before/after evacuation.
621 inline void enter_evacuation(Thread* t);
622 inline void leave_evacuation(Thread* t);
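
// Evacuation sketch (illustrative): evacuation is bracketed by enter/leave so
// the ShenandoahEvacOOMHandler protocol can account for this thread. The
// returned oop is the winning copy, whether or not this thread made it:
//
//   heap->enter_evacuation(thread);
//   if (heap->in_collection_set(obj)) {
//     oop copy = heap->evacuate_object(obj, thread);
//   }
//   heap->leave_evacuation(thread);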
623
624 // ---------- Helper functions
625 //
626 public:
627 template <class T>
628 inline void conc_update_with_forwarded(T* p);
629
630 template <class T>
631 inline void update_with_forwarded(T* p);
632
633 static inline void atomic_update_oop(oop update, oop* addr, oop compare);
634 static inline void atomic_update_oop(oop update, narrowOop* addr, oop compare);
635 static inline void atomic_update_oop(oop update, narrowOop* addr, narrowOop compare);
636
637 static inline bool atomic_update_oop_check(oop update, oop* addr, oop compare);
638 static inline bool atomic_update_oop_check(oop update, narrowOop* addr, oop compare);
639 static inline bool atomic_update_oop_check(oop update, narrowOop* addr, narrowOop compare);
640
641 static inline void atomic_clear_oop( oop* addr, oop compare);
642 static inline void atomic_clear_oop(narrowOop* addr, oop compare);
643 static inline void atomic_clear_oop(narrowOop* addr, narrowOop compare);
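
// Update-refs sketch (illustrative): publish a forwardee only if the slot
// still holds the stale value, so racing updaters never regress a slot.
// resolve_forwarded() stands in for the real forwarding lookup:
//
//   T* p = ...;                        // heap slot being updated
//   oop obj = RawAccess<>::oop_load(p);
//   if (obj != nullptr && in_collection_set(obj)) {
//     oop fwd = resolve_forwarded(obj);
//     atomic_update_oop(fwd, p, obj);  // CAS-style: only if *p still == obj
//   }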
644
645 void trash_humongous_region_at(ShenandoahHeapRegion *r);
646
647 private:
648 void trash_cset_regions();
649
650 // ---------- Testing helpers functions
651 //
652 private:
653 ShenandoahSharedFlag _inject_alloc_failure;
654
655 void try_inject_alloc_failure();
656 bool should_inject_alloc_failure();
657 };
658
659 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
1 /*
2 * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
4 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 *
7 * This code is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 only, as
9 * published by the Free Software Foundation.
10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 *
25 */
26
27 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
28 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
29
30 #include "gc/shared/ageTable.hpp"
31 #include "gc/shared/markBitMap.hpp"
32 #include "gc/shared/softRefPolicy.hpp"
33 #include "gc/shared/collectedHeap.hpp"
34 #include "gc/shenandoah/shenandoahAgeCensus.hpp"
35 #include "gc/shenandoah/heuristics/shenandoahSpaceInfo.hpp"
36 #include "gc/shenandoah/shenandoahAsserts.hpp"
37 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
39 #include "gc/shenandoah/shenandoahLock.hpp"
40 #include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
41 #include "gc/shenandoah/shenandoahEvacTracker.hpp"
42 #include "gc/shenandoah/shenandoahGenerationType.hpp"
43 #include "gc/shenandoah/shenandoahMmuTracker.hpp"
44 #include "gc/shenandoah/shenandoahPadding.hpp"
45 #include "gc/shenandoah/shenandoahScanRemembered.hpp"
46 #include "gc/shenandoah/shenandoahSharedVariables.hpp"
47 #include "gc/shenandoah/shenandoahUnload.hpp"
48 #include "memory/metaspace.hpp"
49 #include "services/memoryManager.hpp"
50 #include "utilities/globalDefinitions.hpp"
51 #include "utilities/stack.hpp"
52
53 class ConcurrentGCTimer;
54 class ObjectIterateScanRootClosure;
55 class PLAB;
56 class ShenandoahCollectorPolicy;
57 class ShenandoahControlThread;
58 class ShenandoahRegulatorThread;
59 class ShenandoahGCSession;
60 class ShenandoahGCStateResetter;
61 class ShenandoahGeneration;
62 class ShenandoahYoungGeneration;
63 class ShenandoahOldGeneration;
64 class ShenandoahHeuristics;
65 class ShenandoahOldHeuristics;
66 class ShenandoahYoungHeuristics;
67 class ShenandoahMarkingContext;
68 class ShenandoahPhaseTimings;
69 class ShenandoahHeap;
70 class ShenandoahHeapRegion;
71 class ShenandoahHeapRegionClosure;
72 class ShenandoahCollectionSet;
73 class ShenandoahFreeSet;
74 class ShenandoahConcurrentMark;
75 class ShenandoahFullGC;
76 class ShenandoahMonitoringSupport;
77 class ShenandoahMode;
78 class ShenandoahPacer;
79 class ShenandoahReferenceProcessor;
80 class ShenandoahVerifier;
81 class ShenandoahWorkerThreads;
82 class VMStructs;
83
84 // Used for buffering per-region liveness data.
85 // Needed since ShenandoahHeapRegion uses atomics to update liveness.
86 // The ShenandoahHeap array has max_workers elements, each of which is an array of
87 // max_regions uint16_t entries. The choice of uint16_t is not accidental:
88 // there is a tradeoff between static/dynamic footprint that translates
89 // into cache pressure (which is already high during marking), and
90 // too many atomic updates. uint32_t is too large, uint8_t is too small.
91 typedef uint16_t ShenandoahLiveData;
92 #define SHENANDOAH_LIVEDATA_MAX ((ShenandoahLiveData)-1)
93
94 class ShenandoahRegionIterator : public StackObj {
95 private:
96 ShenandoahHeap* _heap;
97
107 ShenandoahRegionIterator(ShenandoahHeap* heap);
108
109 // Reset iterator to default state
110 void reset();
111
112 // Returns next region, or null if there are no more regions.
113 // This is multi-thread-safe.
114 inline ShenandoahHeapRegion* next();
115
116 // This is *not* MT safe. However, in the absence of multithreaded access, it
117 // can be used to determine if there is more work to do.
118 bool has_next() const;
119 };
120
121 class ShenandoahHeapRegionClosure : public StackObj {
122 public:
123 virtual void heap_region_do(ShenandoahHeapRegion* r) = 0;
124 virtual bool is_thread_safe() { return false; }
125 };
126
127 template<ShenandoahGenerationType GENERATION>
128 class ShenandoahGenerationRegionClosure : public ShenandoahHeapRegionClosure {
129 public:
130 explicit ShenandoahGenerationRegionClosure(ShenandoahHeapRegionClosure* cl) : _cl(cl) {}
131 void heap_region_do(ShenandoahHeapRegion* r);
132 virtual bool is_thread_safe() { return _cl->is_thread_safe(); }
133 private:
134 ShenandoahHeapRegionClosure* _cl;
135 };
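
// Usage sketch (illustrative): wrap an ordinary closure so that only regions
// of one generation are visited; YOUNG is assumed here to be a value of
// ShenandoahGenerationType:
//
//   MyRegionClosure cl;  // hypothetical ShenandoahHeapRegionClosure subclass
//   ShenandoahGenerationRegionClosure<YOUNG> young_only(&cl);
//   heap->heap_region_iterate(&young_only);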
136
137 typedef ShenandoahLock ShenandoahHeapLock;
138 typedef ShenandoahLocker ShenandoahHeapLocker;
139 typedef Stack<oop, mtGC> ShenandoahScanObjectStack;
140
141 // Shenandoah GC is a low-pause concurrent GC that uses Brooks forwarding pointers
142 // to encode forwarding data. See BrooksPointer for details on forwarding data encoding.
143 // See ShenandoahControlThread for GC cycle structure.
144 //
145 class ShenandoahHeap : public CollectedHeap {
146 friend class ShenandoahAsserts;
147 friend class VMStructs;
148 friend class ShenandoahGCSession;
149 friend class ShenandoahGCStateResetter;
150 friend class ShenandoahParallelObjectIterator;
151 friend class ShenandoahSafepoint;
152 // Supported GC
153 friend class ShenandoahConcurrentGC;
154 friend class ShenandoahOldGC;
155 friend class ShenandoahDegenGC;
156 friend class ShenandoahFullGC;
157 friend class ShenandoahUnload;
158
159 // ---------- Locks that guard important data structures in Heap
160 //
161 private:
162 ShenandoahHeapLock _lock;
163 ShenandoahGeneration* _gc_generation;
164
165 public:
166 ShenandoahHeapLock* lock() {
167 return &_lock;
168 }
169
170 ShenandoahGeneration* active_generation() const {
171 // last or latest generation might be a better name here.
172 return _gc_generation;
173 }
174
175 void set_gc_generation(ShenandoahGeneration* generation) {
176 _gc_generation = generation;
177 }
178
179 ShenandoahHeuristics* heuristics();
180 ShenandoahOldHeuristics* old_heuristics();
181 ShenandoahYoungHeuristics* young_heuristics();
182
183 bool doing_mixed_evacuations();
184 bool is_old_bitmap_stable() const;
185 bool is_gc_generation_young() const;
186
187 // ---------- Initialization, termination, identification, printing routines
188 //
189 public:
190 static ShenandoahHeap* heap();
191
192 const char* name() const override { return "Shenandoah"; }
193 ShenandoahHeap::Name kind() const override { return CollectedHeap::Shenandoah; }
194
195 ShenandoahHeap(ShenandoahCollectorPolicy* policy);
196 jint initialize() override;
197 void post_initialize() override;
198 void initialize_heuristics_generations();
199 virtual void print_init_logger() const;
200 void initialize_serviceability() override;
201
202 void print_on(outputStream* st) const override;
203 void print_extended_on(outputStream *st) const override;
204 void print_tracing_info() const override;
205 void print_heap_regions_on(outputStream* st) const;
206
207 void stop() override;
208
209 void prepare_for_verify() override;
210 void verify(VerifyOption vo) override;
211
212 bool verify_generation_usage(bool verify_old, size_t old_regions, size_t old_bytes, size_t old_waste,
213 bool verify_young, size_t young_regions, size_t young_bytes, size_t young_waste);
214
215 // WhiteBox testing support.
216 bool supports_concurrent_gc_breakpoints() const override {
217 return true;
218 }
219
220 // ---------- Heap counters and metrics
221 //
222 private:
223 size_t _initial_size;
224 size_t _minimum_size;
225 size_t _promotion_potential;
226 size_t _pad_for_promote_in_place; // bytes of filler
227 size_t _promotable_humongous_regions;
228 size_t _regular_regions_promoted_in_place;
229
230 volatile size_t _soft_max_size;
231 shenandoah_padding(0);
232 volatile size_t _committed;
233 shenandoah_padding(1);
234
235 void increase_used(const ShenandoahAllocRequest& req);
236
237 public:
238 void increase_used(ShenandoahGeneration* generation, size_t bytes);
239 void decrease_used(ShenandoahGeneration* generation, size_t bytes);
240 void increase_humongous_waste(ShenandoahGeneration* generation, size_t bytes);
241 void decrease_humongous_waste(ShenandoahGeneration* generation, size_t bytes);
242
243 void increase_committed(size_t bytes);
244 void decrease_committed(size_t bytes);
245
246 void reset_bytes_allocated_since_gc_start();
247
248 size_t min_capacity() const;
249 size_t max_capacity() const override;
250 size_t soft_max_capacity() const;
251 size_t initial_capacity() const;
252 size_t capacity() const override;
253 size_t used() const override;
254 size_t committed() const;
255
256 void set_soft_max_capacity(size_t v);
257
258 // ---------- Workers handling
259 //
260 private:
261 uint _max_workers;
262 ShenandoahWorkerThreads* _workers;
263 ShenandoahWorkerThreads* _safepoint_workers;
264
265 public:
266 uint max_workers();
267 void assert_gc_workers(uint nworker) NOT_DEBUG_RETURN;
268
269 WorkerThreads* workers() const;
270 WorkerThreads* safepoint_workers() override;
271
272 void gc_threads_do(ThreadClosure* tcl) const override;
273
274 // ---------- Heap regions handling machinery
275 //
276 private:
277 MemRegion _heap_region;
278 bool _heap_region_special;
279 size_t _num_regions;
280 ShenandoahHeapRegion** _regions;
281 uint8_t* _affiliations; // Holds array of enum ShenandoahAffiliation, including FREE status in non-generational mode
282 ShenandoahRegionIterator _update_refs_iterator;
283
284 public:
285
286 inline HeapWord* base() const { return _heap_region.start(); }
287
288 inline size_t num_regions() const { return _num_regions; }
289 inline bool is_heap_region_special() { return _heap_region_special; }
290
291 inline ShenandoahHeapRegion* heap_region_containing(const void* addr) const;
292 inline size_t heap_region_index_containing(const void* addr) const;
293
294 inline ShenandoahHeapRegion* get_region(size_t region_idx) const;
295
296 void heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
297 void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
298
299 inline ShenandoahMmuTracker* mmu_tracker() { return &_mmu_tracker; }
300
301 // ---------- GC state machinery
302 //
303 // GC state describes the important parts of the collector state that may be
304 // used to make barrier selection decisions in the native and generated code.
305 // Multiple bits can be set at once.
306 //
307 // Important invariant: when GC state is zero, the heap is stable, and no barriers
308 // are required.
309 //
310 public:
311 enum GCStateBitPos {
312 // Heap has forwarded objects: needs LRB barriers.
313 HAS_FORWARDED_BITPOS = 0,
314
315 // Heap is under marking: needs SATB barriers.
316 // For generational mode, it means either young or old marking, or both.
317 MARKING_BITPOS = 1,
318
319 // Heap is under evacuation: needs LRB barriers. (Set together with HAS_FORWARDED)
320 EVACUATION_BITPOS = 2,
321
322 // Heap is under updating: needs no additional barriers.
323 UPDATEREFS_BITPOS = 3,
324
325 // Heap is under weak-reference/roots processing: needs weak-LRB barriers.
326 WEAK_ROOTS_BITPOS = 4,
327
328 // Young regions are under marking, need SATB barriers.
329 YOUNG_MARKING_BITPOS = 5,
330
331 // Old regions are under marking, need SATB barriers.
332 OLD_MARKING_BITPOS = 6
333 };
334
335 enum GCState {
336 STABLE = 0,
337 HAS_FORWARDED = 1 << HAS_FORWARDED_BITPOS,
338 MARKING = 1 << MARKING_BITPOS,
339 EVACUATION = 1 << EVACUATION_BITPOS,
340 UPDATEREFS = 1 << UPDATEREFS_BITPOS,
341 WEAK_ROOTS = 1 << WEAK_ROOTS_BITPOS,
342 YOUNG_MARKING = 1 << YOUNG_MARKING_BITPOS,
343 OLD_MARKING = 1 << OLD_MARKING_BITPOS
344 };
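
// Generational sketch (illustrative): MARKING acts as the union bit, while the
// young/old bits let code discriminate which marking is active:
//
//   char state = heap->gc_state();
//   bool any_marking = (state & MARKING) != 0;
//   bool old_marking = (state & OLD_MARKING) != 0;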
345
346 private:
347 ShenandoahSharedBitmap _gc_state;
348 ShenandoahSharedFlag _degenerated_gc_in_progress;
349 ShenandoahSharedFlag _full_gc_in_progress;
350 ShenandoahSharedFlag _full_gc_move_in_progress;
351 ShenandoahSharedFlag _concurrent_strong_root_in_progress;
352
353 size_t _gc_no_progress_count;
354
355 // TODO: Revisit the following comment. It may not accurately represent the true behavior when evacuations fail due to
356 // difficulty finding memory to hold evacuated objects.
357 //
358 // Note that the typical total expenditure on evacuation is less than the associated evacuation reserve because we generally
359 // reserve ShenandoahEvacWaste (> 1.0) times the anticipated evacuation need. In the case that there is an excessive amount
360 // of waste, it may be that one thread fails to grab a new GCLAB, this does not necessarily doom the associated evacuation
361 // effort. If this happens, the requesting thread blocks until some other thread manages to evacuate the offending object.
362 // Only after "all" threads fail to evacuate an object do we consider the evacuation effort to have failed.
363
364 size_t _promoted_reserve; // Bytes reserved within old-gen to hold the results of promotion
365 volatile size_t _promoted_expended; // Bytes of old-gen memory expended on promotions
366
367 size_t _old_evac_reserve; // Bytes reserved within old-gen to hold evacuated objects from old-gen collection set
368 size_t _young_evac_reserve; // Bytes reserved within young-gen to hold evacuated objects from young-gen collection set
369
370 ShenandoahAgeCensus* _age_census; // Age census used for adapting tenuring threshold in generational mode
371
372 // At the end of final mark, but before we begin evacuating, heuristics calculate how much memory is required to
373 // hold the results of evacuating to young-gen and to old-gen. These quantities, stored in _promoted_reserve,
374 // _old_evac_reserve, and _young_evac_reserve, are consulted prior to rebuilding the free set (ShenandoahFreeSet)
375 // in preparation for evacuation. When the free set is rebuilt, we make sure to reserve sufficient memory in the
376 // collector and old_collector sets to hold these quantities if _has_evacuation_reserve_quantities is true. The
377 // other time we rebuild the free set is at the end of GC, as we prepare to idle GC until the next trigger. In this
378 // case, _has_evacuation_reserve_quantities is false because we don't yet know how much memory will need to be
379 // evacuated in the next GC cycle. When _has_evacuation_reserve_quantities is false, the free set rebuild operation
380 // reserves memory for the collector and old_collector sets based on alternative mechanisms, such as
381 // ShenandoahEvacReserve, ShenandoahOldEvacReserve, and ShenandoahOldCompactionReserve. In a future planned
382 // enhancement, the reserve for the old_collector set when _has_evacuation_reserve_quantities is false will be based
383 // in part on anticipated promotion, as determined by analyzing the live data that the previous GC pass found at the age one less than the current tenure age.
384 bool _has_evacuation_reserve_quantities;
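
// Worked example (illustrative numbers): if heuristics select 100 MB of live
// young data for evacuation and ShenandoahEvacWaste is 1.2, they would set
// _young_evac_reserve to about 120 MB; the extra 20% absorbs GCLAB waste and
// fragmentation, which is why typical expenditure stays below the reserve.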
385
386 void set_gc_state_all_threads(char state);
387 void set_gc_state_mask(uint mask, bool value);
388
389 public:
390 char gc_state() const;
391
392 void set_evacuation_reserve_quantities(bool is_valid);
393 void set_concurrent_young_mark_in_progress(bool in_progress);
394 void set_concurrent_old_mark_in_progress(bool in_progress);
395 void set_evacuation_in_progress(bool in_progress);
396 void set_update_refs_in_progress(bool in_progress);
397 void set_degenerated_gc_in_progress(bool in_progress);
398 void set_full_gc_in_progress(bool in_progress);
399 void set_full_gc_move_in_progress(bool in_progress);
400 void set_has_forwarded_objects(bool cond);
401 void set_concurrent_strong_root_in_progress(bool cond);
402 void set_concurrent_weak_root_in_progress(bool cond);
403
404 void set_aging_cycle(bool cond);
405
406 inline bool is_stable() const;
407 inline bool is_idle() const;
408 inline bool has_evacuation_reserve_quantities() const;
409 inline bool is_concurrent_mark_in_progress() const;
410 inline bool is_concurrent_young_mark_in_progress() const;
411 inline bool is_concurrent_old_mark_in_progress() const;
412 inline bool is_update_refs_in_progress() const;
413 inline bool is_evacuation_in_progress() const;
414 inline bool is_degenerated_gc_in_progress() const;
415 inline bool is_full_gc_in_progress() const;
416 inline bool is_full_gc_move_in_progress() const;
417 inline bool has_forwarded_objects() const;
418
419 inline bool is_stw_gc_in_progress() const;
420 inline bool is_concurrent_strong_root_in_progress() const;
421 inline bool is_concurrent_weak_root_in_progress() const;
422 bool is_prepare_for_old_mark_in_progress() const;
423 inline bool is_aging_cycle() const;
424
425 inline void clear_promotion_potential() { _promotion_potential = 0; }
426 inline void set_promotion_potential(size_t val) { _promotion_potential = val; }
427 inline size_t get_promotion_potential() { return _promotion_potential; }
428
429 inline void set_pad_for_promote_in_place(size_t pad) { _pad_for_promote_in_place = pad; }
430 inline size_t get_pad_for_promote_in_place() { return _pad_for_promote_in_place; }
431
432 inline void reserve_promotable_humongous_regions(size_t region_count) { _promotable_humongous_regions = region_count; }
433 inline void reserve_promotable_regular_regions(size_t region_count) { _regular_regions_promoted_in_place = region_count; }
434
435 inline size_t get_promotable_humongous_regions() { return _promotable_humongous_regions; }
436 inline size_t get_regular_regions_promoted_in_place() { return _regular_regions_promoted_in_place; }
437
438 // Returns previous value
439 inline size_t set_promoted_reserve(size_t new_val);
440 inline size_t get_promoted_reserve() const;
441 inline void augment_promo_reserve(size_t increment);
442
443 inline void reset_promoted_expended();
444 inline size_t expend_promoted(size_t increment);
445 inline size_t unexpend_promoted(size_t decrement);
446 inline size_t get_promoted_expended();
447
448 // Returns previous value
449 inline size_t set_old_evac_reserve(size_t new_val);
450 inline size_t get_old_evac_reserve() const;
451 inline void augment_old_evac_reserve(size_t increment);
452
453 // Returns previous value
454 inline size_t set_young_evac_reserve(size_t new_val);
455 inline size_t get_young_evac_reserve() const;
456
457 // Return the age census object for young gen (in generational mode)
458 inline ShenandoahAgeCensus* age_census() const;
459
460 private:
461 void manage_satb_barrier(bool active);
462
463 enum CancelState {
464 // Normal state. GC has not been cancelled and is open for cancellation.
465 // Worker threads can suspend for safepoint.
466 CANCELLABLE,
467
468 // GC has been cancelled. Worker threads can not suspend for
469 // safepoint but must finish their work as soon as possible.
470 CANCELLED
471 };
472
473 double _cancel_requested_time;
474 ShenandoahSharedEnumFlag<CancelState> _cancelled_gc;
475
476 // Returns true if cancel request was successfully communicated.
477 // Returns false if some other thread already communicated cancel
478 // request. A true return value does not mean GC has been
479 // cancelled, only that the process of cancelling GC has begun.
480 bool try_cancel_gc();
481
482 public:
483 inline bool cancelled_gc() const;
484 inline bool check_cancelled_gc_and_yield(bool sts_active = true);
485
486 inline void clear_cancelled_gc(bool clear_oom_handler = true);
487
488 void cancel_concurrent_mark();
489 void cancel_gc(GCCause::Cause cause);
490
491 public:
492 // Elastic heap support
493 void entry_uncommit(double shrink_before, size_t shrink_until);
494 void op_uncommit(double shrink_before, size_t shrink_until);
495
496 private:
497 // GC support
498 // Evacuation
499 void evacuate_collection_set(bool concurrent);
500 // Concurrent root processing
501 void prepare_concurrent_roots();
502 void finish_concurrent_roots();
503 // Concurrent class unloading support
504 void do_class_unloading();
505 // Reference updating
506 void prepare_update_heap_references(bool concurrent);
507 void update_heap_references(bool concurrent);
508 // Final update region states
509 void update_heap_region_states(bool concurrent);
510
511 void rendezvous_threads();
512 void recycle_trash();
513 public:
514 void rebuild_free_set(bool concurrent);
515 void notify_gc_progress();
516 void notify_gc_no_progress();
517 size_t get_gc_no_progress_count() const;
518
519 //
520 // Mark support
521 private:
522 ShenandoahYoungGeneration* _young_generation;
523 ShenandoahGeneration* _global_generation;
524 ShenandoahOldGeneration* _old_generation;
525
526 ShenandoahControlThread* _control_thread;
527 ShenandoahRegulatorThread* _regulator_thread;
528 ShenandoahCollectorPolicy* _shenandoah_policy;
529 ShenandoahMode* _gc_mode;
530 ShenandoahFreeSet* _free_set;
531 ShenandoahPacer* _pacer;
532 ShenandoahVerifier* _verifier;
533
534 ShenandoahPhaseTimings* _phase_timings;
535 ShenandoahEvacuationTracker* _evac_tracker;
536 ShenandoahMmuTracker _mmu_tracker;
537 ShenandoahGenerationSizer _generation_sizer;
538
539 ShenandoahRegulatorThread* regulator_thread() { return _regulator_thread; }
540
541 public:
542 ShenandoahControlThread* control_thread() { return _control_thread; }
543 ShenandoahYoungGeneration* young_generation() const { return _young_generation; }
544 ShenandoahGeneration* global_generation() const { return _global_generation; }
545 ShenandoahOldGeneration* old_generation() const { return _old_generation; }
546 ShenandoahGeneration* generation_for(ShenandoahAffiliation affiliation) const;
547 const ShenandoahGenerationSizer* generation_sizer() const { return &_generation_sizer; }
548
549 size_t max_size_for(ShenandoahGeneration* generation) const;
550 size_t min_size_for(ShenandoahGeneration* generation) const;
551
552 ShenandoahCollectorPolicy* shenandoah_policy() const { return _shenandoah_policy; }
553 ShenandoahMode* mode() const { return _gc_mode; }
554 ShenandoahFreeSet* free_set() const { return _free_set; }
555 ShenandoahPacer* pacer() const { return _pacer; }
556
557 ShenandoahPhaseTimings* phase_timings() const { return _phase_timings; }
558 ShenandoahEvacuationTracker* evac_tracker() const { return _evac_tracker; }
559
560 void on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation);
561 void on_cycle_end(ShenandoahGeneration* generation);
562
563 ShenandoahVerifier* verifier();
564
565 // ---------- VM subsystem bindings
566 //
567 private:
568 ShenandoahMonitoringSupport* _monitoring_support;
569 MemoryPool* _memory_pool;
570 MemoryPool* _young_gen_memory_pool;
571 MemoryPool* _old_gen_memory_pool;
572
573 GCMemoryManager _stw_memory_manager;
574 GCMemoryManager _cycle_memory_manager;
575 ConcurrentGCTimer* _gc_timer;
576 SoftRefPolicy _soft_ref_policy;
577
578 // For exporting to SA
579 int _log_min_obj_alignment_in_bytes;
580 public:
581 ShenandoahMonitoringSupport* monitoring_support() const { return _monitoring_support; }
582 GCMemoryManager* cycle_memory_manager() { return &_cycle_memory_manager; }
583 GCMemoryManager* stw_memory_manager() { return &_stw_memory_manager; }
584 SoftRefPolicy* soft_ref_policy() override { return &_soft_ref_policy; }
585
586 GrowableArray<GCMemoryManager*> memory_managers() override;
587 GrowableArray<MemoryPool*> memory_pools() override;
588 MemoryUsage memory_usage() override;
589 GCTracer* tracer();
590 ConcurrentGCTimer* gc_timer() const;
591
592 // ---------- Class Unloading
593 //
594 private:
595 ShenandoahSharedFlag _is_aging_cycle;
596 ShenandoahSharedFlag _unload_classes;
597 ShenandoahUnload _unloader;
598
599 public:
600 void set_unload_classes(bool uc);
601 bool unload_classes() const;
602
603 // Perform STW class unloading and weak root cleaning
604 void parallel_cleaning(bool full_gc);
605
606 private:
607 void stw_unload_classes(bool full_gc);
608 void stw_process_weak_roots(bool full_gc);
609 void stw_weak_refs(bool full_gc);
610
611 inline void assert_lock_for_affiliation(ShenandoahAffiliation orig_affiliation,
612 ShenandoahAffiliation new_affiliation);
613
614 // Heap iteration support
615 void scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops);
616 bool prepare_aux_bitmap_for_iteration();
617 void reclaim_aux_bitmap_for_iteration();
618
619 // ---------- Generic interface hooks
620 // Minor things that the super-interface expects us to implement to play nice with
621 // the rest of the runtime. Some of the things here are not required to be implemented,
622 // and can be stubbed out.
623 //
624 public:
625 bool is_maximal_no_gc() const override shenandoah_not_implemented_return(false);
626
627 inline bool is_in(const void* p) const override;
628
629 inline bool is_in_active_generation(oop obj) const;
630 inline bool is_in_young(const void* p) const;
631 inline bool is_in_old(const void* p) const;
632 inline bool is_old(oop pobj) const;
633
634 inline ShenandoahAffiliation region_affiliation(const ShenandoahHeapRegion* r);
635 inline void set_affiliation(ShenandoahHeapRegion* r, ShenandoahAffiliation new_affiliation);
636
637 inline ShenandoahAffiliation region_affiliation(size_t index);
638
639 bool requires_barriers(stackChunkOop obj) const override;
640
641 MemRegion reserved_region() const { return _reserved; }
642 bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); }
643
644 void collect(GCCause::Cause cause) override;
645 void do_full_collection(bool clear_all_soft_refs) override;
646
647 // Used for parsing heap during error printing
648 HeapWord* block_start(const void* addr) const;
649 bool block_is_obj(const HeapWord* addr) const;
650 bool print_location(outputStream* st, void* addr) const override;
651
652 // Used for native heap walkers: heap dumpers, mostly
653 void object_iterate(ObjectClosure* cl) override;
654 // Parallel heap iteration support
655 ParallelObjectIteratorImpl* parallel_object_iterator(uint workers) override;
656
657 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
671 void verify_nmethod(nmethod* nm) override {}
672
673 // ---------- Pinning hooks
674 //
675 public:
676 // Shenandoah supports per-object (per-region) pinning
677 void pin_object(JavaThread* thread, oop obj) override;
678 void unpin_object(JavaThread* thread, oop obj) override;
679
680 void sync_pinned_region_status();
681 void assert_pinned_region_status() NOT_DEBUG_RETURN;
682
683 // ---------- Concurrent Stack Processing support
684 //
685 public:
686 bool uses_stack_watermark_barrier() const override { return true; }
687
688 // ---------- Allocation support
689 //
690 private:
691 // How many regions to transfer between old and young after we have finished recycling collection set regions?
692 size_t _old_regions_surplus;
693 size_t _old_regions_deficit;
694
695 HeapWord* allocate_memory_under_lock(ShenandoahAllocRequest& request, bool& in_new_region, bool is_promotion);
696
697 inline HeapWord* allocate_from_gclab(Thread* thread, size_t size);
698 HeapWord* allocate_from_gclab_slow(Thread* thread, size_t size);
699 HeapWord* allocate_new_gclab(size_t min_size, size_t word_size, size_t* actual_size);
700
701 inline HeapWord* allocate_from_plab(Thread* thread, size_t size, bool is_promotion);
702 HeapWord* allocate_from_plab_slow(Thread* thread, size_t size, bool is_promotion);
703 HeapWord* allocate_new_plab(size_t min_size, size_t word_size, size_t* actual_size);
704
705 public:
706 HeapWord* allocate_memory(ShenandoahAllocRequest& request, bool is_promotion);
707 HeapWord* mem_allocate(size_t size, bool* what) override;
708 MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
709 size_t size,
710 Metaspace::MetadataType mdtype) override;
711
712 void notify_mutator_alloc_words(size_t words, size_t waste);
713
714 HeapWord* allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) override;
715 size_t tlab_capacity(Thread *thr) const override;
716 size_t unsafe_max_tlab_alloc(Thread *thread) const override;
717 size_t max_tlab_size() const override;
718 size_t tlab_used(Thread* ignored) const override;
719
720 void ensure_parsability(bool retire_labs) override;
721
722 void labs_make_parsable();
723 void tlabs_retire(bool resize);
724 void gclabs_retire(bool resize);
725
726 inline void set_old_region_surplus(size_t surplus) { _old_regions_surplus = surplus; }
727 inline void set_old_region_deficit(size_t deficit) { _old_regions_deficit = deficit; }
728
729 inline size_t get_old_region_surplus() { return _old_regions_surplus; }
730 inline size_t get_old_region_deficit() { return _old_regions_deficit; }
731
732 // ---------- Marking support
733 //
734 private:
735 ShenandoahMarkingContext* _marking_context;
736 MemRegion _bitmap_region;
737 MemRegion _aux_bitmap_region;
738 MarkBitMap _verification_bit_map;
739 MarkBitMap _aux_bit_map;
740
741 size_t _bitmap_size;
742 size_t _bitmap_regions_per_slice;
743 size_t _bitmap_bytes_per_slice;
744
745 size_t _pretouch_heap_page_size;
746 size_t _pretouch_bitmap_page_size;
747
748 bool _bitmap_region_special;
749 bool _aux_bitmap_region_special;
750
751 ShenandoahLiveData** _liveness_cache;
752
753 public:
754 inline ShenandoahMarkingContext* complete_marking_context() const;
755 inline ShenandoahMarkingContext* marking_context() const;
756
757 template<class T>
758 inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl);
759
760 template<class T>
761 inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);
762
763 template<class T>
764 inline void marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);
765
766 // SATB barriers hooks
767 inline bool requires_marking(const void* entry) const;
768
769 // Support for bitmap uncommits
770 bool commit_bitmap_slice(ShenandoahHeapRegion *r);
771 bool uncommit_bitmap_slice(ShenandoahHeapRegion *r);
772 bool is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self = false);
773
774 // Liveness caching support
775 ShenandoahLiveData* get_liveness_cache(uint worker_id);
776 void flush_liveness_cache(uint worker_id);
777
778 size_t pretouch_heap_page_size() { return _pretouch_heap_page_size; }
779
780 // ---------- Evacuation support
781 //
782 private:
783 ShenandoahCollectionSet* _collection_set;
784 ShenandoahEvacOOMHandler _oom_evac_handler;
785 ShenandoahSharedFlag _old_gen_oom_evac;
786
787 inline oop try_evacuate_object(oop src, Thread* thread, ShenandoahHeapRegion* from_region, ShenandoahAffiliation target_gen);
788 void handle_old_evacuation(HeapWord* obj, size_t words, bool promotion);
789 void handle_old_evacuation_failure();
790
791 public:
792 void report_promotion_failure(Thread* thread, size_t size);
793
794 static address in_cset_fast_test_addr();
795
796 ShenandoahCollectionSet* collection_set() const { return _collection_set; }
797
798 // Checks if object is in the collection set.
799 inline bool in_collection_set(oop obj) const;
800
801 // Checks if location is in the collection set. Can be interior pointer, not the oop itself.
802 inline bool in_collection_set_loc(void* loc) const;
803
804 // Evacuates or promotes object src. Returns the evacuated object, either evacuated
805 // by this thread, or by some other thread.
806 inline oop evacuate_object(oop src, Thread* thread);
807
808 // Call before/after evacuation.
809 inline void enter_evacuation(Thread* t);
810 inline void leave_evacuation(Thread* t);
811
812 inline bool clear_old_evacuation_failure();
813
814 // ---------- Generational support
815 //
816 private:
817 RememberedScanner* _card_scan;
818
819 public:
820 inline RememberedScanner* card_scan() { return _card_scan; }
821 void clear_cards_for(ShenandoahHeapRegion* region);
822 void mark_card_as_dirty(void* location);
823 void retire_plab(PLAB* plab);
824 void retire_plab(PLAB* plab, Thread* thread);
825 void cancel_old_gc();
826
827 void adjust_generation_sizes_for_next_cycle(size_t old_xfer_limit, size_t young_cset_regions, size_t old_cset_regions);
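
// Post-write sketch (illustrative): in generational mode, a store that creates
// an old->young pointer dirties the card so the remembered-set scan later
// revisits the location; store_addr and value are hypothetical:
//
//   if (heap->is_in_old(store_addr) && heap->is_in_young(value)) {
//     heap->mark_card_as_dirty(store_addr);
//   }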
828
829 // ---------- Helper functions
830 //
831 public:
832 template <class T>
833 inline void conc_update_with_forwarded(T* p);
834
835 template <class T>
836 inline void update_with_forwarded(T* p);
837
838 static inline void atomic_update_oop(oop update, oop* addr, oop compare);
839 static inline void atomic_update_oop(oop update, narrowOop* addr, oop compare);
840 static inline void atomic_update_oop(oop update, narrowOop* addr, narrowOop compare);
841
842 static inline bool atomic_update_oop_check(oop update, oop* addr, oop compare);
843 static inline bool atomic_update_oop_check(oop update, narrowOop* addr, oop compare);
844 static inline bool atomic_update_oop_check(oop update, narrowOop* addr, narrowOop compare);
845
846 static inline void atomic_clear_oop( oop* addr, oop compare);
847 static inline void atomic_clear_oop(narrowOop* addr, oop compare);
848 static inline void atomic_clear_oop(narrowOop* addr, narrowOop compare);
849
850 size_t trash_humongous_region_at(ShenandoahHeapRegion *r);
851
852 static inline void increase_object_age(oop obj, uint additional_age);
853
854 // Return the object's age, or a sentinel value when the age can't
855 // necessarily be determined because of concurrent locking by the
856 // mutator
857 static inline uint get_object_age(oop obj);
858
859 void transfer_old_pointers_from_satb();
860
861 void log_heap_status(const char *msg) const;
862
863 private:
864 void trash_cset_regions();
865
866 // ---------- Testing helpers functions
867 //
868 private:
869 ShenandoahSharedFlag _inject_alloc_failure;
870
871 void try_inject_alloc_failure();
872 bool should_inject_alloc_failure();
873 };
874
875 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP