/*
 * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP

#include "gc/shared/markBitMap.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shenandoah/shenandoahAllocRequest.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "gc/shenandoah/shenandoahSharedVariables.hpp"
#include "gc/shenandoah/shenandoahUnload.hpp"
#include "memory/metaspace.hpp"
#include "services/memoryManager.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.hpp"
class ConcurrentGCTimer;
class ObjectIterateScanRootClosure;
class ShenandoahCollectorPolicy;
class ShenandoahControlThread;
class ShenandoahGCSession;
class ShenandoahGCStateResetter;
class ShenandoahHeuristics;
class ShenandoahMarkingContext;
class ShenandoahMode;
class ShenandoahPhaseTimings;
class ShenandoahHeap;
class ShenandoahHeapRegion;
class ShenandoahHeapRegionClosure;
class ShenandoahCollectionSet;
class ShenandoahFreeSet;
class ShenandoahConcurrentMark;
class ShenandoahFullGC;
class ShenandoahMonitoringSupport;
class ShenandoahPacer;
class ShenandoahReferenceProcessor;
class ShenandoahVerifier;
class ShenandoahWorkerThreads;
class VMStructs;

// Used for buffering per-region liveness data.
// Needed since ShenandoahHeapRegion uses atomics to update liveness.
// The ShenandoahHeap array has max-workers elements, each of which is an array of
// uint16_t * max_regions. The choice of uint16_t is not accidental:
// there is a tradeoff between static/dynamic footprint that translates
// into cache pressure (which is already high during marking), and
// too many atomic updates. uint32_t is too large, uint8_t is too small.
typedef uint16_t ShenandoahLiveData;
#define SHENANDOAH_LIVEDATA_MAX ((ShenandoahLiveData)-1)
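
// Illustrative sketch (editor's addition, not upstream code): one plausible way a
// marking worker accumulates per-region liveness through this cache. The overflow
// flush policy shown is an assumption; `worker_id`, `region_idx`, `region` and
// `object_words` are hypothetical names. increase_live_data_gc_words() is the
// atomic ShenandoahHeapRegion update this cache exists to amortize.
//
//   ShenandoahLiveData* cache = heap->get_liveness_cache(worker_id);
//   size_t live = cache[region_idx] + object_words;
//   if (live <= SHENANDOAH_LIVEDATA_MAX) {
//     cache[region_idx] = (ShenandoahLiveData) live;   // cheap thread-local update
//   } else {
//     region->increase_live_data_gc_words(live);       // rare atomic flush on overflow
//     cache[region_idx] = 0;
//   }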

class ShenandoahRegionIterator : public StackObj {
private:
  ShenandoahHeap* _heap;

  shenandoah_padding(0);
  volatile size_t _index;
  shenandoah_padding(1);

  // No implicit copying: iterators should be passed by reference to capture the state
  NONCOPYABLE(ShenandoahRegionIterator);

public:
  ShenandoahRegionIterator();
  ShenandoahRegionIterator(ShenandoahHeap* heap);

  // Reset iterator to default state
  void reset();

  // Returns next region, or null if there are no more regions
  inline ShenandoahHeapRegion* next();

  // This is *not* MT safe. However, in the absence of multithreaded access, it
  // can be used to determine if there is more work to do.
  bool has_next() const;
};

class ShenandoahHeapRegionClosure : public StackObj {
public:
  virtual void heap_region_do(ShenandoahHeapRegion* r) = 0;
  virtual bool is_thread_safe() { return false; }
};

typedef ShenandoahLock ShenandoahHeapLock;
typedef ShenandoahLocker ShenandoahHeapLocker;
typedef Stack<oop, mtGC> ShenandoahScanObjectStack;

// Shenandoah GC is a low-pause concurrent GC that uses Brooks forwarding pointers
// to encode forwarding data. See BrooksPointer for details on forwarding data encoding.
// See ShenandoahControlThread for GC cycle structure.
//
class ShenandoahHeap : public CollectedHeap {
  friend class ShenandoahAsserts;
  friend class VMStructs;
  friend class ShenandoahGCSession;
  friend class ShenandoahGCStateResetter;
  friend class ShenandoahParallelObjectIterator;
  friend class ShenandoahSafepoint;
  // Supported GC
  friend class ShenandoahConcurrentGC;
  friend class ShenandoahDegenGC;
  friend class ShenandoahFullGC;
  friend class ShenandoahUnload;

// ---------- Locks that guard important data structures in Heap
//
private:
  ShenandoahHeapLock _lock;

public:
  ShenandoahHeapLock* lock() {
    return &_lock;
  }
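
  // Illustrative sketch (editor's addition, not upstream code): heap-global data
  // structures such as the free set are mutated under this lock via the
  // ShenandoahHeapLocker RAII wrapper typedef'ed above:
  //
  //   {
  //     ShenandoahHeapLocker locker(heap->lock());
  //     // ... update free set / region states while the lock is held ...
  //   }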

// ---------- Initialization, termination, identification, printing routines
//
public:
  static ShenandoahHeap* heap();

  const char* name() const override { return "Shenandoah"; }
  ShenandoahHeap::Name kind() const override { return CollectedHeap::Shenandoah; }

  ShenandoahHeap(ShenandoahCollectorPolicy* policy);
  jint initialize() override;
  void post_initialize() override;
  void initialize_mode();
  void initialize_heuristics();

  void initialize_serviceability() override;

  void print_on(outputStream* st) const override;
  void print_extended_on(outputStream *st) const override;
  void print_tracing_info() const override;
  void print_heap_regions_on(outputStream* st) const;

  void stop() override;

  void prepare_for_verify() override;
  void verify(VerifyOption vo) override;

  // WhiteBox testing support.
  bool supports_concurrent_gc_breakpoints() const override {
    return true;
  }

// ---------- Heap counters and metrics
//
private:
  size_t _initial_size;
  size_t _minimum_size;
  volatile size_t _soft_max_size;
  shenandoah_padding(0);
  volatile size_t _used;
  volatile size_t _committed;
  volatile size_t _bytes_allocated_since_gc_start;
  shenandoah_padding(1);

public:
  void increase_used(size_t bytes);
  void decrease_used(size_t bytes);
  void set_used(size_t bytes);

  void increase_committed(size_t bytes);
  void decrease_committed(size_t bytes);
  void increase_allocated(size_t bytes);

  size_t bytes_allocated_since_gc_start();
  void reset_bytes_allocated_since_gc_start();

  size_t min_capacity() const;
  size_t max_capacity() const override;
  size_t soft_max_capacity() const;
  size_t initial_capacity() const;
  size_t capacity() const override;
  size_t used() const override;
  size_t committed() const;

  void set_soft_max_capacity(size_t v);

// ---------- Workers handling
//
private:
  uint _max_workers;
  ShenandoahWorkerThreads* _workers;
  ShenandoahWorkerThreads* _safepoint_workers;

public:
  uint max_workers();
  void assert_gc_workers(uint nworker) NOT_DEBUG_RETURN;

  WorkerThreads* workers() const;
  WorkerThreads* safepoint_workers() override;

  void gc_threads_do(ThreadClosure* tcl) const override;

// ---------- Heap regions handling machinery
//
private:
  MemRegion _heap_region;
  bool _heap_region_special;
  size_t _num_regions;
  ShenandoahHeapRegion** _regions;
  ShenandoahRegionIterator _update_refs_iterator;

public:

  inline HeapWord* base() const { return _heap_region.start(); }

  inline size_t num_regions() const { return _num_regions; }
  inline bool is_heap_region_special() { return _heap_region_special; }

  inline ShenandoahHeapRegion* const heap_region_containing(const void* addr) const;
  inline size_t heap_region_index_containing(const void* addr) const;

  inline ShenandoahHeapRegion* const get_region(size_t region_idx) const;

  void heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
  void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;

// ---------- GC state machinery
//
// GC state describes the important parts of collector state, that may be
// used to make barrier selection decisions in the native and generated code.
// Multiple bits can be set at once.
//
// Important invariant: when GC state is zero, the heap is stable, and no barriers
// are required.
//
public:
  enum GCStateBitPos {
    // Heap has forwarded objects: needs LRB barriers.
    HAS_FORWARDED_BITPOS = 0,

    // Heap is under marking: needs SATB barriers.
    MARKING_BITPOS = 1,

    // Heap is under evacuation: needs LRB barriers. (Set together with HAS_FORWARDED)
    EVACUATION_BITPOS = 2,

    // Heap is under updating: needs no additional barriers.
    UPDATEREFS_BITPOS = 3,

    // Heap is under weak-reference/roots processing: needs weak-LRB barriers.
    WEAK_ROOTS_BITPOS = 4,
  };

  enum GCState {
    STABLE = 0,
    HAS_FORWARDED = 1 << HAS_FORWARDED_BITPOS,
    MARKING = 1 << MARKING_BITPOS,
    EVACUATION = 1 << EVACUATION_BITPOS,
    UPDATEREFS = 1 << UPDATEREFS_BITPOS,
    WEAK_ROOTS = 1 << WEAK_ROOTS_BITPOS,
  };
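
  // Illustrative sketch (editor's addition, not upstream code): barrier code tests
  // these bits (typically via a thread-local copy of the state) to decide what work
  // to do, roughly:
  //
  //   char state = ShenandoahHeap::heap()->gc_state();
  //   if (state & ShenandoahHeap::MARKING) {
  //     // SATB barrier: enqueue the previous value of the updated field
  //   }
  //   if (state & ShenandoahHeap::HAS_FORWARDED) {
  //     // Load reference barrier: return the forwardee instead of the stale copy
  //   }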

private:
  bool _gc_state_changed;
  ShenandoahSharedBitmap _gc_state;
  ShenandoahSharedFlag _degenerated_gc_in_progress;
  ShenandoahSharedFlag _full_gc_in_progress;
  ShenandoahSharedFlag _full_gc_move_in_progress;
  ShenandoahSharedFlag _progress_last_gc;
  ShenandoahSharedFlag _concurrent_strong_root_in_progress;

  // This updates the singular, global gc state. This must happen on a safepoint.
  void set_gc_state(uint mask, bool value);

public:
  char gc_state() const;

  // This copies the global gc state into a thread-local variable for Java threads.
  // It is primarily intended to support quick access at barriers.
  void propagate_gc_state_to_java_threads();

  // This is public to support assertions that the state hasn't been changed off of
  // a safepoint and that any changes were propagated to Java threads after the safepoint.
  bool has_gc_state_changed() const { return _gc_state_changed; }

  void set_concurrent_mark_in_progress(bool in_progress);
  void set_evacuation_in_progress(bool in_progress);
  void set_update_refs_in_progress(bool in_progress);
  void set_degenerated_gc_in_progress(bool in_progress);
  void set_full_gc_in_progress(bool in_progress);
  void set_full_gc_move_in_progress(bool in_progress);
  void set_has_forwarded_objects(bool cond);
  void set_concurrent_strong_root_in_progress(bool cond);
  void set_concurrent_weak_root_in_progress(bool cond);

  inline bool is_stable() const;
  inline bool is_idle() const;
  inline bool is_concurrent_mark_in_progress() const;
  inline bool is_update_refs_in_progress() const;
  inline bool is_evacuation_in_progress() const;
  inline bool is_degenerated_gc_in_progress() const;
  inline bool is_full_gc_in_progress() const;
  inline bool is_full_gc_move_in_progress() const;
  inline bool has_forwarded_objects() const;

  inline bool is_stw_gc_in_progress() const;
  inline bool is_concurrent_strong_root_in_progress() const;
  inline bool is_concurrent_weak_root_in_progress() const;

private:
  enum CancelState {
    // Normal state. GC has not been cancelled and is open for cancellation.
    // Worker threads can suspend for safepoint.
    CANCELLABLE,

    // GC has been cancelled. Worker threads cannot suspend for
    // safepoint but must finish their work as soon as possible.
    CANCELLED
  };

  ShenandoahSharedEnumFlag<CancelState> _cancelled_gc;
  bool try_cancel_gc();

public:

  inline bool cancelled_gc() const;
  inline bool check_cancelled_gc_and_yield(bool sts_active = true);

  inline void clear_cancelled_gc();

  void cancel_gc(GCCause::Cause cause);
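
  // Illustrative sketch (editor's addition, not upstream code): a GC worker loop
  // polls for cancellation and yields to pending safepoints; has_work() and
  // do_some_work() are hypothetical stand-ins:
  //
  //   while (has_work()) {
  //     if (heap->check_cancelled_gc_and_yield()) {
  //       return;   // cycle was cancelled, bail out promptly
  //     }
  //     do_some_work();
  //   }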

public:
  // Elastic heap support
  void entry_uncommit(double shrink_before, size_t shrink_until);
  void op_uncommit(double shrink_before, size_t shrink_until);

private:
  // GC support
  // Reset bitmap, prepare regions for new GC cycle
  void prepare_gc();
  void prepare_regions_and_collection_set(bool concurrent);
  // Evacuation
  void evacuate_collection_set(bool concurrent);
  // Concurrent root processing
  void prepare_concurrent_roots();
  void finish_concurrent_roots();
  // Concurrent class unloading support
  void do_class_unloading();
  // Reference updating
  void prepare_update_heap_references(bool concurrent);
  void update_heap_references(bool concurrent);
  // Final update region states
  void update_heap_region_states(bool concurrent);
  void rebuild_free_set(bool concurrent);

  void rendezvous_threads();
  void recycle_trash();
public:
  void notify_gc_progress() { _progress_last_gc.set(); }
  void notify_gc_no_progress() { _progress_last_gc.unset(); }

//
// Mark support
private:
  ShenandoahControlThread* _control_thread;
  ShenandoahCollectorPolicy* _shenandoah_policy;
  ShenandoahMode* _gc_mode;
  ShenandoahHeuristics* _heuristics;
  ShenandoahFreeSet* _free_set;
  ShenandoahPacer* _pacer;
  ShenandoahVerifier* _verifier;

  ShenandoahPhaseTimings* _phase_timings;

  ShenandoahControlThread* control_thread() { return _control_thread; }

public:
  ShenandoahCollectorPolicy* shenandoah_policy() const { return _shenandoah_policy; }
  ShenandoahMode* mode() const { return _gc_mode; }
  ShenandoahHeuristics* heuristics() const { return _heuristics; }
  ShenandoahFreeSet* free_set() const { return _free_set; }
  ShenandoahPacer* pacer() const { return _pacer; }

  ShenandoahPhaseTimings* phase_timings() const { return _phase_timings; }

  ShenandoahVerifier* verifier();

// ---------- VM subsystem bindings
//
private:
  ShenandoahMonitoringSupport* _monitoring_support;
  MemoryPool* _memory_pool;
  GCMemoryManager _stw_memory_manager;
  GCMemoryManager _cycle_memory_manager;
  ConcurrentGCTimer* _gc_timer;
  SoftRefPolicy _soft_ref_policy;

  // For exporting to SA
  int _log_min_obj_alignment_in_bytes;
public:
  ShenandoahMonitoringSupport* monitoring_support() { return _monitoring_support; }
  GCMemoryManager* cycle_memory_manager() { return &_cycle_memory_manager; }
  GCMemoryManager* stw_memory_manager() { return &_stw_memory_manager; }
  SoftRefPolicy* soft_ref_policy() override { return &_soft_ref_policy; }

  GrowableArray<GCMemoryManager*> memory_managers() override;
  GrowableArray<MemoryPool*> memory_pools() override;
  MemoryUsage memory_usage() override;
  GCTracer* tracer();
  ConcurrentGCTimer* gc_timer() const;

// ---------- Reference processing
//
private:
  ShenandoahReferenceProcessor* const _ref_processor;

public:
  ShenandoahReferenceProcessor* ref_processor() { return _ref_processor; }

// ---------- Class Unloading
//
private:
  ShenandoahSharedFlag _unload_classes;
  ShenandoahUnload _unloader;

public:
  void set_unload_classes(bool uc);
  bool unload_classes() const;

  // Perform STW class unloading and weak root cleaning
  void parallel_cleaning(bool full_gc);

private:
  void stw_unload_classes(bool full_gc);
  void stw_process_weak_roots(bool full_gc);
  void stw_weak_refs(bool full_gc);

  // Heap iteration support
  void scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops);
  bool prepare_aux_bitmap_for_iteration();
  void reclaim_aux_bitmap_for_iteration();

// ---------- Generic interface hooks
// Minor things that super-interface expects us to implement to play nice with
// the rest of runtime. Some of the things here are not required to be implemented,
// and can be stubbed out.
//
public:
  bool is_maximal_no_gc() const override shenandoah_not_implemented_return(false);

  bool is_in(const void* p) const override;

  bool requires_barriers(stackChunkOop obj) const override;

  MemRegion reserved_region() const { return _reserved; }
  bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); }

  void collect_as_vm_thread(GCCause::Cause cause) override;
  void collect(GCCause::Cause cause) override;
  void do_full_collection(bool clear_all_soft_refs) override;

  // Used for parsing heap during error printing
  HeapWord* block_start(const void* addr) const;
  bool block_is_obj(const HeapWord* addr) const;
  bool print_location(outputStream* st, void* addr) const override;

  // Used for native heap walkers: heap dumpers, mostly
  void object_iterate(ObjectClosure* cl) override;
  // Parallel heap iteration support
  ParallelObjectIteratorImpl* parallel_object_iterator(uint workers) override;

  // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
  void keep_alive(oop obj);

// ---------- Safepoint interface hooks
//
public:
  void safepoint_synchronize_begin() override;
  void safepoint_synchronize_end() override;

// ---------- Code roots handling hooks
//
public:
  void register_nmethod(nmethod* nm) override;
  void unregister_nmethod(nmethod* nm) override;
  void verify_nmethod(nmethod* nm) override {}

// ---------- Pinning hooks
//
public:
  // Shenandoah supports per-object (per-region) pinning
  void pin_object(JavaThread* thread, oop obj) override;
  void unpin_object(JavaThread* thread, oop obj) override;

  void sync_pinned_region_status();
  void assert_pinned_region_status() NOT_DEBUG_RETURN;

// ---------- Concurrent Stack Processing support
//
public:
  bool uses_stack_watermark_barrier() const override { return true; }

// ---------- Allocation support
//
private:
  HeapWord* allocate_memory_under_lock(ShenandoahAllocRequest& request, bool& in_new_region);
  inline HeapWord* allocate_from_gclab(Thread* thread, size_t size);
  HeapWord* allocate_from_gclab_slow(Thread* thread, size_t size);
  HeapWord* allocate_new_gclab(size_t min_size, size_t word_size, size_t* actual_size);

public:
  HeapWord* allocate_memory(ShenandoahAllocRequest& request);
  HeapWord* mem_allocate(size_t size, bool* what) override;
  MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                               size_t size,
                                               Metaspace::MetadataType mdtype) override;

  void notify_mutator_alloc_words(size_t words, bool waste);

  HeapWord* allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) override;
  size_t tlab_capacity(Thread *thr) const override;
  size_t unsafe_max_tlab_alloc(Thread *thread) const override;
  size_t max_tlab_size() const override;
  size_t tlab_used(Thread* ignored) const override;

  void ensure_parsability(bool retire_labs) override;

  void labs_make_parsable();
  void tlabs_retire(bool resize);
  void gclabs_retire(bool resize);

// ---------- Marking support
//
private:
  ShenandoahMarkingContext* _marking_context;
  MemRegion _bitmap_region;
  MemRegion _aux_bitmap_region;
  MarkBitMap _verification_bit_map;
  MarkBitMap _aux_bit_map;

  size_t _bitmap_size;
  size_t _bitmap_regions_per_slice;
  size_t _bitmap_bytes_per_slice;

  size_t _pretouch_heap_page_size;
  size_t _pretouch_bitmap_page_size;

  bool _bitmap_region_special;
  bool _aux_bitmap_region_special;

  ShenandoahLiveData** _liveness_cache;

public:
  inline ShenandoahMarkingContext* complete_marking_context() const;
  inline ShenandoahMarkingContext* marking_context() const;
  inline void mark_complete_marking_context();
  inline void mark_incomplete_marking_context();

  template<class T>
  inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl);

  template<class T>
  inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);

  template<class T>
  inline void marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);

  void reset_mark_bitmap();

  // SATB barriers hooks
  inline bool requires_marking(const void* entry) const;

  // Support for bitmap uncommits
  bool commit_bitmap_slice(ShenandoahHeapRegion *r);
  bool uncommit_bitmap_slice(ShenandoahHeapRegion *r);
  bool is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self = false);

  // Liveness caching support
  ShenandoahLiveData* get_liveness_cache(uint worker_id);
  void flush_liveness_cache(uint worker_id);

  size_t pretouch_heap_page_size() { return _pretouch_heap_page_size; }

// ---------- Evacuation support
//
private:
  ShenandoahCollectionSet* _collection_set;
  ShenandoahEvacOOMHandler _oom_evac_handler;

public:
  static address in_cset_fast_test_addr();

  ShenandoahCollectionSet* collection_set() const { return _collection_set; }

  // Checks if object is in the collection set.
  inline bool in_collection_set(oop obj) const;

  // Checks if location is in the collection set. Can be interior pointer, not the oop itself.
  inline bool in_collection_set_loc(void* loc) const;

  // Evacuates object src. Returns the evacuated object, either evacuated
  // by this thread, or by some other thread.
  inline oop evacuate_object(oop src, Thread* thread);

  // Call before/after evacuation.
  inline void enter_evacuation(Thread* t);
  inline void leave_evacuation(Thread* t);

// ---------- Helper functions
//
public:
  template <class T>
  inline void conc_update_with_forwarded(T* p);

  template <class T>
  inline void update_with_forwarded(T* p);

  static inline void atomic_update_oop(oop update,       oop* addr,       oop compare);
  static inline void atomic_update_oop(oop update, narrowOop* addr,       oop compare);
  static inline void atomic_update_oop(oop update, narrowOop* addr, narrowOop compare);

  static inline bool atomic_update_oop_check(oop update,       oop* addr,       oop compare);
  static inline bool atomic_update_oop_check(oop update, narrowOop* addr,       oop compare);
  static inline bool atomic_update_oop_check(oop update, narrowOop* addr, narrowOop compare);

  static inline void atomic_clear_oop(      oop* addr,       oop compare);
  static inline void atomic_clear_oop(narrowOop* addr,       oop compare);
  static inline void atomic_clear_oop(narrowOop* addr, narrowOop compare);
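
  // Illustrative sketch (editor's addition, not upstream code): healing a heap
  // reference that still points at a stale copy. resolve_forwarded_not_null() is
  // the ShenandoahBarrierSet helper; the CAS installs fwd only if *addr still
  // holds the old oop, so racing updaters converge on the forwardee:
  //
  //   oop obj = RawAccess<>::oop_load(addr);
  //   oop fwd = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
  //   if (obj != fwd) {
  //     ShenandoahHeap::atomic_update_oop(fwd, addr, obj);
  //   }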

  void trash_humongous_region_at(ShenandoahHeapRegion *r);

private:
  void trash_cset_regions();

// ---------- Testing helper functions
//
private:
  ShenandoahSharedFlag _inject_alloc_failure;

  void try_inject_alloc_failure();
  bool should_inject_alloc_failure();
};

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
/*
 * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP

#include "gc/shared/markBitMap.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shenandoah/shenandoahAllocRequest.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahController.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
#include "gc/shenandoah/shenandoahEvacTracker.hpp"
#include "gc/shenandoah/shenandoahGenerationType.hpp"
#include "gc/shenandoah/shenandoahGenerationSizer.hpp"
#include "gc/shenandoah/shenandoahMmuTracker.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "gc/shenandoah/shenandoahSharedVariables.hpp"
#include "gc/shenandoah/shenandoahUnload.hpp"
#include "memory/metaspace.hpp"
#include "services/memoryManager.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.hpp"

class ConcurrentGCTimer;
class ObjectIterateScanRootClosure;
class ShenandoahCollectorPolicy;
class ShenandoahGCSession;
class ShenandoahGCStateResetter;
class ShenandoahGeneration;
class ShenandoahYoungGeneration;
class ShenandoahOldGeneration;
class ShenandoahHeuristics;
class ShenandoahMarkingContext;
class ShenandoahMode;
class ShenandoahPhaseTimings;
class ShenandoahHeap;
class ShenandoahHeapRegion;
class ShenandoahHeapRegionClosure;
class ShenandoahCollectionSet;
class ShenandoahFreeSet;
class ShenandoahConcurrentMark;
class ShenandoahFullGC;
class ShenandoahMonitoringSupport;
class ShenandoahPacer;
class ShenandoahReferenceProcessor;
class ShenandoahUncommitThread;
class ShenandoahVerifier;
class ShenandoahWorkerThreads;
class VMStructs;

// Used for buffering per-region liveness data.
// Needed since ShenandoahHeapRegion uses atomics to update liveness.
// The ShenandoahHeap array has max-workers elements, each of which is an array of
// uint16_t * max_regions. The choice of uint16_t is not accidental:
// there is a tradeoff between static/dynamic footprint that translates
// into cache pressure (which is already high during marking), and
// too many atomic updates. uint32_t is too large, uint8_t is too small.
typedef uint16_t ShenandoahLiveData;
#define SHENANDOAH_LIVEDATA_MAX ((ShenandoahLiveData)-1)

class ShenandoahRegionIterator : public StackObj {
private:
  ShenandoahHeap* _heap;

  shenandoah_padding(0);
  volatile size_t _index;
  shenandoah_padding(1);

  // No implicit copying: iterators should be passed by reference to capture the state
  NONCOPYABLE(ShenandoahRegionIterator);

public:
  ShenandoahRegionIterator();
  ShenandoahRegionIterator(ShenandoahHeap* heap);

  // Reset iterator to default state
  void reset();

  // Returns next region, or null if there are no more regions
  inline ShenandoahHeapRegion* next();

  // This is *not* MT safe. However, in the absence of multithreaded access, it
  // can be used to determine if there is more work to do.
  bool has_next() const;
};

class ShenandoahHeapRegionClosure : public StackObj {
public:
  virtual void heap_region_do(ShenandoahHeapRegion* r) = 0;
  virtual bool is_thread_safe() { return false; }
};

typedef ShenandoahLock ShenandoahHeapLock;
typedef ShenandoahLocker ShenandoahHeapLocker;
typedef Stack<oop, mtGC> ShenandoahScanObjectStack;

// Shenandoah GC is a low-pause concurrent GC that uses Brooks forwarding pointers
// to encode forwarding data. See BrooksPointer for details on forwarding data encoding.
// See ShenandoahControlThread for GC cycle structure.
//
class ShenandoahHeap : public CollectedHeap {
  friend class ShenandoahAsserts;
  friend class VMStructs;
  friend class ShenandoahGCSession;
  friend class ShenandoahGCStateResetter;
  friend class ShenandoahParallelObjectIterator;
  friend class ShenandoahSafepoint;

  // Supported GC
  friend class ShenandoahConcurrentGC;
  friend class ShenandoahOldGC;
  friend class ShenandoahDegenGC;
  friend class ShenandoahFullGC;
  friend class ShenandoahUnload;

// ---------- Locks that guard important data structures in Heap
//
private:
  ShenandoahHeapLock _lock;

  // Indicates the generation whose collection is in
  // progress. Mutator threads aren't allowed to read
  // this field.
  ShenandoahGeneration* _gc_generation;

  // This is set and cleared by only the VMThread
  // at each STW pause (safepoint) to the value seen in
  // _gc_generation. This allows the value to be always consistently
  // seen by all mutators as well as all GC worker threads.
  // In that sense, it's a stable snapshot of _gc_generation that is
  // updated at each STW pause associated with a ShenandoahVMOp.
  ShenandoahGeneration* _active_generation;

public:
  ShenandoahHeapLock* lock() {
    return &_lock;
  }

  ShenandoahGeneration* gc_generation() const {
    // We don't want this field read by a mutator thread
    assert(!Thread::current()->is_Java_thread(), "Not allowed");
    // value of _gc_generation field, see above
    return _gc_generation;
  }

  ShenandoahGeneration* active_generation() const {
    // value of _active_generation field, see above
    return _active_generation;
  }

  // Set the _gc_generation field
  void set_gc_generation(ShenandoahGeneration* generation);

  // Copy the value in the _gc_generation field into
  // the _active_generation field: can only be called at
  // a safepoint by the VMThread.
  void set_active_generation();
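
  // Illustrative sketch (editor's addition, not upstream code): roughly how the
  // two fields are intended to interact. The control thread picks the generation,
  // then the VMThread publishes a stable snapshot at the next STW pause:
  //
  //   heap->set_gc_generation(generation);   // control thread, outside a safepoint
  //   ...
  //   heap->set_active_generation();         // VMThread, at the safepoint
  //   // all threads now consistently observe active_generation()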

  ShenandoahHeuristics* heuristics();

// ---------- Initialization, termination, identification, printing routines
//
public:
  static ShenandoahHeap* heap();

  const char* name() const override { return "Shenandoah"; }
  ShenandoahHeap::Name kind() const override { return CollectedHeap::Shenandoah; }

  ShenandoahHeap(ShenandoahCollectorPolicy* policy);
  jint initialize() override;
  void post_initialize() override;
  void initialize_mode();
  virtual void initialize_heuristics();
  virtual void print_init_logger() const;
  void initialize_serviceability() override;

  void print_on(outputStream* st) const override;
  void print_extended_on(outputStream *st) const override;
  void print_tracing_info() const override;
  void print_heap_regions_on(outputStream* st) const;

  void stop() override;

  void prepare_for_verify() override;
  void verify(VerifyOption vo) override;

  // WhiteBox testing support.
  bool supports_concurrent_gc_breakpoints() const override {
    return true;
  }

// ---------- Heap counters and metrics
//
private:
  size_t _initial_size;
  size_t _minimum_size;

  volatile size_t _soft_max_size;
  shenandoah_padding(0);
  volatile size_t _committed;
  shenandoah_padding(1);

  void increase_used(const ShenandoahAllocRequest& req);

public:
  void increase_used(ShenandoahGeneration* generation, size_t bytes);
  void decrease_used(ShenandoahGeneration* generation, size_t bytes);
  void increase_humongous_waste(ShenandoahGeneration* generation, size_t bytes);
  void decrease_humongous_waste(ShenandoahGeneration* generation, size_t bytes);

  void increase_committed(size_t bytes);
  void decrease_committed(size_t bytes);

  void reset_bytes_allocated_since_gc_start();

  size_t min_capacity() const;
  size_t max_capacity() const override;
  size_t soft_max_capacity() const;
  size_t initial_capacity() const;
  size_t capacity() const override;
  size_t used() const override;
  size_t committed() const;

  void set_soft_max_capacity(size_t v);

// ---------- Periodic Tasks
//
public:
  // Notify heuristics and region state change logger that the state of the heap has changed
  void notify_heap_changed();

  // Force counters to update
  void set_forced_counters_update(bool value);

  // Update counters if forced flag is set
  void handle_force_counters_update();

// ---------- Workers handling
//
private:
  uint _max_workers;
  ShenandoahWorkerThreads* _workers;
  ShenandoahWorkerThreads* _safepoint_workers;

  virtual void initialize_controller();

public:
  uint max_workers();
  void assert_gc_workers(uint nworker) NOT_DEBUG_RETURN;

  WorkerThreads* workers() const;
  WorkerThreads* safepoint_workers() override;

  void gc_threads_do(ThreadClosure* tcl) const override;

// ---------- Heap regions handling machinery
//
private:
  MemRegion _heap_region;
  bool _heap_region_special;
  size_t _num_regions;
  ShenandoahHeapRegion** _regions;
  uint8_t* _affiliations; // Holds array of enum ShenandoahAffiliation, including FREE status in non-generational mode

public:

  inline HeapWord* base() const { return _heap_region.start(); }

  inline size_t num_regions() const { return _num_regions; }
  inline bool is_heap_region_special() { return _heap_region_special; }

  inline ShenandoahHeapRegion* heap_region_containing(const void* addr) const;
  inline size_t heap_region_index_containing(const void* addr) const;

  inline ShenandoahHeapRegion* get_region(size_t region_idx) const;

  void heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
  void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;

  inline ShenandoahMmuTracker* mmu_tracker() { return &_mmu_tracker; }

// ---------- GC state machinery
//
// GC state describes the important parts of collector state, that may be
// used to make barrier selection decisions in the native and generated code.
// Multiple bits can be set at once.
//
// Important invariant: when GC state is zero, the heap is stable, and no barriers
// are required.
//
public:
  enum GCStateBitPos {
    // Heap has forwarded objects: needs LRB barriers.
    HAS_FORWARDED_BITPOS = 0,

    // Heap is under marking: needs SATB barriers.
    // For generational mode, it means either young or old marking, or both.
    MARKING_BITPOS = 1,

    // Heap is under evacuation: needs LRB barriers. (Set together with HAS_FORWARDED)
    EVACUATION_BITPOS = 2,

    // Heap is under updating: needs no additional barriers.
    UPDATE_REFS_BITPOS = 3,

    // Heap is under weak-reference/roots processing: needs weak-LRB barriers.
    WEAK_ROOTS_BITPOS = 4,

    // Young regions are under marking, need SATB barriers.
    YOUNG_MARKING_BITPOS = 5,

    // Old regions are under marking, need SATB barriers.
    OLD_MARKING_BITPOS = 6
  };

  enum GCState {
    STABLE = 0,
    HAS_FORWARDED = 1 << HAS_FORWARDED_BITPOS,
    MARKING = 1 << MARKING_BITPOS,
    EVACUATION = 1 << EVACUATION_BITPOS,
    UPDATE_REFS = 1 << UPDATE_REFS_BITPOS,
    WEAK_ROOTS = 1 << WEAK_ROOTS_BITPOS,
    YOUNG_MARKING = 1 << YOUNG_MARKING_BITPOS,
    OLD_MARKING = 1 << OLD_MARKING_BITPOS
  };

private:
  bool _gc_state_changed;
  ShenandoahSharedBitmap _gc_state;
  ShenandoahSharedFlag _heap_changed;
  ShenandoahSharedFlag _degenerated_gc_in_progress;
  ShenandoahSharedFlag _full_gc_in_progress;
  ShenandoahSharedFlag _full_gc_move_in_progress;
  ShenandoahSharedFlag _concurrent_strong_root_in_progress;

  size_t _gc_no_progress_count;

  // This updates the singular, global gc state. This call must happen on a safepoint.
  void set_gc_state_at_safepoint(uint mask, bool value);

  // This also updates the global gc state, but does not need to be called on a safepoint.
  // Critically, this method will _not_ flag that the global gc state has changed and threads
  // will continue to use their thread-local copy. This is expected to be used in conjunction
  // with a handshake operation to propagate the new gc state.
  void set_gc_state_concurrent(uint mask, bool value);

public:
  // This returns the raw value of the singular, global gc state.
  char gc_state() const;

  // Compares the given state against either the global gc state or the thread-local state.
  // The global gc state may change on a safepoint and is the correct value to use until
  // it has been propagated to all threads (after which this method compares against the
  // thread-local state). The thread-local gc state may also be changed by a handshake
  // operation, in which case this method continues to use the updated thread-local value.
  bool is_gc_state(GCState state) const;

  // This copies the global gc state into a thread-local variable for all threads.
  // The thread-local gc state is primarily intended to support quick access at barriers.
  // All threads are updated because in some cases the control thread or the VM thread may
  // need to execute the load reference barrier.
  void propagate_gc_state_to_all_threads();

  // This is public to support assertions that the state hasn't been changed off of
  // a safepoint and that any changes were propagated to threads after the safepoint.
  bool has_gc_state_changed() const { return _gc_state_changed; }

  // Returns true if allocations have occurred in new regions or if regions have been
  // uncommitted since the previous call. This call resets the flag to false.
  bool has_changed() {
    return _heap_changed.try_unset();
  }
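
  // Illustrative sketch (editor's addition, not upstream code): a periodic task
  // can poll this flag to trigger follow-up work; react_to_heap_change() is a
  // hypothetical stand-in:
  //
  //   if (heap->has_changed()) {   // reads and resets the flag
  //     react_to_heap_change();    // e.g. refresh counters or region state logs
  //   }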

  void set_concurrent_young_mark_in_progress(bool in_progress);
  void set_concurrent_old_mark_in_progress(bool in_progress);
  void set_evacuation_in_progress(bool in_progress);
  void set_update_refs_in_progress(bool in_progress);
  void set_degenerated_gc_in_progress(bool in_progress);
  void set_full_gc_in_progress(bool in_progress);
  void set_full_gc_move_in_progress(bool in_progress);
  void set_has_forwarded_objects(bool cond);
  void set_concurrent_strong_root_in_progress(bool cond);
  void set_concurrent_weak_root_in_progress(bool cond);

  inline bool is_idle() const;
  inline bool is_concurrent_mark_in_progress() const;
  inline bool is_concurrent_young_mark_in_progress() const;
  inline bool is_concurrent_old_mark_in_progress() const;
  inline bool is_update_refs_in_progress() const;
  inline bool is_evacuation_in_progress() const;
  inline bool is_degenerated_gc_in_progress() const;
  inline bool is_full_gc_in_progress() const;
  inline bool is_full_gc_move_in_progress() const;
  inline bool has_forwarded_objects() const;

  inline bool is_stw_gc_in_progress() const;
  inline bool is_concurrent_strong_root_in_progress() const;
  inline bool is_concurrent_weak_root_in_progress() const;
  bool is_prepare_for_old_mark_in_progress() const;

private:
  void manage_satb_barrier(bool active);

  // Records the time of the first successful cancellation request. This is used to measure
  // the responsiveness of the heuristic when starting a cycle.
  double _cancel_requested_time;

  // Indicates the reason the current GC has been cancelled (GCCause::_no_gc means the gc is not cancelled).
  ShenandoahSharedEnumFlag<GCCause::Cause> _cancelled_gc;

  // Returns true if cancel request was successfully communicated.
  // Returns false if some other thread already communicated cancel
  // request. A true return value does not mean GC has been
  // cancelled, only that the process of cancelling GC has begun.
  bool try_cancel_gc(GCCause::Cause cause);

public:
  // True if gc has been cancelled
  inline bool cancelled_gc() const;

  // Used by workers in the GC cycle to detect cancellation and honor STS requirements
  inline bool check_cancelled_gc_and_yield(bool sts_active = true);

  // This indicates the reason the last GC cycle was cancelled.
  inline GCCause::Cause cancelled_cause() const;

  // Clears the cancellation cause and optionally resets the oom handler (cancelling an
  // old mark does _not_ touch the oom handler).
  inline void clear_cancelled_gc(bool clear_oom_handler = true);

  void cancel_concurrent_mark();

  // Returns true if and only if this call caused a gc to be cancelled.
  bool cancel_gc(GCCause::Cause cause);
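
  // Illustrative sketch (editor's addition, not upstream code): after a cancelled
  // cycle, the cause can steer the follow-up; the mapping shown is an assumption:
  //
  //   if (heap->cancelled_gc()) {
  //     if (heap->cancelled_cause() == GCCause::_allocation_failure) {
  //       // fall back to a degenerated cycle to satisfy the allocation
  //     }
  //   }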

  // Returns true if the soft maximum heap has been changed using management APIs.
  bool check_soft_max_changed();

protected:
  // This is shared between shConcurrentGC and shDegenerateGC so that degenerated
  // GC can resume update refs from where the concurrent GC was cancelled. It is
  // also used in shGenerationalHeap, which uses a different closure for update refs.
  ShenandoahRegionIterator _update_refs_iterator;

private:
  // GC support
  // Evacuation
  virtual void evacuate_collection_set(bool concurrent);
  // Concurrent root processing
  void prepare_concurrent_roots();
  void finish_concurrent_roots();
  // Concurrent class unloading support
  void do_class_unloading();
  // Reference updating
  void prepare_update_heap_references();

  // Retires LABs used for evacuation
  void concurrent_prepare_for_update_refs();

  // Turn off weak roots flag, purge old satb buffers in generational mode
  void concurrent_final_roots(HandshakeClosure* handshake_closure = nullptr);

  virtual void update_heap_references(bool concurrent);
  // Final update region states
  void update_heap_region_states(bool concurrent);
  virtual void final_update_refs_update_region_states();

  void rendezvous_threads();
  void recycle_trash();
public:
  void rebuild_free_set(bool concurrent);
  void notify_gc_progress();
  void notify_gc_no_progress();
  size_t get_gc_no_progress_count() const;

  // The uncommit thread targets soft max heap, notify this thread when that value has changed.
  void notify_soft_max_changed();

  // An explicit GC request may have freed regions, notify the uncommit thread.
  void notify_explicit_gc_requested();

private:
  ShenandoahGeneration* _global_generation;

protected:
  // The control thread presides over concurrent collection cycles
  ShenandoahController* _control_thread;

  // The uncommit thread periodically attempts to uncommit regions that have been empty for longer than ShenandoahUncommitDelay
  ShenandoahUncommitThread* _uncommit_thread;

  ShenandoahYoungGeneration* _young_generation;
  ShenandoahOldGeneration* _old_generation;

private:
  ShenandoahCollectorPolicy* _shenandoah_policy;
  ShenandoahMode* _gc_mode;
  ShenandoahFreeSet* _free_set;
  ShenandoahPacer* _pacer;
  ShenandoahVerifier* _verifier;

  ShenandoahPhaseTimings* _phase_timings;
  ShenandoahMmuTracker _mmu_tracker;

public:
  ShenandoahController* control_thread() const { return _control_thread; }

  ShenandoahGeneration* global_generation() const { return _global_generation; }
  ShenandoahYoungGeneration* young_generation() const {
    assert(mode()->is_generational(), "Young generation requires generational mode");
    return _young_generation;
  }

  ShenandoahOldGeneration* old_generation() const {
    assert(mode()->is_generational(), "Old generation requires generational mode");
    return _old_generation;
  }
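
  // Illustrative sketch (editor's addition, not upstream code): callers guard the
  // generation accessors on the mode, since the asserts above fire otherwise:
  //
  //   if (heap->mode()->is_generational()) {
  //     ShenandoahYoungGeneration* young = heap->young_generation();
  //     // ... query or resize the young generation ...
  //   } else {
  //     ShenandoahGeneration* global = heap->global_generation();
  //   }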

  ShenandoahGeneration* generation_for(ShenandoahAffiliation affiliation) const;

  ShenandoahCollectorPolicy* shenandoah_policy() const { return _shenandoah_policy; }
  ShenandoahMode* mode() const { return _gc_mode; }
  ShenandoahFreeSet* free_set() const { return _free_set; }
  ShenandoahPacer* pacer() const { return _pacer; }

  ShenandoahPhaseTimings* phase_timings() const { return _phase_timings; }

  ShenandoahEvacOOMHandler* oom_evac_handler() { return &_oom_evac_handler; }

  void on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation);
  void on_cycle_end(ShenandoahGeneration* generation);

  ShenandoahVerifier* verifier();

// ---------- VM subsystem bindings
//
private:
  ShenandoahMonitoringSupport* _monitoring_support;
  MemoryPool* _memory_pool;
  GCMemoryManager _stw_memory_manager;
  GCMemoryManager _cycle_memory_manager;
  ConcurrentGCTimer* _gc_timer;
  SoftRefPolicy _soft_ref_policy;

  // For exporting to SA
  int _log_min_obj_alignment_in_bytes;
public:
  ShenandoahMonitoringSupport* monitoring_support() const { return _monitoring_support; }
  GCMemoryManager* cycle_memory_manager() { return &_cycle_memory_manager; }
  GCMemoryManager* stw_memory_manager() { return &_stw_memory_manager; }
  SoftRefPolicy* soft_ref_policy() override { return &_soft_ref_policy; }

  GrowableArray<GCMemoryManager*> memory_managers() override;
  GrowableArray<MemoryPool*> memory_pools() override;
  MemoryUsage memory_usage() override;
  GCTracer* tracer();
  ConcurrentGCTimer* gc_timer() const;

// ---------- Class Unloading
//
private:
  ShenandoahSharedFlag _unload_classes;
  ShenandoahUnload _unloader;

public:
  void set_unload_classes(bool uc);
  bool unload_classes() const;

  // Perform STW class unloading and weak root cleaning
  void parallel_cleaning(bool full_gc);

private:
  void stw_unload_classes(bool full_gc);
  void stw_process_weak_roots(bool full_gc);
  void stw_weak_refs(bool full_gc);

  inline void assert_lock_for_affiliation(ShenandoahAffiliation orig_affiliation,
                                          ShenandoahAffiliation new_affiliation);

  // Heap iteration support
  void scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops);
  bool prepare_aux_bitmap_for_iteration();
  void reclaim_aux_bitmap_for_iteration();

// ---------- Generic interface hooks
// Minor things that super-interface expects us to implement to play nice with
// the rest of runtime. Some of the things here are not required to be implemented,
// and can be stubbed out.
//
public:
  bool is_maximal_no_gc() const override shenandoah_not_implemented_return(false);

  // Check the pointer is in active part of Java heap.
  // Use is_in_reserved to check if object is within heap bounds.
  bool is_in(const void* p) const override;

  // Returns true if the given oop belongs to a generation that is actively being collected.
  inline bool is_in_active_generation(oop obj) const;
  inline bool is_in_young(const void* p) const;
  inline bool is_in_old(const void* p) const;

  // Returns true iff the young generation is being collected and the given pointer
  // is in the old generation. This is used to prevent the young collection from treating
  // such an object as unreachable.
  inline bool is_in_old_during_young_collection(oop obj) const;

  inline ShenandoahAffiliation region_affiliation(const ShenandoahHeapRegion* r) const;
  inline void set_affiliation(ShenandoahHeapRegion* r, ShenandoahAffiliation new_affiliation);

  inline ShenandoahAffiliation region_affiliation(size_t index) const;

  bool requires_barriers(stackChunkOop obj) const override;

  MemRegion reserved_region() const { return _reserved; }
  bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); }

  void collect_as_vm_thread(GCCause::Cause cause) override;
  void collect(GCCause::Cause cause) override;
  void do_full_collection(bool clear_all_soft_refs) override;

  // Used for parsing heap during error printing
  HeapWord* block_start(const void* addr) const;
  bool block_is_obj(const HeapWord* addr) const;
  bool print_location(outputStream* st, void* addr) const override;

  // Used for native heap walkers: heap dumpers, mostly
  void object_iterate(ObjectClosure* cl) override;
  // Parallel heap iteration support
  ParallelObjectIteratorImpl* parallel_object_iterator(uint workers) override;

  // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
  void keep_alive(oop obj);

// ---------- Safepoint interface hooks
//
public:
  void safepoint_synchronize_begin() override;
  void safepoint_synchronize_end() override;

// ---------- Code roots handling hooks
//
public:
  void register_nmethod(nmethod* nm) override;
  void unregister_nmethod(nmethod* nm) override;
  void verify_nmethod(nmethod* nm) override {}

// ---------- Pinning hooks
//
public:
  // Shenandoah supports per-object (per-region) pinning
  void pin_object(JavaThread* thread, oop obj) override;
  void unpin_object(JavaThread* thread, oop obj) override;

  void sync_pinned_region_status();
  void assert_pinned_region_status() NOT_DEBUG_RETURN;

// ---------- Concurrent Stack Processing support
//
public:
  bool uses_stack_watermark_barrier() const override { return true; }

// ---------- Allocation support
//
protected:
  inline HeapWord* allocate_from_gclab(Thread* thread, size_t size);

private:
  HeapWord* allocate_memory_under_lock(ShenandoahAllocRequest& request, bool& in_new_region);
  HeapWord* allocate_from_gclab_slow(Thread* thread, size_t size);
  HeapWord* allocate_new_gclab(size_t min_size, size_t word_size, size_t* actual_size);

  // Retry an unsuccessful allocation attempt until at least a full GC has completed.
  bool should_retry_allocation(size_t original_full_gc_count) const;

public:
  HeapWord* allocate_memory(ShenandoahAllocRequest& request);
  HeapWord* mem_allocate(size_t size, bool* what) override;
  MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                               size_t size,
                                               Metaspace::MetadataType mdtype) override;

  void notify_mutator_alloc_words(size_t words, size_t waste);

  HeapWord* allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) override;
  size_t tlab_capacity(Thread *thr) const override;
  size_t unsafe_max_tlab_alloc(Thread *thread) const override;
  size_t max_tlab_size() const override;
  size_t tlab_used(Thread* ignored) const override;

  void ensure_parsability(bool retire_labs) override;

  void labs_make_parsable();
  void tlabs_retire(bool resize);
  void gclabs_retire(bool resize);

// ---------- Marking support
//
private:
  ShenandoahMarkingContext* _marking_context;
  MemRegion _bitmap_region;
  MemRegion _aux_bitmap_region;
  MarkBitMap _verification_bit_map;
  MarkBitMap _aux_bit_map;

  size_t _bitmap_size;
  size_t _bitmap_regions_per_slice;
  size_t _bitmap_bytes_per_slice;

  size_t _pretouch_heap_page_size;
  size_t _pretouch_bitmap_page_size;

  bool _bitmap_region_special;
  bool _aux_bitmap_region_special;

  ShenandoahLiveData** _liveness_cache;

public:
  inline ShenandoahMarkingContext* complete_marking_context() const;
  inline ShenandoahMarkingContext* marking_context() const;

  template<class T>
  inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl);

  template<class T>
  inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);

  template<class T>
  inline void marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);

  // SATB barriers hooks
  inline bool requires_marking(const void* entry) const;

  // Support for bitmap uncommits
  bool commit_bitmap_slice(ShenandoahHeapRegion *r);
  bool uncommit_bitmap_slice(ShenandoahHeapRegion *r);
  bool is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self = false);

  // During concurrent reset, the control thread will zero out the mark bitmaps for committed regions.
  // This cannot happen when the uncommit thread is simultaneously trying to uncommit regions and their bitmaps.
  // To prevent these threads from working at the same time, we provide these methods for the control thread to
  // prevent the uncommit thread from working while a collection cycle is in progress.

  // Forbid uncommits (will stop and wait if regions are being uncommitted)
  void forbid_uncommit();

  // Allow the uncommit thread to process regions
  void allow_uncommit();
#ifdef ASSERT
  bool is_uncommit_in_progress();
#endif
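
  // Illustrative sketch (editor's addition, not upstream code): the control thread
  // brackets bitmap reset with these calls so it cannot race with the uncommit
  // thread; reset_bitmaps_for_next_cycle() is a hypothetical stand-in:
  //
  //   heap->forbid_uncommit();          // waits out any in-flight uncommit
  //   reset_bitmaps_for_next_cycle();
  //   heap->allow_uncommit();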

  // Liveness caching support
  ShenandoahLiveData* get_liveness_cache(uint worker_id);
  void flush_liveness_cache(uint worker_id);

  size_t pretouch_heap_page_size() { return _pretouch_heap_page_size; }

// ---------- Evacuation support
//
private:
  ShenandoahCollectionSet* _collection_set;
  ShenandoahEvacOOMHandler _oom_evac_handler;

  oop try_evacuate_object(oop src, Thread* thread, ShenandoahHeapRegion* from_region, ShenandoahAffiliation target_gen);

public:
  static address in_cset_fast_test_addr();

  ShenandoahCollectionSet* collection_set() const { return _collection_set; }

  // Checks if object is in the collection set.
  inline bool in_collection_set(oop obj) const;

  // Checks if location is in the collection set. Can be interior pointer, not the oop itself.
  inline bool in_collection_set_loc(void* loc) const;

  // Evacuates or promotes object src. Returns the evacuated object, either evacuated
  // by this thread, or by some other thread.
  virtual oop evacuate_object(oop src, Thread* thread);

  // Call before/after evacuation.
  inline void enter_evacuation(Thread* t);
  inline void leave_evacuation(Thread* t);

// ---------- Helper functions
//
public:
  template <class T>
  inline void conc_update_with_forwarded(T* p);

  template <class T>
  inline void update_with_forwarded(T* p);

  static inline void atomic_update_oop(oop update,       oop* addr,       oop compare);
  static inline void atomic_update_oop(oop update, narrowOop* addr,       oop compare);
  static inline void atomic_update_oop(oop update, narrowOop* addr, narrowOop compare);

  static inline bool atomic_update_oop_check(oop update,       oop* addr,       oop compare);
  static inline bool atomic_update_oop_check(oop update, narrowOop* addr,       oop compare);
  static inline bool atomic_update_oop_check(oop update, narrowOop* addr, narrowOop compare);

  static inline void atomic_clear_oop(      oop* addr,       oop compare);
  static inline void atomic_clear_oop(narrowOop* addr,       oop compare);
  static inline void atomic_clear_oop(narrowOop* addr, narrowOop compare);

  size_t trash_humongous_region_at(ShenandoahHeapRegion *r);

  static inline void increase_object_age(oop obj, uint additional_age);

  // Returns the object's age, or a sentinel value when the age cannot be reliably
  // determined because the mutator is concurrently locking the object.
  static inline uint get_object_age(oop obj);

  void log_heap_status(const char *msg) const;

private:
  void trash_cset_regions();

// ---------- Testing helper functions
//
private:
  ShenandoahSharedFlag _inject_alloc_failure;

  void try_inject_alloc_failure();
  bool should_inject_alloc_failure();
};

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP