1 /*
2 * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
4 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 *
7 * This code is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 only, as
9 * published by the Free Software Foundation.
10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 *
25 */
26
27 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
28 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
29
30 #include "gc/shared/collectedHeap.hpp"
31 #include "gc/shared/markBitMap.hpp"
32 #include "gc/shenandoah/mode/shenandoahMode.hpp"
33 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
34 #include "gc/shenandoah/shenandoahAsserts.hpp"
35 #include "gc/shenandoah/shenandoahController.hpp"
36 #include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
37 #include "gc/shenandoah/shenandoahEvacTracker.hpp"
38 #include "gc/shenandoah/shenandoahGenerationType.hpp"
39 #include "gc/shenandoah/shenandoahLock.hpp"
40 #include "gc/shenandoah/shenandoahMmuTracker.hpp"
41 #include "gc/shenandoah/shenandoahPadding.hpp"
42 #include "gc/shenandoah/shenandoahSharedVariables.hpp"
43 #include "gc/shenandoah/shenandoahUnload.hpp"
44 #include "memory/metaspace.hpp"
45 #include "services/memoryManager.hpp"
46 #include "utilities/globalDefinitions.hpp"
47 #include "utilities/stack.hpp"
48
49 class ConcurrentGCTimer;
50 class ObjectIterateScanRootClosure;
51 class ShenandoahCollectorPolicy;
52 class ShenandoahGCSession;
53 class ShenandoahGCStateResetter;
54 class ShenandoahGeneration;
55 class ShenandoahYoungGeneration;
56 class ShenandoahOldGeneration;
57 class ShenandoahHeuristics;
58 class ShenandoahMarkingContext;
59 class ShenandoahMode;
60 class ShenandoahPhaseTimings;
61 class ShenandoahHeap;
62 class ShenandoahHeapRegion;
63 class ShenandoahHeapRegionClosure;
64 class ShenandoahCollectionSet;
65 class ShenandoahFreeSet;
66 class ShenandoahConcurrentMark;
67 class ShenandoahFullGC;
68 class ShenandoahMonitoringSupport;
69 class ShenandoahPacer;
70 class ShenandoahReferenceProcessor;
71 class ShenandoahUncommitThread;
72 class ShenandoahVerifier;
73 class ShenandoahWorkerThreads;
74 class VMStructs;
75
// Used for buffering per-region liveness data.
// Needed since ShenandoahHeapRegion uses atomics to update liveness.
// The ShenandoahHeap array has max-workers elements, each of which is an array of
// uint16_t * max_regions. The choice of uint16_t is not accidental:
// there is a tradeoff between static/dynamic footprint that translates
// into cache pressure (which is already high during marking), and
// too many atomic updates. uint32_t is too large, uint8_t is too small.
typedef uint16_t ShenandoahLiveData;
// Maximum value representable by ShenandoahLiveData: (uint16_t)-1 == 0xFFFF.
#define SHENANDOAH_LIVEDATA_MAX ((ShenandoahLiveData)-1)
85
// Hands out heap regions one at a time via an atomic cursor, so a single
// iterator instance can be shared by multiple worker threads (see next()).
class ShenandoahRegionIterator : public StackObj {
private:
  ShenandoahHeap* _heap;

  // Padding isolates the shared atomic cursor on its own cache line(s),
  // avoiding false sharing between claiming workers.
  shenandoah_padding(0);
  Atomic<size_t> _index;
  shenandoah_padding(1);

  // No implicit copying: iterators should be passed by reference to capture the state
  NONCOPYABLE(ShenandoahRegionIterator);

public:
  // Default constructor presumably binds to the global ShenandoahHeap::heap();
  // the overload binds to an explicit heap instance — confirm in implementation.
  ShenandoahRegionIterator();
  ShenandoahRegionIterator(ShenandoahHeap* heap);

  // Reset iterator to default state
  void reset();

  // Returns next region, or null if there are no more regions.
  // This is multi-thread-safe.
  inline ShenandoahHeapRegion* next();

  // This is *not* MT safe. However, in the absence of multithreaded access, it
  // can be used to determine if there is more work to do.
  bool has_next() const;
};
112
// Visitor interface for region iteration; see ShenandoahHeap::heap_region_iterate
// and ShenandoahHeap::parallel_heap_region_iterate.
class ShenandoahHeapRegionClosure : public StackObj {
public:
  // Invoked once per visited region.
  virtual void heap_region_do(ShenandoahHeapRegion* r) = 0;
  // Stride used when handing out regions during parallel iteration;
  // defaults to the ShenandoahParallelRegionStride flag.
  virtual size_t parallel_region_stride() { return ShenandoahParallelRegionStride; }
  // Whether heap_region_do may be invoked by multiple workers concurrently.
  // Defaults to false (single-threaded iteration only).
  virtual bool is_thread_safe() { return false; }
};
119
120 typedef ShenandoahLock ShenandoahHeapLock;
121 // ShenandoahHeapLocker implements locker to assure mutually exclusive access to the global heap data structures.
// Asserts in the implementation detect potential deadlock usage with regard to the rebuild lock that is present
123 // in ShenandoahFreeSet. Whenever both locks are acquired, this lock should be acquired before the
124 // ShenandoahFreeSet rebuild lock.
class ShenandoahHeapLocker : public StackObj {
private:
  ShenandoahHeapLock* _lock;
public:
  // Acquires the given heap lock for the lifetime of this scope object.
  // NOTE(review): allow_block_for_safepoint presumably lets the acquiring
  // thread block at a safepoint while contending — confirm in ShenandoahLock.
  ShenandoahHeapLocker(ShenandoahHeapLock* lock, bool allow_block_for_safepoint = false);

  // RAII: releases the lock when the scope exits.
  ~ShenandoahHeapLocker() {
    _lock->unlock();
  }
};
135
136 typedef Stack<oop, mtGC> ShenandoahScanObjectStack;
137
138 // Shenandoah GC is low-pause concurrent GC that uses a load reference barrier
// for concurrent evacuation and a snapshot-at-the-beginning write barrier for
140 // concurrent marking. See ShenandoahControlThread for GC cycle structure.
141 //
class ShenandoahHeap : public CollectedHeap {
  friend class ShenandoahAsserts;
  friend class VMStructs;
  friend class ShenandoahGCSession;
  friend class ShenandoahGCStateResetter;
  friend class ShenandoahParallelObjectIterator;
  friend class ShenandoahSafepoint;

  // Supported GC
  friend class ShenandoahConcurrentGC;
  friend class ShenandoahOldGC;
  friend class ShenandoahDegenGC;
  friend class ShenandoahFullGC;
  friend class ShenandoahUnload;

  // ---------- Locks that guard important data structures in Heap
  //
private:
  ShenandoahHeapLock _lock;

  // This is set and cleared by only the VMThread
  // at each STW pause (safepoint) to the value given to the VM operation.
  // This allows the value to be always consistently
  // seen by all mutators as well as all GC worker threads.
  ShenandoahGeneration* _active_generation;

protected:
  void print_tracing_info() const override;
  void stop() override;

public:
  ShenandoahHeapLock* lock() {
    return &_lock;
  }

  ShenandoahGeneration* active_generation() const {
    // value of _active_generation field, see above
    return _active_generation;
  }

  // Update the _active_generation field: can only be called at a safepoint by the VMThread.
  void set_active_generation(ShenandoahGeneration* generation);

  // Heuristics driving collection decisions; established by
  // initialize_heuristics() / post_initialize_heuristics() below.
  ShenandoahHeuristics* heuristics();

  // ---------- Initialization, termination, identification, printing routines
  //
public:
  // Accessor for the singleton heap instance.
  static ShenandoahHeap* heap();

  const char* name() const override { return "Shenandoah"; }
  ShenandoahHeap::Name kind() const override { return CollectedHeap::Shenandoah; }

  ShenandoahHeap(ShenandoahCollectorPolicy* policy);
  jint initialize() override;
  void post_initialize() override;
  virtual void initialize_generations();
  void initialize_mode();
  virtual void initialize_heuristics();
  virtual void post_initialize_heuristics();
  virtual void print_init_logger() const;
  void initialize_serviceability() override;

  void print_heap_on(outputStream* st) const override;
  void print_gc_on(outputStream* st) const override;
  void print_heap_regions_on(outputStream* st) const;

  // Flushes cycle timings to global timings and prints the phase timings for the last completed cycle.
  void process_gc_stats() const;

  void prepare_for_verify() override;
  void verify(VerifyOption vo) override;

  // WhiteBox testing support.
  bool supports_concurrent_gc_breakpoints() const override {
    return true;
  }

  // ---------- Heap counters and metrics
  //
private:
  size_t _initial_size;
  size_t _minimum_size;

  // Padded: these counters are updated from multiple threads.
  Atomic<size_t> _soft_max_size;
  shenandoah_padding(0);
  Atomic<size_t> _committed;
  shenandoah_padding(1);

public:
  // Bookkeeping for the committed footprint (_committed above).
  void increase_committed(size_t bytes);
  void decrease_committed(size_t bytes);

  void reset_bytes_allocated_since_gc_start();

  size_t min_capacity() const;
  size_t max_capacity() const override;
  size_t soft_max_capacity() const;
  size_t initial_capacity() const;
  size_t capacity() const override;
  size_t used() const override;
  size_t committed() const;

  void set_soft_max_capacity(size_t v);

  // ---------- Periodic Tasks
  //
public:
  // Notify heuristics and region state change logger that the state of the heap has changed
  void notify_heap_changed();

  // Force counters to update
  void set_forced_counters_update(bool value);

  // Update counters if forced flag is set
  void handle_force_counters_update();

  // ---------- Workers handling
  //
private:
  uint _max_workers;
  ShenandoahWorkerThreads* _workers;
  ShenandoahWorkerThreads* _safepoint_workers;

  virtual void initialize_controller();

public:
  uint max_workers();
  // Debug-only check that the expected number of GC workers is in use.
  void assert_gc_workers(uint nworker) NOT_DEBUG_RETURN;

  WorkerThreads* workers() const;
  WorkerThreads* safepoint_workers() override;

  void gc_threads_do(ThreadClosure* tcl) const override;

  // ---------- Heap regions handling machinery
  //
private:
  MemRegion _heap_region;
  bool _heap_region_special;
  size_t _num_regions;
  ShenandoahHeapRegion** _regions;
  uint8_t* _affiliations; // Holds array of enum ShenandoahAffiliation, including FREE status in non-generational mode

public:

  inline HeapWord* base() const { return _heap_region.start(); }
  inline HeapWord* end() const { return _heap_region.end(); }

  inline size_t num_regions() const { return _num_regions; }
  inline bool is_heap_region_special() { return _heap_region_special; }

  inline ShenandoahHeapRegion* heap_region_containing(const void* addr) const;
  inline size_t heap_region_index_containing(const void* addr) const;

  inline ShenandoahHeapRegion* get_region(size_t region_idx) const;

  void heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
  void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;

  inline ShenandoahMmuTracker* mmu_tracker() { return &_mmu_tracker; };

  // ---------- GC state machinery
  //
  // GC state describes the important parts of collector state, that may be
  // used to make barrier selection decisions in the native and generated code.
  // Multiple bits can be set at once.
  //
  // Important invariant: when GC state is zero, the heap is stable, and no barriers
  // are required.
  //
public:
  enum GCStateBitPos {
    // Heap has forwarded objects: needs LRB barriers.
    HAS_FORWARDED_BITPOS = 0,

    // Heap is under marking: needs SATB barriers.
    // For generational mode, it means either young or old marking, or both.
    MARKING_BITPOS = 1,

    // Heap is under evacuation: needs LRB barriers. (Set together with HAS_FORWARDED)
    EVACUATION_BITPOS = 2,

    // Heap is under updating: needs no additional barriers.
    UPDATE_REFS_BITPOS = 3,

    // Heap is under weak-reference/roots processing: needs weak-LRB barriers.
    WEAK_ROOTS_BITPOS = 4,

    // Young regions are under marking, need SATB barriers.
    YOUNG_MARKING_BITPOS = 5,

    // Old regions are under marking, need SATB barriers.
    OLD_MARKING_BITPOS = 6
  };

  // Bit masks derived from the positions above; combined into _gc_state.
  enum GCState {
    STABLE = 0,
    HAS_FORWARDED = 1 << HAS_FORWARDED_BITPOS,
    MARKING = 1 << MARKING_BITPOS,
    EVACUATION = 1 << EVACUATION_BITPOS,
    UPDATE_REFS = 1 << UPDATE_REFS_BITPOS,
    WEAK_ROOTS = 1 << WEAK_ROOTS_BITPOS,
    YOUNG_MARKING = 1 << YOUNG_MARKING_BITPOS,
    OLD_MARKING = 1 << OLD_MARKING_BITPOS
  };

private:
  // Set when the global gc state is changed and not yet propagated to
  // threads; see has_gc_state_changed() and propagate_gc_state_to_all_threads().
  bool _gc_state_changed;
  ShenandoahSharedBitmap _gc_state;
  ShenandoahSharedFlag _heap_changed;
  ShenandoahSharedFlag _degenerated_gc_in_progress;
  ShenandoahSharedFlag _full_gc_in_progress;
  ShenandoahSharedFlag _full_gc_move_in_progress;
  ShenandoahSharedFlag _concurrent_strong_root_in_progress;

  // Counts consecutive cycles that made no progress; see notify_gc_no_progress().
  Atomic<size_t> _gc_no_progress_count;

  // This updates the singular, global gc state. This call must happen on a safepoint.
  void set_gc_state_at_safepoint(uint mask, bool value);

  // This also updates the global gc state, but does not need to be called on a safepoint.
  // Critically, this method will _not_ flag that the global gc state has changed and threads
  // will continue to use their thread local copy. This is expected to be used in conjunction
  // with a handshake operation to propagate the new gc state.
  void set_gc_state_concurrent(uint mask, bool value);

public:
  // This returns the raw value of the singular, global gc state.
  inline char gc_state() const;

  // Compares the given state against either the global gc state, or the thread local state.
  // The global gc state may change on a safepoint and is the correct value to use until
  // the global gc state has been propagated to all threads (after which, this method will
  // compare against the thread local state). The thread local gc state may also be changed
  // by a handshake operation, in which case, this function continues using the updated thread
  // local value.
  inline bool is_gc_state(GCState state) const;

  // This copies the global gc state into a thread local variable for all threads.
  // The thread local gc state is primarily intended to support quick access at barriers.
  // All threads are updated because in some cases the control thread or the vm thread may
  // need to execute the load reference barrier.
  void propagate_gc_state_to_all_threads();

  // This is public to support assertions that the state hasn't been changed off of
  // a safepoint and that any changes were propagated to threads after the safepoint.
  bool has_gc_state_changed() const { return _gc_state_changed; }

  // Returns true if allocations have occurred in new regions or if regions have been
  // uncommitted since the previous calls. This call will reset the flag to false.
  bool has_changed() {
    return _heap_changed.try_unset();
  }

  virtual void start_idle_span();

  // Setters flipping the corresponding GC-state bits / shared flags above.
  void set_concurrent_young_mark_in_progress(bool in_progress);
  void set_concurrent_old_mark_in_progress(bool in_progress);
  void set_evacuation_in_progress(bool in_progress);
  void set_update_refs_in_progress(bool in_progress);
  void set_degenerated_gc_in_progress(bool in_progress);
  void set_full_gc_in_progress(bool in_progress);
  void set_full_gc_move_in_progress(bool in_progress);
  void set_has_forwarded_objects(bool cond);
  void set_concurrent_strong_root_in_progress(bool cond);
  void set_concurrent_weak_root_in_progress(bool cond);

  inline bool is_idle() const;
  inline bool is_concurrent_mark_in_progress() const;
  inline bool is_concurrent_young_mark_in_progress() const;
  inline bool is_concurrent_old_mark_in_progress() const;
  inline bool is_update_refs_in_progress() const;
  inline bool is_evacuation_in_progress() const;
  inline bool is_degenerated_gc_in_progress() const;
  inline bool is_full_gc_in_progress() const;
  inline bool is_full_gc_move_in_progress() const;
  inline bool has_forwarded_objects() const;

  inline bool is_stw_gc_in_progress() const;
  inline bool is_concurrent_strong_root_in_progress() const;
  inline bool is_concurrent_weak_root_in_progress() const;
  bool is_prepare_for_old_mark_in_progress() const;

private:
  // Enables/disables the SATB barrier machinery to match the marking state.
  void manage_satb_barrier(bool active);

  // Records the time of the first successful cancellation request. This is used to measure
  // the responsiveness of the heuristic when starting a cycle.
  double _cancel_requested_time;

  // Indicates the reason the current GC has been cancelled (GCCause::_no_gc means the gc is not cancelled).
  ShenandoahSharedEnumFlag<GCCause::Cause> _cancelled_gc;

  // Returns true if cancel request was successfully communicated.
  // Returns false if some other thread already communicated cancel
  // request. A true return value does not mean GC has been
  // cancelled, only that the process of cancelling GC has begun.
  bool try_cancel_gc(GCCause::Cause cause);

public:
  // True if gc has been cancelled
  inline bool cancelled_gc() const;

  // Used by workers in the GC cycle to detect cancellation and honor STS requirements
  inline bool check_cancelled_gc_and_yield(bool sts_active = true);

  // This indicates the reason the last GC cycle was cancelled.
  inline GCCause::Cause cancelled_cause() const;

  // Clears the cancellation cause and resets the oom handler
  inline void clear_cancelled_gc();

  // Clears the cancellation cause iff the current cancellation reason equals the given
  // expected cancellation cause. Does not reset the oom handler.
  inline GCCause::Cause clear_cancellation(GCCause::Cause expected);

  void cancel_concurrent_mark();

  // Returns true if and only if this call caused a gc to be cancelled.
  bool cancel_gc(GCCause::Cause cause);

  // Returns true if the soft maximum heap has been changed using management APIs.
  bool check_soft_max_changed();

protected:
  // This is shared between shConcurrentGC and shDegenerateGC so that degenerated
  // GC can resume update refs from where the concurrent GC was cancelled. It is
  // also used in shGenerationalHeap, which uses a different closure for update refs.
  ShenandoahRegionIterator _update_refs_iterator;

private:
  // Resets _cancel_requested_time (see above).
  inline void reset_cancellation_time();

  // GC support
  // Evacuation
  virtual void evacuate_collection_set(ShenandoahGeneration* generation, bool concurrent);
  // Concurrent root processing
  void prepare_concurrent_roots();
  void finish_concurrent_roots();
  // Concurrent class unloading support
  void do_class_unloading();
  // Reference updating
  void prepare_update_heap_references();

  // Retires LABs used for evacuation
  void concurrent_prepare_for_update_refs();

  // Turn off weak roots flag, purge old satb buffers in generational mode
  void concurrent_final_roots(HandshakeClosure* handshake_closure = nullptr);

  virtual void update_heap_references(ShenandoahGeneration* generation, bool concurrent);
  // Final update region states
  void update_heap_region_states(bool concurrent);
  virtual void final_update_refs_update_region_states();

  void rendezvous_threads(const char* name);
  void recycle_trash();
public:
  // The following two functions rebuild the free set at the end of GC, in preparation for an idle phase.
  void rebuild_free_set(bool concurrent);
  void rebuild_free_set_within_phase();
  // Progress tracking: no-progress cycles are counted to detect a heap that
  // cannot be helped by further concurrent cycles.
  void notify_gc_progress();
  void notify_gc_no_progress();
  size_t get_gc_no_progress_count() const;

  // The uncommit thread targets soft max heap, notify this thread when that value has changed.
  void notify_soft_max_changed();

  // An explicit GC request may have freed regions, notify the uncommit thread.
  void notify_explicit_gc_requested();

private:
  // Generation spanning the entire heap; see global_generation().
  ShenandoahGeneration* _global_generation;

protected:
  // The control thread presides over concurrent collection cycles
  ShenandoahController* _control_thread;

  // The uncommit thread periodically attempts to uncommit regions that have been empty for longer than ShenandoahUncommitDelay
  ShenandoahUncommitThread* _uncommit_thread;

  ShenandoahYoungGeneration* _young_generation;
  ShenandoahOldGeneration* _old_generation;

private:
  ShenandoahCollectorPolicy* _shenandoah_policy;
  ShenandoahMode* _gc_mode;
  ShenandoahFreeSet* _free_set;
  ShenandoahPacer* _pacer;
  ShenandoahVerifier* _verifier;

  ShenandoahPhaseTimings* _phase_timings;
  ShenandoahMmuTracker _mmu_tracker;

public:
  ShenandoahController* control_thread() const { return _control_thread; }

  ShenandoahGeneration* global_generation() const { return _global_generation; }
  ShenandoahYoungGeneration* young_generation() const {
    assert(mode()->is_generational(), "Young generation requires generational mode");
    return _young_generation;
  }

  ShenandoahOldGeneration* old_generation() const {
    // NOTE(review): unlike young_generation(), this asserts ShenandoahCardBarrier
    // rather than mode()->is_generational() — confirm the intended precondition.
    assert(ShenandoahCardBarrier, "Card mark barrier should be on");
    return _old_generation;
  }

  // Maps an affiliation (young/old/...) to the corresponding generation object.
  ShenandoahGeneration* generation_for(ShenandoahAffiliation affiliation) const;

  ShenandoahCollectorPolicy* shenandoah_policy() const { return _shenandoah_policy; }
  ShenandoahMode* mode() const { return _gc_mode; }
  ShenandoahFreeSet* free_set() const { return _free_set; }
  ShenandoahPacer* pacer() const { return _pacer; }

  ShenandoahPhaseTimings* phase_timings() const { return _phase_timings; }

  ShenandoahEvacOOMHandler* oom_evac_handler() { return &_oom_evac_handler; }

  ShenandoahEvacuationTracker* evac_tracker() const {
    return _evac_tracker;
  }

  // Cycle lifecycle hooks invoked around each collection cycle.
  void on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation);
  void on_cycle_end(ShenandoahGeneration* generation);

  ShenandoahVerifier* verifier();

  // ---------- VM subsystem bindings
  //
private:
  ShenandoahMonitoringSupport* _monitoring_support;
  MemoryPool* _memory_pool;
  GCMemoryManager _stw_memory_manager;
  GCMemoryManager _cycle_memory_manager;
  ConcurrentGCTimer* _gc_timer;
  // For exporting to SA
  int _log_min_obj_alignment_in_bytes;
public:
  ShenandoahMonitoringSupport* monitoring_support() const { return _monitoring_support; }
  GCMemoryManager* cycle_memory_manager() { return &_cycle_memory_manager; }
  GCMemoryManager* stw_memory_manager() { return &_stw_memory_manager; }

  GrowableArray<GCMemoryManager*> memory_managers() override;
  GrowableArray<MemoryPool*> memory_pools() override;
  MemoryUsage memory_usage() override;
  GCTracer* tracer();
  ConcurrentGCTimer* gc_timer() const;

  // ---------- Class Unloading
  //
private:
  ShenandoahSharedFlag _unload_classes;
  ShenandoahUnload _unloader;

public:
  void set_unload_classes(bool uc);
  bool unload_classes() const;

  // Perform STW class unloading and weak root cleaning
  void parallel_cleaning(ShenandoahGeneration* generation, bool full_gc);

private:
  void stw_unload_classes(bool full_gc);
  void stw_process_weak_roots(bool full_gc);
  void stw_weak_refs(ShenandoahGeneration* generation, bool full_gc);

  // Debug-only check that the proper lock is held when changing a region's affiliation.
  inline void assert_lock_for_affiliation(ShenandoahAffiliation orig_affiliation,
                                          ShenandoahAffiliation new_affiliation);

  // Heap iteration support
  void scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops);
  bool prepare_aux_bitmap_for_iteration();
  void reclaim_aux_bitmap_for_iteration();

  // ---------- Generic interface hooks
  // Minor things that super-interface expects us to implement to play nice with
  // the rest of runtime. Some of the things here are not required to be implemented,
  // and can be stubbed out.
  //
public:
  // Check the pointer is in active part of Java heap.
  // Use is_in_reserved to check if object is within heap bounds.
  bool is_in(const void* p) const override;

  // Returns true if the given oop belongs to a generation that is actively being collected.
  inline bool is_in_active_generation(oop obj) const;
  inline bool is_in_young(const void* p) const;
  inline bool is_in_old(const void* p) const;

  // Returns true iff the young generation is being collected and the given pointer
  // is in the old generation. This is used to prevent the young collection from treating
  // such an object as unreachable.
  inline bool is_in_old_during_young_collection(oop obj) const;

  inline ShenandoahAffiliation region_affiliation(const ShenandoahHeapRegion* r) const;
  inline void set_affiliation(ShenandoahHeapRegion* r, ShenandoahAffiliation new_affiliation);

  inline ShenandoahAffiliation region_affiliation(size_t index) const;

  bool requires_barriers(stackChunkOop obj) const override;

  MemRegion reserved_region() const { return _reserved; }
  bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); }

  void collect_as_vm_thread(GCCause::Cause cause) override;
  void collect(GCCause::Cause cause) override;
  void do_full_collection(bool clear_all_soft_refs) override;

  // Used for parsing heap during error printing
  HeapWord* block_start(const void* addr) const;
  bool block_is_obj(const HeapWord* addr) const;
  bool print_location(outputStream* st, void* addr) const override;

  // Used for native heap walkers: heap dumpers, mostly
  void object_iterate(ObjectClosure* cl) override;
  // Parallel heap iteration support
  ParallelObjectIteratorImpl* parallel_object_iterator(uint workers) override;

  // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
  void keep_alive(oop obj) override;

  // ---------- Safepoint interface hooks
  //
public:
  void safepoint_synchronize_begin() override;
  void safepoint_synchronize_end() override;

  // ---------- Code roots handling hooks
  //
public:
  void register_nmethod(nmethod* nm) override;
  void unregister_nmethod(nmethod* nm) override;
  void verify_nmethod(nmethod* nm) override {}

  // ---------- Pinning hooks
  //
public:
  // Shenandoah supports per-object (per-region) pinning
  void pin_object(JavaThread* thread, oop obj) override;
  void unpin_object(JavaThread* thread, oop obj) override;

  void sync_pinned_region_status();
  void assert_pinned_region_status() const NOT_DEBUG_RETURN;
  void assert_pinned_region_status(ShenandoahGeneration* generation) const NOT_DEBUG_RETURN;

  // ---------- CDS archive support

  bool can_load_archived_objects() const override { return true; }
  HeapWord* allocate_loaded_archive_space(size_t size) override;
  void complete_loaded_archive_space(MemRegion archive_space) override;

  // ---------- Allocation support
  //
protected:
  // Fast path allocation from the thread's GCLAB; the slow path
  // (allocate_from_gclab_slow below) refills the lab.
  inline HeapWord* allocate_from_gclab(Thread* thread, size_t size);

private:
  HeapWord* allocate_memory_under_lock(ShenandoahAllocRequest& request, bool& in_new_region);
  HeapWord* allocate_from_gclab_slow(Thread* thread, size_t size);
  HeapWord* allocate_new_gclab(size_t min_size, size_t word_size, size_t* actual_size);

  // We want to retry an unsuccessful attempt at allocation until at least a full gc.
  bool should_retry_allocation(size_t original_full_gc_count) const;

public:
  HeapWord* allocate_memory(ShenandoahAllocRequest& request);
  HeapWord* mem_allocate(size_t size) override;
  MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                               size_t size,
                                               Metaspace::MetadataType mdtype) override;

  HeapWord* allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) override;
  size_t tlab_capacity() const override;
  size_t unsafe_max_tlab_alloc() const override;
  size_t max_tlab_size() const override;
  size_t tlab_used() const override;

  void ensure_parsability(bool retire_labs) override;

  void labs_make_parsable();
  void tlabs_retire(bool resize);
  void gclabs_retire(bool resize);

  // ---------- Marking support
  //
private:
  ShenandoahMarkingContext* _marking_context;
  MemRegion _bitmap_region;
  MemRegion _aux_bitmap_region;
  MarkBitMap _verification_bit_map;
  MarkBitMap _aux_bit_map;

  size_t _bitmap_size;
  size_t _bitmap_regions_per_slice;
  size_t _bitmap_bytes_per_slice;

  size_t _pretouch_heap_page_size;
  size_t _pretouch_bitmap_page_size;

  bool _bitmap_region_special;
  bool _aux_bitmap_region_special;

  // Per-worker liveness buffers; see the ShenandoahLiveData comment at the top
  // of this file. Accessed via get_liveness_cache()/flush_liveness_cache().
  ShenandoahLiveData** _liveness_cache;

public:
  // Return the marking context regardless of the completeness status.
  inline ShenandoahMarkingContext* marking_context() const;

  template<class T>
  inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl);

  template<class T>
  inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);

  template<class T>
  inline void marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);

  // SATB barriers hooks
  inline bool requires_marking(const void* entry) const;

  // Support for bitmap uncommits
  void commit_bitmap_slice(ShenandoahHeapRegion *r);
  void uncommit_bitmap_slice(ShenandoahHeapRegion *r);
  bool is_bitmap_region_special() { return _bitmap_region_special; }
  bool is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self = false);

  // During concurrent reset, the control thread will zero out the mark bitmaps for committed regions.
  // This cannot happen when the uncommit thread is simultaneously trying to uncommit regions and their bitmaps.
  // To prevent these threads from working at the same time, we provide these methods for the control thread to
  // prevent the uncommit thread from working while a collection cycle is in progress.

  // Forbid uncommits (will stop and wait if regions are being uncommitted)
  void forbid_uncommit();

  // Allow the uncommit thread to process regions
  void allow_uncommit();
#ifdef ASSERT
  bool is_uncommit_in_progress();
#endif

  // Liveness caching support
  ShenandoahLiveData* get_liveness_cache(uint worker_id);
  void flush_liveness_cache(uint worker_id);

  size_t pretouch_heap_page_size() { return _pretouch_heap_page_size; }

  // ---------- Evacuation support
  //
private:
  ShenandoahCollectionSet* _collection_set;
  ShenandoahEvacOOMHandler _oom_evac_handler;

  oop try_evacuate_object(oop src, Thread* thread, ShenandoahHeapRegion* from_region, ShenandoahAffiliation target_gen);

protected:
  // Used primarily to look for failed evacuation attempts.
  ShenandoahEvacuationTracker* _evac_tracker;

public:
  // Address of the in-collection-set fast-test data, for use by generated code.
  static address in_cset_fast_test_addr();

  ShenandoahCollectionSet* collection_set() const { return _collection_set; }

  // Checks if object is in the collection set.
  inline bool in_collection_set(oop obj) const;

  // Checks if location is in the collection set. Can be interior pointer, not the oop itself.
  inline bool in_collection_set_loc(void* loc) const;

  // Evacuates or promotes object src. Returns the evacuated object, either evacuated
  // by this thread, or by some other thread.
  virtual oop evacuate_object(oop src, Thread* thread);

  // Call before/after evacuation.
  inline void enter_evacuation(Thread* t);
  inline void leave_evacuation(Thread* t);

  // ---------- Helper functions
  //
public:
  // Update the reference at p with its forwardee, using atomic (concurrent)
  // or plain (STW) updates respectively.
  template <class T>
  inline void conc_update_with_forwarded(T* p);

  template <class T>
  inline void non_conc_update_with_forwarded(T* p);

  static inline void atomic_update_oop(oop update, oop* addr, oop compare);
  static inline void atomic_update_oop(oop update, narrowOop* addr, oop compare);
  static inline void atomic_update_oop(oop update, narrowOop* addr, narrowOop compare);

  static inline bool atomic_update_oop_check(oop update, oop* addr, oop compare);
  static inline bool atomic_update_oop_check(oop update, narrowOop* addr, oop compare);
  static inline bool atomic_update_oop_check(oop update, narrowOop* addr, narrowOop compare);

  static inline void atomic_clear_oop( oop* addr, oop compare);
  static inline void atomic_clear_oop(narrowOop* addr, oop compare);
  static inline void atomic_clear_oop(narrowOop* addr, narrowOop compare);

  // Reclaims the humongous object starting at region r. Returns a size_t —
  // presumably the number of regions reclaimed; confirm in the implementation.
  size_t trash_humongous_region_at(ShenandoahHeapRegion *r) const;

  static inline void increase_object_age(oop obj, uint additional_age);

  // Return the object's age, or a sentinel value when the age can't
  // necessarily be determined because of concurrent locking by the
  // mutator
  static inline uint get_object_age(oop obj);

  void log_heap_status(const char *msg) const;

private:
  void trash_cset_regions();

  // ---------- Testing helpers functions
  //
private:
  ShenandoahSharedFlag _inject_alloc_failure;

  void try_inject_alloc_failure();
  bool should_inject_alloc_failure();
};
864
865 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP