1 /*
2 * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
4 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 *
7 * This code is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 only, as
9 * published by the Free Software Foundation.
10 *
11 * This code is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * version 2 for more details (a copy is included in the LICENSE file that
15 * accompanied this code).
16 *
17 * You should have received a copy of the GNU General Public License version
18 * 2 along with this work; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
20 *
21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
22 * or visit www.oracle.com if you need additional information or have any
23 * questions.
24 *
25 */
26
27 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
28 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
29
30 #include "gc/shared/collectedHeap.hpp"
31 #include "gc/shared/markBitMap.hpp"
32 #include "gc/shenandoah/mode/shenandoahMode.hpp"
33 #include "gc/shenandoah/shenandoahAllocRequest.hpp"
34 #include "gc/shenandoah/shenandoahAsserts.hpp"
35 #include "gc/shenandoah/shenandoahController.hpp"
36 #include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
37 #include "gc/shenandoah/shenandoahEvacTracker.hpp"
38 #include "gc/shenandoah/shenandoahGenerationType.hpp"
39 #include "gc/shenandoah/shenandoahLock.hpp"
40 #include "gc/shenandoah/shenandoahMmuTracker.hpp"
41 #include "gc/shenandoah/shenandoahPadding.hpp"
42 #include "gc/shenandoah/shenandoahSharedVariables.hpp"
43 #include "gc/shenandoah/shenandoahUnload.hpp"
44 #include "memory/metaspace.hpp"
45 #include "services/memoryManager.hpp"
46 #include "utilities/globalDefinitions.hpp"
47 #include "utilities/stack.hpp"
48
49 class ConcurrentGCTimer;
50 class ObjectIterateScanRootClosure;
51 class ShenandoahCollectorPolicy;
52 class ShenandoahGCSession;
53 class ShenandoahGCStateResetter;
54 class ShenandoahGeneration;
55 class ShenandoahYoungGeneration;
56 class ShenandoahOldGeneration;
57 class ShenandoahHeuristics;
58 class ShenandoahMarkingContext;
59 class ShenandoahMode;
60 class ShenandoahPhaseTimings;
61 class ShenandoahHeap;
62 class ShenandoahHeapRegion;
63 class ShenandoahHeapRegionClosure;
64 class ShenandoahCollectionSet;
65 class ShenandoahFreeSet;
66 class ShenandoahConcurrentMark;
67 class ShenandoahFullGC;
68 class ShenandoahMonitoringSupport;
69 class ShenandoahPacer;
70 class ShenandoahReferenceProcessor;
71 class ShenandoahUncommitThread;
72 class ShenandoahVerifier;
73 class ShenandoahWorkerThreads;
74 class VMStructs;
75
76 // Used for buffering per-region liveness data.
77 // Needed since ShenandoahHeapRegion uses atomics to update liveness.
78 // The ShenandoahHeap array has max-workers elements, each of which is an array of
79 // uint16_t * max_regions. The choice of uint16_t is not accidental:
80 // there is a tradeoff between static/dynamic footprint that translates
81 // into cache pressure (which is already high during marking), and
82 // too many atomic updates. uint32_t is too large, uint8_t is too small.
83 typedef uint16_t ShenandoahLiveData;
84 #define SHENANDOAH_LIVEDATA_MAX ((ShenandoahLiveData)-1)
85
// Claim-based iterator over heap regions. Multiple GC worker threads may share
// one iterator and claim regions concurrently through next(); the claim index
// is an atomic counter, padded on both sides to keep it on its own cache line
// and avoid false sharing with neighboring fields.
class ShenandoahRegionIterator : public StackObj {
private:
  ShenandoahHeap* _heap;

  shenandoah_padding(0);
  Atomic<size_t> _index;     // Next region index to hand out; advanced atomically by next()
  shenandoah_padding(1);

  // No implicit copying: iterators should be passed by reference to capture the state
  NONCOPYABLE(ShenandoahRegionIterator);

public:
  ShenandoahRegionIterator();
  ShenandoahRegionIterator(ShenandoahHeap* heap);

  // Reset iterator to default state
  void reset();

  // Returns next region, or null if there are no more regions.
  // This is multi-thread-safe.
  inline ShenandoahHeapRegion* next();

  // This is *not* MT safe. However, in the absence of multithreaded access, it
  // can be used to determine if there is more work to do.
  bool has_next() const;
};
112
// Closure applied to every heap region by heap_region_iterate() and
// parallel_heap_region_iterate().
class ShenandoahHeapRegionClosure : public StackObj {
public:
  // Invoked once per region.
  virtual void heap_region_do(ShenandoahHeapRegion* r) = 0;
  // Number of regions a worker claims at a time during parallel iteration.
  virtual size_t parallel_region_stride() { return ShenandoahParallelRegionStride; }
  // Override to return true when heap_region_do may safely be invoked from
  // multiple worker threads concurrently (required for parallel iteration).
  virtual bool is_thread_safe() { return false; }
};
119
120 typedef ShenandoahLock ShenandoahHeapLock;
121 // ShenandoahHeapLocker implements locker to assure mutually exclusive access to the global heap data structures.
// Asserts in the implementation detect potential deadlock usage with regard to the rebuild lock that is present
123 // in ShenandoahFreeSet. Whenever both locks are acquired, this lock should be acquired before the
124 // ShenandoahFreeSet rebuild lock.
class ShenandoahHeapLocker : public StackObj {
private:
  ShenandoahHeapLock* _lock;
public:
  // Acquires the given heap lock (RAII). When allow_block_for_safepoint is true,
  // the acquiring thread presumably may block for a safepoint while waiting for
  // the lock -- see the constructor's definition for the exact semantics.
  ShenandoahHeapLocker(ShenandoahHeapLock* lock, bool allow_block_for_safepoint = false);

  // Releases the lock on scope exit.
  ~ShenandoahHeapLocker() {
    _lock->unlock();
  }
};
135
136 typedef Stack<oop, mtGC> ShenandoahScanObjectStack;
137
138 // Shenandoah GC is low-pause concurrent GC that uses a load reference barrier
// for concurrent evacuation and a snapshot-at-the-beginning write barrier for
140 // concurrent marking. See ShenandoahControlThread for GC cycle structure.
141 //
142 class ShenandoahHeap : public CollectedHeap {
143 friend class ShenandoahAsserts;
144 friend class VMStructs;
145 friend class ShenandoahGCSession;
146 friend class ShenandoahGCStateResetter;
147 friend class ShenandoahParallelObjectIterator;
148 friend class ShenandoahSafepoint;
149
150 // Supported GC
151 friend class ShenandoahConcurrentGC;
152 friend class ShenandoahOldGC;
153 friend class ShenandoahDegenGC;
154 friend class ShenandoahFullGC;
155 friend class ShenandoahUnload;
156
157 // ---------- Locks that guard important data structures in Heap
158 //
159 private:
160 ShenandoahHeapLock _lock;
161
162 // This is set and cleared by only the VMThread
163 // at each STW pause (safepoint) to the value given to the VM operation.
164 // This allows the value to be always consistently
165 // seen by all mutators as well as all GC worker threads.
166 ShenandoahGeneration* _active_generation;
167
168 protected:
169 void print_tracing_info() const override;
170 void stop() override;
171
172 public:
173 ShenandoahHeapLock* lock() {
174 return &_lock;
175 }
176
177 ShenandoahGeneration* active_generation() const {
178 // value of _active_generation field, see above
179 return _active_generation;
180 }
181
182 // Update the _active_generation field: can only be called at a safepoint by the VMThread.
183 void set_active_generation(ShenandoahGeneration* generation);
184
185 ShenandoahHeuristics* heuristics();
186
187 // ---------- Initialization, termination, identification, printing routines
188 //
189 public:
190 static ShenandoahHeap* heap();
191
192 const char* name() const override { return "Shenandoah"; }
193 ShenandoahHeap::Name kind() const override { return CollectedHeap::Shenandoah; }
194
195 ShenandoahHeap(ShenandoahCollectorPolicy* policy);
196 jint initialize() override;
197 void post_initialize() override;
198 virtual void initialize_generations();
199 void initialize_mode();
200 virtual void initialize_heuristics();
201 virtual void post_initialize_heuristics();
202 virtual void print_init_logger() const;
203 void initialize_serviceability() override;
204
205 void print_heap_on(outputStream* st) const override;
206 void print_gc_on(outputStream* st) const override;
207 void print_heap_regions_on(outputStream* st) const;
208
209 // Flushes cycle timings to global timings and prints the phase timings for the last completed cycle.
210 void process_gc_stats() const;
211
212 void prepare_for_verify() override;
213 void verify(VerifyOption vo) override;
214
215 // WhiteBox testing support.
216 bool supports_concurrent_gc_breakpoints() const override {
217 return true;
218 }
219
220 // ---------- Heap counters and metrics
221 //
222 private:
223 size_t _initial_size;
224 size_t _minimum_size;
225
226 Atomic<size_t> _soft_max_size;
227 shenandoah_padding(0);
228 Atomic<size_t> _committed;
229 shenandoah_padding(1);
230
231 public:
232 void increase_committed(size_t bytes);
233 void decrease_committed(size_t bytes);
234
235 void reset_bytes_allocated_since_gc_start();
236
237 size_t min_capacity() const;
238 size_t max_capacity() const override;
239 size_t soft_max_capacity() const;
240 size_t initial_capacity() const;
241 size_t capacity() const override;
242 size_t used() const override;
243 size_t committed() const;
244
245 void set_soft_max_capacity(size_t v);
246
247 // ---------- Periodic Tasks
248 //
249 public:
250 // Notify heuristics and region state change logger that the state of the heap has changed
251 void notify_heap_changed();
252
253 // Force counters to update
254 void set_forced_counters_update(bool value);
255
256 // Update counters if forced flag is set
257 void handle_force_counters_update();
258
259 // ---------- Workers handling
260 //
261 private:
262 uint _max_workers;
263 ShenandoahWorkerThreads* _workers;
264 ShenandoahWorkerThreads* _safepoint_workers;
265
266 virtual void initialize_controller();
267
268 public:
269 uint max_workers();
270 void assert_gc_workers(uint nworker) NOT_DEBUG_RETURN;
271
272 WorkerThreads* workers() const;
273 WorkerThreads* safepoint_workers() override;
274
275 void gc_threads_do(ThreadClosure* tcl) const override;
276
277 // ---------- Heap regions handling machinery
278 //
279 private:
280 MemRegion _heap_region;
281 bool _heap_region_special;
282 size_t _num_regions;
283 ShenandoahHeapRegion** _regions;
284 uint8_t* _affiliations; // Holds array of enum ShenandoahAffiliation, including FREE status in non-generational mode
285
286 public:
287
288 inline HeapWord* base() const { return _heap_region.start(); }
289 inline HeapWord* end() const { return _heap_region.end(); }
290
291 inline size_t num_regions() const { return _num_regions; }
292 inline bool is_heap_region_special() { return _heap_region_special; }
293
294 inline ShenandoahHeapRegion* heap_region_containing(const void* addr) const;
295 inline size_t heap_region_index_containing(const void* addr) const;
296
297 inline ShenandoahHeapRegion* get_region(size_t region_idx) const;
298
299 void heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
300 void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
301
302 inline ShenandoahMmuTracker* mmu_tracker() { return &_mmu_tracker; };
303
304 // ---------- GC state machinery
305 //
306 // Important: Do not change the values of these flags. AArch64 GC barriers
307 // depend on the flags having specific values.
308 //
309 // GC state describes the important parts of collector state, that may be
310 // used to make barrier selection decisions in the native and generated code.
311 // Multiple bits can be set at once.
312 //
313 // Important invariant: when GC state is zero, the heap is stable, and no barriers
314 // are required.
315 //
316 public:
317 enum GCStateBitPos {
318 // Heap has forwarded objects: needs LRB barriers.
319 HAS_FORWARDED_BITPOS = 0,
320
321 // Heap is under marking: needs SATB barriers.
322 // For generational mode, it means either young or old marking, or both.
323 MARKING_BITPOS = 1,
324
325 // Heap is under evacuation: needs LRB barriers. (Set together with HAS_FORWARDED)
326 EVACUATION_BITPOS = 2,
327
328 // Heap is under updating: needs no additional barriers.
329 UPDATE_REFS_BITPOS = 3,
330
331 // Heap is under weak-reference/roots processing: needs weak-LRB barriers.
332 WEAK_ROOTS_BITPOS = 4,
333
334 // Young regions are under marking, need SATB barriers.
335 YOUNG_MARKING_BITPOS = 5,
336
337 // Old regions are under marking, need SATB barriers.
338 OLD_MARKING_BITPOS = 6
339 };
340
341 enum GCState {
342 STABLE = 0,
343 HAS_FORWARDED = 1 << HAS_FORWARDED_BITPOS,
344 MARKING = 1 << MARKING_BITPOS,
345 EVACUATION = 1 << EVACUATION_BITPOS,
346 UPDATE_REFS = 1 << UPDATE_REFS_BITPOS,
347 WEAK_ROOTS = 1 << WEAK_ROOTS_BITPOS,
348 YOUNG_MARKING = 1 << YOUNG_MARKING_BITPOS,
349 OLD_MARKING = 1 << OLD_MARKING_BITPOS
350 };
351
352 private:
353 bool _gc_state_changed;
354 ShenandoahSharedBitmap _gc_state;
355 ShenandoahSharedFlag _heap_changed;
356 ShenandoahSharedFlag _degenerated_gc_in_progress;
357 ShenandoahSharedFlag _full_gc_in_progress;
358 ShenandoahSharedFlag _full_gc_move_in_progress;
359 ShenandoahSharedFlag _concurrent_strong_root_in_progress;
360
361 Atomic<size_t> _gc_no_progress_count;
362
363 // This updates the singular, global gc state. This call must happen on a safepoint.
364 void set_gc_state_at_safepoint(uint mask, bool value);
365
366 // This also updates the global gc state, but does not need to be called on a safepoint.
367 // Critically, this method will _not_ flag that the global gc state has changed and threads
368 // will continue to use their thread local copy. This is expected to be used in conjunction
369 // with a handshake operation to propagate the new gc state.
370 void set_gc_state_concurrent(uint mask, bool value);
371
372 public:
373 // This returns the raw value of the singular, global gc state.
374 inline char gc_state() const;
375
376 // Compares the given state against either the global gc state, or the thread local state.
377 // The global gc state may change on a safepoint and is the correct value to use until
378 // the global gc state has been propagated to all threads (after which, this method will
379 // compare against the thread local state). The thread local gc state may also be changed
380 // by a handshake operation, in which case, this function continues using the updated thread
381 // local value.
382 inline bool is_gc_state(GCState state) const;
383
384 // This copies the global gc state into a thread local variable for all threads.
385 // The thread local gc state is primarily intended to support quick access at barriers.
386 // All threads are updated because in some cases the control thread or the vm thread may
387 // need to execute the load reference barrier.
388 void propagate_gc_state_to_all_threads();
389
390 // This is public to support assertions that the state hasn't been changed off of
391 // a safepoint and that any changes were propagated to threads after the safepoint.
392 bool has_gc_state_changed() const { return _gc_state_changed; }
393
394 // Returns true if allocations have occurred in new regions or if regions have been
395 // uncommitted since the previous calls. This call will reset the flag to false.
396 bool has_changed() {
397 return _heap_changed.try_unset();
398 }
399
400 virtual void start_idle_span();
401
402 void set_concurrent_young_mark_in_progress(bool in_progress);
403 void set_concurrent_old_mark_in_progress(bool in_progress);
404 void set_evacuation_in_progress(bool in_progress);
405 void set_update_refs_in_progress(bool in_progress);
406 void set_degenerated_gc_in_progress(bool in_progress);
407 void set_full_gc_in_progress(bool in_progress);
408 void set_full_gc_move_in_progress(bool in_progress);
409 void set_has_forwarded_objects(bool cond);
410 void set_concurrent_strong_root_in_progress(bool cond);
411 void set_concurrent_weak_root_in_progress(bool cond);
412
413 inline bool is_idle() const;
414 inline bool is_concurrent_mark_in_progress() const;
415 inline bool is_concurrent_young_mark_in_progress() const;
416 inline bool is_concurrent_old_mark_in_progress() const;
417 inline bool is_update_refs_in_progress() const;
418 inline bool is_evacuation_in_progress() const;
419 inline bool is_degenerated_gc_in_progress() const;
420 inline bool is_full_gc_in_progress() const;
421 inline bool is_full_gc_move_in_progress() const;
422 inline bool has_forwarded_objects() const;
423
424 inline bool is_stw_gc_in_progress() const;
425 inline bool is_concurrent_strong_root_in_progress() const;
426 inline bool is_concurrent_weak_root_in_progress() const;
427 bool is_prepare_for_old_mark_in_progress() const;
428
429 private:
430 void manage_satb_barrier(bool active);
431
432 // Records the time of the first successful cancellation request. This is used to measure
433 // the responsiveness of the heuristic when starting a cycle.
434 double _cancel_requested_time;
435
436 // Indicates the reason the current GC has been cancelled (GCCause::_no_gc means the gc is not cancelled).
437 ShenandoahSharedEnumFlag<GCCause::Cause> _cancelled_gc;
438
439 // Returns true if cancel request was successfully communicated.
440 // Returns false if some other thread already communicated cancel
441 // request. A true return value does not mean GC has been
442 // cancelled, only that the process of cancelling GC has begun.
443 bool try_cancel_gc(GCCause::Cause cause);
444
445 public:
446 // True if gc has been cancelled
447 inline bool cancelled_gc() const;
448
449 // Used by workers in the GC cycle to detect cancellation and honor STS requirements
450 inline bool check_cancelled_gc_and_yield(bool sts_active = true);
451
452 // This indicates the reason the last GC cycle was cancelled.
453 inline GCCause::Cause cancelled_cause() const;
454
455 // Clears the cancellation cause and resets the oom handler
456 inline void clear_cancelled_gc();
457
458 // Clears the cancellation cause iff the current cancellation reason equals the given
459 // expected cancellation cause. Does not reset the oom handler.
460 inline GCCause::Cause clear_cancellation(GCCause::Cause expected);
461
462 void cancel_concurrent_mark();
463
464 // Returns true if and only if this call caused a gc to be cancelled.
465 bool cancel_gc(GCCause::Cause cause);
466
467 // Returns true if the soft maximum heap has been changed using management APIs.
468 bool check_soft_max_changed();
469
470 protected:
471 // This is shared between shConcurrentGC and shDegenerateGC so that degenerated
472 // GC can resume update refs from where the concurrent GC was cancelled. It is
473 // also used in shGenerationalHeap, which uses a different closure for update refs.
474 ShenandoahRegionIterator _update_refs_iterator;
475
476 private:
477 inline void reset_cancellation_time();
478
479 // GC support
480 // Evacuation
481 virtual void evacuate_collection_set(ShenandoahGeneration* generation, bool concurrent);
482 // Concurrent root processing
483 void prepare_concurrent_roots();
484 void finish_concurrent_roots();
485 // Concurrent class unloading support
486 void do_class_unloading();
487 // Reference updating
488 void prepare_update_heap_references();
489
490 // Retires LABs used for evacuation
491 void concurrent_prepare_for_update_refs();
492
493 // Turn off weak roots flag, purge old satb buffers in generational mode
494 void concurrent_final_roots(HandshakeClosure* handshake_closure = nullptr);
495
496 virtual void update_heap_references(ShenandoahGeneration* generation, bool concurrent);
497 // Final update region states
498 void update_heap_region_states(bool concurrent);
499 virtual void final_update_refs_update_region_states();
500
501 void rendezvous_threads(const char* name);
502 void recycle_trash();
503 public:
504 // The following two functions rebuild the free set at the end of GC, in preparation for an idle phase.
505 void rebuild_free_set(bool concurrent);
506 void rebuild_free_set_within_phase();
507 void notify_gc_progress();
508 void notify_gc_no_progress();
509 size_t get_gc_no_progress_count() const;
510
511 // The uncommit thread targets soft max heap, notify this thread when that value has changed.
512 void notify_soft_max_changed();
513
514 // An explicit GC request may have freed regions, notify the uncommit thread.
515 void notify_explicit_gc_requested();
516
517 private:
518 ShenandoahGeneration* _global_generation;
519
520 protected:
521 // The control thread presides over concurrent collection cycles
522 ShenandoahController* _control_thread;
523
524 // The uncommit thread periodically attempts to uncommit regions that have been empty for longer than ShenandoahUncommitDelay
525 ShenandoahUncommitThread* _uncommit_thread;
526
527 ShenandoahYoungGeneration* _young_generation;
528 ShenandoahOldGeneration* _old_generation;
529
530 private:
531 ShenandoahCollectorPolicy* _shenandoah_policy;
532 ShenandoahMode* _gc_mode;
533 ShenandoahFreeSet* _free_set;
534 ShenandoahPacer* _pacer;
535 ShenandoahVerifier* _verifier;
536
537 ShenandoahPhaseTimings* _phase_timings;
538 ShenandoahMmuTracker _mmu_tracker;
539
540 public:
541 ShenandoahController* control_thread() const { return _control_thread; }
542
543 ShenandoahGeneration* global_generation() const { return _global_generation; }
544 ShenandoahYoungGeneration* young_generation() const {
545 assert(mode()->is_generational(), "Young generation requires generational mode");
546 return _young_generation;
547 }
548
549 ShenandoahOldGeneration* old_generation() const {
550 assert(ShenandoahCardBarrier, "Card mark barrier should be on");
551 return _old_generation;
552 }
553
554 ShenandoahGeneration* generation_for(ShenandoahAffiliation affiliation) const;
555
556 ShenandoahCollectorPolicy* shenandoah_policy() const { return _shenandoah_policy; }
557 ShenandoahMode* mode() const { return _gc_mode; }
558 ShenandoahFreeSet* free_set() const { return _free_set; }
559 ShenandoahPacer* pacer() const { return _pacer; }
560
561 ShenandoahPhaseTimings* phase_timings() const { return _phase_timings; }
562
563 ShenandoahEvacOOMHandler* oom_evac_handler() { return &_oom_evac_handler; }
564
565 ShenandoahEvacuationTracker* evac_tracker() const {
566 return _evac_tracker;
567 }
568
569 void on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation);
570 void on_cycle_end(ShenandoahGeneration* generation);
571
572 ShenandoahVerifier* verifier();
573
574 // ---------- VM subsystem bindings
575 //
576 private:
577 ShenandoahMonitoringSupport* _monitoring_support;
578 MemoryPool* _memory_pool;
579 GCMemoryManager _stw_memory_manager;
580 GCMemoryManager _cycle_memory_manager;
581 ConcurrentGCTimer* _gc_timer;
582 // For exporting to SA
583 int _log_min_obj_alignment_in_bytes;
584 public:
585 ShenandoahMonitoringSupport* monitoring_support() const { return _monitoring_support; }
586 GCMemoryManager* cycle_memory_manager() { return &_cycle_memory_manager; }
587 GCMemoryManager* stw_memory_manager() { return &_stw_memory_manager; }
588
589 GrowableArray<GCMemoryManager*> memory_managers() override;
590 GrowableArray<MemoryPool*> memory_pools() override;
591 MemoryUsage memory_usage() override;
592 GCTracer* tracer();
593 ConcurrentGCTimer* gc_timer() const;
594
595 // ---------- Class Unloading
596 //
597 private:
598 ShenandoahSharedFlag _unload_classes;
599 ShenandoahUnload _unloader;
600
601 public:
602 void set_unload_classes(bool uc);
603 bool unload_classes() const;
604
605 // Perform STW class unloading and weak root cleaning
606 void parallel_cleaning(ShenandoahGeneration* generation, bool full_gc);
607
608 private:
609 void stw_unload_classes(bool full_gc);
610 void stw_process_weak_roots(bool full_gc);
611 void stw_weak_refs(ShenandoahGeneration* generation, bool full_gc);
612
613 inline void assert_lock_for_affiliation(ShenandoahAffiliation orig_affiliation,
614 ShenandoahAffiliation new_affiliation);
615
616 // Heap iteration support
617 void scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops);
618 bool prepare_aux_bitmap_for_iteration();
619 void reclaim_aux_bitmap_for_iteration();
620
621 // ---------- Generic interface hooks
622 // Minor things that super-interface expects us to implement to play nice with
623 // the rest of runtime. Some of the things here are not required to be implemented,
624 // and can be stubbed out.
625 //
626 public:
627 // Check the pointer is in active part of Java heap.
628 // Use is_in_reserved to check if object is within heap bounds.
629 bool is_in(const void* p) const override;
630
631 // Returns true if the given oop belongs to a generation that is actively being collected.
632 inline bool is_in_active_generation(oop obj) const;
633 inline bool is_in_young(const void* p) const;
634 inline bool is_in_old(const void* p) const;
635
636 // Returns true iff the young generation is being collected and the given pointer
637 // is in the old generation. This is used to prevent the young collection from treating
638 // such an object as unreachable.
639 inline bool is_in_old_during_young_collection(oop obj) const;
640
641 inline ShenandoahAffiliation region_affiliation(const ShenandoahHeapRegion* r) const;
642 inline void set_affiliation(ShenandoahHeapRegion* r, ShenandoahAffiliation new_affiliation);
643
644 inline ShenandoahAffiliation region_affiliation(size_t index) const;
645
646 bool requires_barriers(stackChunkOop obj) const override;
647
648 MemRegion reserved_region() const { return _reserved; }
649 bool is_in_reserved(const void* addr) const { return _reserved.contains(addr); }
650
651 void collect_as_vm_thread(GCCause::Cause cause) override;
652 void collect(GCCause::Cause cause) override;
653 void do_full_collection(bool clear_all_soft_refs) override;
654
655 // Used for parsing heap during error printing
656 HeapWord* block_start(const void* addr) const;
657 bool block_is_obj(const HeapWord* addr) const;
658 bool print_location(outputStream* st, void* addr) const override;
659
660 // Used for native heap walkers: heap dumpers, mostly
661 void object_iterate(ObjectClosure* cl) override;
662 // Parallel heap iteration support
663 ParallelObjectIteratorImpl* parallel_object_iterator(uint workers) override;
664
665 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
666 void keep_alive(oop obj) override;
667
668 // ---------- Safepoint interface hooks
669 //
670 public:
671 void safepoint_synchronize_begin() override;
672 void safepoint_synchronize_end() override;
673
674 // ---------- Code roots handling hooks
675 //
676 public:
677 void register_nmethod(nmethod* nm) override;
678 void unregister_nmethod(nmethod* nm) override;
679 void verify_nmethod(nmethod* nm) override {}
680
681 // ---------- Pinning hooks
682 //
683 public:
684 // Shenandoah supports per-object (per-region) pinning
685 void pin_object(JavaThread* thread, oop obj) override;
686 void unpin_object(JavaThread* thread, oop obj) override;
687
688 void sync_pinned_region_status();
689 void assert_pinned_region_status() const NOT_DEBUG_RETURN;
690 void assert_pinned_region_status(ShenandoahGeneration* generation) const NOT_DEBUG_RETURN;
691
692 // ---------- CDS archive support
693
694 bool can_load_archived_objects() const override { return true; }
695 HeapWord* allocate_loaded_archive_space(size_t size) override;
696 void complete_loaded_archive_space(MemRegion archive_space) override;
697
698 // ---------- Allocation support
699 //
700 protected:
701 inline HeapWord* allocate_from_gclab(Thread* thread, size_t size);
702
703 private:
704 HeapWord* allocate_memory_under_lock(ShenandoahAllocRequest& request, bool& in_new_region);
705 HeapWord* allocate_from_gclab_slow(Thread* thread, size_t size);
706 HeapWord* allocate_new_gclab(size_t min_size, size_t word_size, size_t* actual_size);
707
708 // We want to retry an unsuccessful attempt at allocation until at least a full gc.
709 bool should_retry_allocation(size_t original_full_gc_count) const;
710
711 public:
712 HeapWord* allocate_memory(ShenandoahAllocRequest& request);
713 HeapWord* mem_allocate(size_t size) override;
714 MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
715 size_t size,
716 Metaspace::MetadataType mdtype) override;
717
718 HeapWord* allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) override;
719 size_t tlab_capacity() const override;
720 size_t unsafe_max_tlab_alloc() const override;
721 size_t max_tlab_size() const override;
722 size_t tlab_used() const override;
723
724 void ensure_parsability(bool retire_labs) override;
725
726 void labs_make_parsable();
727 void tlabs_retire(bool resize);
728 void gclabs_retire(bool resize);
729
730 // ---------- Marking support
731 //
732 private:
733 ShenandoahMarkingContext* _marking_context;
734 MemRegion _bitmap_region;
735 MemRegion _aux_bitmap_region;
736 MarkBitMap _verification_bit_map;
737 MarkBitMap _aux_bit_map;
738
739 size_t _bitmap_size;
740 size_t _bitmap_regions_per_slice;
741 size_t _bitmap_bytes_per_slice;
742
743 size_t _pretouch_heap_page_size;
744 size_t _pretouch_bitmap_page_size;
745
746 bool _bitmap_region_special;
747 bool _aux_bitmap_region_special;
748
749 ShenandoahLiveData** _liveness_cache;
750
751 public:
752 // Return the marking context regardless of the completeness status.
753 inline ShenandoahMarkingContext* marking_context() const;
754
755 template<class T>
756 inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl);
757
758 template<class T>
759 inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);
760
761 template<class T>
762 inline void marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);
763
764 // SATB barriers hooks
765 inline bool requires_marking(const void* entry) const;
766
767 // Support for bitmap uncommits
768 void commit_bitmap_slice(ShenandoahHeapRegion *r);
769 void uncommit_bitmap_slice(ShenandoahHeapRegion *r);
770 bool is_bitmap_region_special() { return _bitmap_region_special; }
771 bool is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self = false);
772
773 // During concurrent reset, the control thread will zero out the mark bitmaps for committed regions.
774 // This cannot happen when the uncommit thread is simultaneously trying to uncommit regions and their bitmaps.
775 // To prevent these threads from working at the same time, we provide these methods for the control thread to
776 // prevent the uncommit thread from working while a collection cycle is in progress.
777
778 // Forbid uncommits (will stop and wait if regions are being uncommitted)
779 void forbid_uncommit();
780
781 // Allow the uncommit thread to process regions
782 void allow_uncommit();
783 #ifdef ASSERT
784 bool is_uncommit_in_progress();
785 #endif
786
787 // Liveness caching support
788 ShenandoahLiveData* get_liveness_cache(uint worker_id);
789 void flush_liveness_cache(uint worker_id);
790
791 size_t pretouch_heap_page_size() { return _pretouch_heap_page_size; }
792
793 // ---------- Evacuation support
794 //
795 private:
796 ShenandoahCollectionSet* _collection_set;
797 ShenandoahEvacOOMHandler _oom_evac_handler;
798
799 oop try_evacuate_object(oop src, Thread* thread, ShenandoahHeapRegion* from_region, ShenandoahAffiliation target_gen);
800
801 protected:
802 // Used primarily to look for failed evacuation attempts.
803 ShenandoahEvacuationTracker* _evac_tracker;
804
805 public:
806 static address in_cset_fast_test_addr();
807
808 ShenandoahCollectionSet* collection_set() const { return _collection_set; }
809
810 // Checks if object is in the collection set.
811 inline bool in_collection_set(oop obj) const;
812
813 // Checks if location is in the collection set. Can be interior pointer, not the oop itself.
814 inline bool in_collection_set_loc(void* loc) const;
815
816 // Evacuates or promotes object src. Returns the evacuated object, either evacuated
817 // by this thread, or by some other thread.
818 virtual oop evacuate_object(oop src, Thread* thread);
819
820 // Call before/after evacuation.
821 inline void enter_evacuation(Thread* t);
822 inline void leave_evacuation(Thread* t);
823
824 // ---------- Helper functions
825 //
826 public:
827 template <class T>
828 inline void conc_update_with_forwarded(T* p);
829
830 template <class T>
831 inline void non_conc_update_with_forwarded(T* p);
832
833 static inline void atomic_update_oop(oop update, oop* addr, oop compare);
834 static inline void atomic_update_oop(oop update, narrowOop* addr, oop compare);
835 static inline void atomic_update_oop(oop update, narrowOop* addr, narrowOop compare);
836
837 static inline bool atomic_update_oop_check(oop update, oop* addr, oop compare);
838 static inline bool atomic_update_oop_check(oop update, narrowOop* addr, oop compare);
839 static inline bool atomic_update_oop_check(oop update, narrowOop* addr, narrowOop compare);
840
841 static inline void atomic_clear_oop( oop* addr, oop compare);
842 static inline void atomic_clear_oop(narrowOop* addr, oop compare);
843 static inline void atomic_clear_oop(narrowOop* addr, narrowOop compare);
844
845 size_t trash_humongous_region_at(ShenandoahHeapRegion *r) const;
846
847 static inline void increase_object_age(oop obj, uint additional_age);
848
849 // Return the object's age, or a sentinel value when the age can't
850 // necessarily be determined because of concurrent locking by the
851 // mutator
852 static inline uint get_object_age(oop obj);
853
854 void log_heap_status(const char *msg) const;
855
856 private:
857 void trash_cset_regions();
858
859 // ---------- Testing helpers functions
860 //
861 private:
862 ShenandoahSharedFlag _inject_alloc_failure;
863
864 void try_inject_alloc_failure();
865 bool should_inject_alloc_failure();
866 };
867
868 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP