/*
 * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_COLLECTEDHEAP_HPP
#define SHARE_GC_SHARED_COLLECTEDHEAP_HPP

#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/verifyOption.hpp"
#include "memory/allocation.hpp"
#include "memory/metaspace.hpp"
#include "memory/universe.hpp"
#include "oops/stackChunkOop.hpp"
#include "runtime/handles.hpp"
#include "runtime/perfDataTypes.hpp"
#include "runtime/safepoint.hpp"
#include "services/cpuTimeUsage.hpp"
#include "services/memoryUsage.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/growableArray.hpp"

// A "CollectedHeap" is an implementation of a Java heap for HotSpot. This
// is an abstract class: there may be many different kinds of heaps. This
// class defines the functions that a heap must implement, and contains
// infrastructure common to all heaps.

class GCHeapLog;
class GCHeapSummary;
class GCMemoryManager;
class GCMetaspaceLog;
class GCTimer;
class GCTracer;
class MemoryPool;
class MetaspaceSummary;
class ReservedHeapSpace;
class Thread;
class ThreadClosure;
class VirtualSpaceSummary;
class WorkerThreads;
class nmethod;

class ParallelObjectIteratorImpl : public CHeapObj<mtGC> {
public:
  virtual ~ParallelObjectIteratorImpl() {}
  virtual void object_iterate(ObjectClosure* cl, uint worker_id) = 0;
};

// User-facing parallel object iterator. This is a StackObj, which ensures
// that _impl is allocated and deleted within the scope of this object. This
// gives the implementation the life cycle required by ThreadsListHandle,
// which is sometimes used by the root iterators.
class ParallelObjectIterator : public StackObj {
  ParallelObjectIteratorImpl* _impl;

public:
  ParallelObjectIterator(uint thread_num);
  ~ParallelObjectIterator();
  void object_iterate(ObjectClosure* cl, uint worker_id);
};
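
// Illustrative sketch (not part of this class's contract; names are
// hypothetical): a parallel heap walk constructs one iterator on the stack
// and has each of N workers call object_iterate() with its own worker id:
//
//   ParallelObjectIterator poi(num_workers);  // allocates the impl
//   // in worker i, where 0 <= i < num_workers:
//   poi.object_iterate(&my_closure, i);
//   // the impl is deleted when poi goes out of scope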

//
// CollectedHeap
//   SerialHeap
//   G1CollectedHeap
//   ParallelScavengeHeap
//   EpsilonHeap
//   ShenandoahHeap
//   ZCollectedHeap
//
class CollectedHeap : public CHeapObj<mtGC> {
  friend class CPUTimeUsage::GC;
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class IsSTWGCActiveMark; // Block structured external access to _is_stw_gc_active
  friend class MemAllocator;

 private:
  GCHeapLog* _heap_log;
  GCMetaspaceLog* _metaspace_log;

  // Historic gc information
  size_t _capacity_at_last_gc;
  size_t _used_at_last_gc;

  // Initially set to the klass of java.lang.Object; once loading of
  // FillerObject_klass is complete, it is set to that klass instead.
  static Klass* _filler_object_klass;

 protected:
  // Not used by all GCs
  MemRegion _reserved;

  bool _is_stw_gc_active;

  // (Minimum) Alignment reserve for TLABs and PLABs.
  static size_t _lab_alignment_reserve;
  // Used for filler objects (static, but initialized in ctor).
  static size_t _filler_array_max_size;

  static size_t _stack_chunk_max_size; // 0 for no limit

  // Last time the whole heap has been examined in support of RMI
  // MaxObjectInspectionAge.
  // This timestamp must be monotonically non-decreasing to avoid
  // time-warp warnings.
  jlong _last_whole_heap_examined_time_ns;

  unsigned int _total_collections;      // ... started
  unsigned int _total_full_collections; // ... started
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)

  jlong _vmthread_cpu_time;

  // Reason for current garbage collection. Should be set to
  // a value reflecting no collection between collections.
  GCCause::Cause _gc_cause;
  GCCause::Cause _gc_lastcause;
  PerfStringVariable* _perf_gc_cause;
  PerfStringVariable* _perf_gc_lastcause;

  // Constructor
  CollectedHeap();

  // Create a new TLAB. All TLAB allocations must go through this.
  // To allow more flexible TLAB allocations, min_size specifies
  // the minimum size needed, while requested_size is the requested
  // size based on ergonomics. The actually allocated size will be
  // returned in actual_size.
  virtual HeapWord* allocate_new_tlab(size_t min_size,
                                      size_t requested_size,
                                      size_t* actual_size) = 0;
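
  // Illustrative sketch of the contract (variable names are hypothetical):
  //
  //   size_t actual = 0;
  //   HeapWord* tlab = allocate_new_tlab(min_words, requested_words, &actual);
  //   // On success: tlab != nullptr and actual >= min_words; implementations
  //   // may return more or less than requested_words.
  //   // On failure: tlab == nullptr, and the caller falls back to allocating
  //   // the individual object directly via mem_allocate().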

  // Reinitialize TLABs before resuming mutators.
  virtual void resize_all_tlabs();

  // Raw memory allocation facilities. The obj and array allocate methods
  // are covers for these methods. mem_allocate() should never be called
  // to allocate TLABs, only individual objects.
  virtual HeapWord* mem_allocate(size_t size) = 0;

  // Filler object utilities.
  static inline size_t filler_array_hdr_size();

  static size_t filler_array_min_size();

 protected:
  static inline void zap_filler_array_with(HeapWord* start, size_t words, juint value);
  DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);)

  // Fill with a single array; caller must ensure filler_array_min_size() <=
  // words <= filler_array_max_size().
  static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);

  // Fill with a single object (either an int array or a java.lang.Object).
  static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);

  virtual void trace_heap(GCWhen::Type when, const GCTracer* tracer);

  // Verification functions
  DEBUG_ONLY(static void check_for_valid_allocation_state();)

 public:
  enum Name {
    None,
    Serial,
    Parallel,
    G1,
    Epsilon,
    Z,
    Shenandoah
  };

 protected:
  // Get a pointer to the derived heap object. Used to implement
  // derived class heap() functions rather than being called directly.
  template<typename T>
  static T* named_heap(Name kind) {
    CollectedHeap* heap = Universe::heap();
    assert(heap != nullptr, "Uninitialized heap");
    assert(kind == heap->kind(), "Heap kind %u should be %u",
           static_cast<uint>(heap->kind()), static_cast<uint>(kind));
    return static_cast<T*>(heap);
  }
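
  // For example, a concrete heap's heap() accessor is typically implemented
  // in terms of named_heap (sketch only; the exact accessor lives in each
  // subclass):
  //
  //   static G1CollectedHeap* heap() {
  //     return named_heap<G1CollectedHeap>(CollectedHeap::G1);
  //   }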

  // Print any relevant tracing info that flags imply.
  // Default implementation does nothing.
  virtual void print_tracing_info() const = 0;

  // Stop any ongoing concurrent work and prepare for exit.
  virtual void stop() = 0;

 public:

  static inline size_t filler_array_max_size() {
    return _filler_array_max_size;
  }

  static inline size_t stack_chunk_max_size() {
    return _stack_chunk_max_size;
  }

  static inline Klass* filler_object_klass() {
    return _filler_object_klass;
  }

  static inline void set_filler_object_klass(Klass* k) {
    _filler_object_klass = k;
  }

  virtual Name kind() const = 0;

  virtual const char* name() const = 0;

  /**
   * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
   * and JNI_OK on success.
   */
  virtual jint initialize() = 0;
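
  // Sketch of the expected startup handshake (hypothetical caller; the real
  // call is made during heap/Universe initialization):
  //
  //   jint status = heap->initialize();
  //   if (status != JNI_OK) {
  //     // VM creation fails; JNI_ENOMEM indicates the heap could not be
  //     // reserved or committed.
  //   }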

  // In many heaps, there will be a need to perform some initialization activities
  // after the Universe is fully formed, but before general heap allocation is allowed.
  // This is the correct place for such initialization methods.
  virtual void post_initialize();

  bool is_shutting_down() const;

  // If the VM is shutting down, we may have skipped VM_CollectForAllocation.
  // In this case, stall the allocation request briefly in the hope that
  // the VM shutdown completes before the allocation request returns.
  void stall_for_vm_shutdown();

  void before_exit();

  // Stop and resume concurrent GC threads interfering with safepoint operations
  virtual void safepoint_synchronize_begin() {}
  virtual void safepoint_synchronize_end() {}

  void add_vmthread_cpu_time(jlong time);

  void initialize_reserved_region(const ReservedHeapSpace& rs);

  virtual size_t capacity() const = 0;
  virtual size_t used() const = 0;

  // Returns unused capacity.
  virtual size_t unused() const;

  // Historic gc information
  size_t free_at_last_gc() const { return _capacity_at_last_gc - _used_at_last_gc; }
  size_t used_at_last_gc() const { return _used_at_last_gc; }
  void update_capacity_and_used_at_gc();

  // Support for java.lang.Runtime.maxMemory(): return the maximum amount of
  // memory that the vm could make available for storing 'normal' java objects.
  // This is based on the reserved address space, but should not include space
  // that the vm uses internally for bookkeeping or temporary storage
  // (e.g., in the case of the young gen, one of the survivor
  // spaces).
  virtual size_t max_capacity() const = 0;

  // Returns "TRUE" iff "p" points into the committed areas of the heap.
  // This method can be expensive, so avoid using it in performance-critical
  // code.
  virtual bool is_in(const void* p) const = 0;

  DEBUG_ONLY(bool is_in_or_null(const void* p) const { return p == nullptr || is_in(p); })

  void set_gc_cause(GCCause::Cause v);
  GCCause::Cause gc_cause() { return _gc_cause; }

  oop obj_allocate(Klass* klass, size_t size, TRAPS);
  virtual oop array_allocate(Klass* klass, size_t size, int length, bool do_zero, TRAPS);
  oop class_allocate(Klass* klass, size_t size, TRAPS);

  // Utilities for turning raw memory into filler objects.
  //
  // min_fill_size() is the smallest region that can be filled.
  // fill_with_objects() can fill arbitrary-sized regions of the heap using
  // multiple objects. fill_with_object() is for regions known to be smaller
  // than the largest array of integers; it uses a single object to fill the
  // region and has slightly less overhead.
  static size_t min_fill_size() {
    return size_t(align_object_size(oopDesc::header_size()));
  }

  static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);

  static void fill_with_object(HeapWord* start, size_t words, bool zap = true);
  static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true) {
    fill_with_object(start, pointer_delta(end, start), zap);
  }
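
  // A typical use (illustrative sketch; variable names are hypothetical):
  // when a TLAB is retired, its unused remainder is turned into a dead
  // filler object so heap walkers can still parse the region:
  //
  //   HeapWord* top = tlab_top;
  //   HeapWord* end = tlab_end;
  //   if (pointer_delta(end, top) >= min_fill_size()) {
  //     fill_with_object(top, end); // fills [top, end) with one object
  //   }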

  virtual void fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap);
  static size_t min_dummy_object_size() {
    return oopDesc::header_size();
  }

  static size_t lab_alignment_reserve() {
    assert(_lab_alignment_reserve != SIZE_MAX, "uninitialized");
    return _lab_alignment_reserve;
  }

  // Some heaps may be in an unparsable state at certain times between
  // collections. This may be necessary for efficient implementation of
  // certain allocation-related activities. Calling this function before
  // attempting to parse a heap ensures that the heap is in a parsable
  // state (provided other concurrent activity does not introduce
  // unparsability). It is normally expected, therefore, that this
  // method is invoked with the world stopped.
  // NOTE: if you override this method, make sure you call
  // super::ensure_parsability so that the non-generational
  // part of the work gets done. See implementation of
  // CollectedHeap::ensure_parsability and, for instance,
  // that of ParallelScavengeHeap::ensure_parsability().
  // The argument "retire_tlabs" controls whether existing TLABs
  // are merely filled or also retired, thus preventing further
  // allocation from them and necessitating allocation of new TLABs.
  virtual void ensure_parsability(bool retire_tlabs);
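
  // Override sketch (hypothetical subclass), following the NOTE above:
  //
  //   void MyHeap::ensure_parsability(bool retire_tlabs) {
  //     CollectedHeap::ensure_parsability(retire_tlabs); // the "super" call
  //     // ... collector-specific parsability work ...
  //   }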

  // The amount of space available for thread-local allocation buffers.
  virtual size_t tlab_capacity() const = 0;

  // The amount of space used for thread-local allocation buffers.
  virtual size_t tlab_used() const = 0;

  virtual size_t max_tlab_size() const;

  // An estimate of the maximum allocation that could be performed
  // for thread-local allocation buffers without triggering any
  // collection or expansion activity.
  virtual size_t unsafe_max_tlab_alloc() const = 0;

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc". This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause) = 0;

  // Perform a full collection
  virtual void do_full_collection(bool clear_all_soft_refs) = 0;

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  virtual void collect_as_vm_thread(GCCause::Cause cause);

  virtual MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                       size_t size,
                                                       Metaspace::MetadataType mdtype);

  // Return true if accesses to the object would require barriers.
  // This is used by continuations to copy chunks of a thread stack into a
  // StackChunk object or out of a StackChunk object back into the thread
  // stack. These chunks may contain references to objects. It is crucial
  // that the GC does not attempt to traverse the object while we modify it,
  // because its structure (oopmap) is changed when stack chunks are stored
  // into it.
  // StackChunk objects may be reused, so the GC must not assume that a
  // StackChunk object is always a freshly allocated object.
  virtual bool requires_barriers(stackChunkOop obj) const = 0;

  // Returns "true" iff there is a stop-world GC in progress.
  bool is_stw_gc_active() const { return _is_stw_gc_active; }

  // Total number of GC collections (started)
  unsigned int total_collections() const { return _total_collections; }
  unsigned int total_full_collections() const { return _total_full_collections; }

  // Increment total number of GC collections (started)
  void increment_total_collections(bool full = false) {
    _total_collections++;
    if (full) {
      _total_full_collections++;
    }
  }

  virtual MemoryUsage memory_usage();
  virtual GrowableArray<GCMemoryManager*> memory_managers() = 0;
  virtual GrowableArray<MemoryPool*> memory_pools() = 0;

  // Iterate over all objects, calling "cl.do_object" on each.
  virtual void object_iterate(ObjectClosure* cl) = 0;

  virtual ParallelObjectIteratorImpl* parallel_object_iterator(uint thread_num) {
    return nullptr;
  }

  // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
  virtual void keep_alive(oop obj) {}

  // Perform any cleanup actions necessary before allowing a verification.
  virtual void prepare_for_verify() = 0;

  // Returns the longest time (in ms) that has elapsed since the last
  // time that the whole heap has been examined by a garbage collection.
  jlong millis_since_last_whole_heap_examined();
  // The GC should call this whenever a whole-heap analysis completes, to
  // satisfy the above requirement.
  void record_whole_heap_examined_timestamp();

 private:
  // Generate any dumps preceding or following a full gc
  void full_gc_dump(GCTimer* timer, bool before);

  virtual void initialize_serviceability() = 0;

  void print_relative_to_gc(GCWhen::Type when) const;

 public:
  void pre_full_gc_dump(GCTimer* timer);
  void post_full_gc_dump(GCTimer* timer);

  virtual VirtualSpaceSummary create_heap_space_summary();
  GCHeapSummary create_heap_summary();

  MetaspaceSummary create_metaspace_summary();

  // GCs are free to use different in-memory bit representations for null,
  // which is typically not observable when using the Access API. However,
  // if for some reason a context doesn't allow using the Access API, then
  // this function explicitly checks whether the given memory location
  // contains a null value.
  virtual bool contains_null(const oop* p) const;

  void print_invocation_on(outputStream* st, const char* type, GCWhen::Type when) const;

  // Print heap information.
  virtual void print_heap_on(outputStream* st) const = 0;

  // Print additional information about the GC that is not included in print_heap_on().
  virtual void print_gc_on(outputStream* st) const = 0;

  // The default behavior is to call print_heap_on() and print_gc_on() on tty.
  virtual void print() const;

  // Used to print information about locations in the hs_err file.
  virtual bool print_location(outputStream* st, void* addr) const = 0;

  // Iterator for all GC threads (other than VM thread)
  virtual void gc_threads_do(ThreadClosure* tc) const = 0;

  void print_before_gc() const;
  void print_after_gc() const;

  // Registering and unregistering an nmethod (compiled code) with the heap.
  virtual void register_nmethod(nmethod* nm) = 0;
  virtual void unregister_nmethod(nmethod* nm) = 0;
  virtual void verify_nmethod(nmethod* nm) = 0;

  void trace_heap_before_gc(const GCTracer* gc_tracer);
  void trace_heap_after_gc(const GCTracer* gc_tracer);

  // Heap verification
  virtual void verify(VerifyOption option) = 0;

  // Return true if concurrent gc control via WhiteBox is supported by
  // this collector. The default implementation returns false.
  virtual bool supports_concurrent_gc_breakpoints() const;

  // Workers used in non-GC safepoints for parallel safepoint cleanup. If this
  // method returns null, cleanup tasks are done serially in the VMThread. See
  // `SafepointSynchronize::do_cleanup_tasks` for details.
  // GCs using a GC worker thread pool inside GC safepoints may opt to share
  // that pool with non-GC safepoints, avoiding creating extraneous threads.
  // Such sharing is safe, because GC safepoints and non-GC safepoints never
  // overlap. For example, `G1CollectedHeap::workers()` (for GC safepoints) and
  // `G1CollectedHeap::safepoint_workers()` (for non-GC safepoints) return the
  // same thread-pool.
  virtual WorkerThreads* safepoint_workers() { return nullptr; }

  // Support for object pinning. This is used by the JNI Get*Critical()
  // and Release*Critical() families of functions. The GC must guarantee
  // that pinned objects never move and don't get reclaimed as garbage.
  // These functions are potentially safepointing.
  virtual void pin_object(JavaThread* thread, oop obj) = 0;
  virtual void unpin_object(JavaThread* thread, oop obj) = 0;
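
  // Illustrative sketch (hypothetical call sites; the real callers are the
  // JNI critical-section entry points):
  //
  //   heap->pin_object(thread, obj);   // e.g. on GetPrimitiveArrayCritical
  //   // ... raw, GC-opaque access to the object's payload ...
  //   heap->unpin_object(thread, obj); // e.g. on ReleasePrimitiveArrayCritical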

  // Support for loading objects from CDS archive into the heap
  // (usually as a snapshot of the old generation).
  virtual bool can_load_archived_objects() const { return false; }
  virtual HeapWord* allocate_loaded_archive_space(size_t size) { return nullptr; }
  virtual void complete_loaded_archive_space(MemRegion archive_space) { }

  virtual bool is_oop(oop object) const;
  // Non-product verification and debugging.
#ifndef PRODUCT
  // Support for PromotionFailureALot. Return true if it's time to cause a
  // promotion failure. The no-argument version uses
  // this->_promotion_failure_alot_count as the counter.
  bool promotion_should_fail(volatile size_t* count);
  bool promotion_should_fail();

  // Reset the PromotionFailureALot counters. Should be called at the end of a
  // GC in which promotion failure occurred.
  void reset_promotion_should_fail(volatile size_t* count);
  void reset_promotion_should_fail();
#endif // #ifndef PRODUCT
};

// Class to set and reset the GC cause for a CollectedHeap.

class GCCauseSetter : StackObj {
  CollectedHeap* _heap;
  GCCause::Cause _previous_cause;
 public:
  GCCauseSetter(CollectedHeap* heap, GCCause::Cause cause) {
    _heap = heap;
    _previous_cause = _heap->gc_cause();
    _heap->set_gc_cause(cause);
  }

  ~GCCauseSetter() {
    _heap->set_gc_cause(_previous_cause);
  }
};
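
// Typical use (illustrative sketch): scope a cause around a collection
// request so the previous cause is restored on exit:
//
//   {
//     GCCauseSetter gcs(heap, GCCause::_java_lang_system_gc);
//     heap->do_full_collection(false /* clear_all_soft_refs */);
//   } // destructor restores the previous cause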

#endif // SHARE_GC_SHARED_COLLECTEDHEAP_HPP
--- EOF ---