/*
 * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_COLLECTEDHEAP_HPP
#define SHARE_GC_SHARED_COLLECTEDHEAP_HPP

#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "gc/shared/verifyOption.hpp"
#include "memory/allocation.hpp"
#include "memory/metaspace.hpp"
#include "memory/universe.hpp"
#include "oops/stackChunkOop.hpp"
#include "runtime/handles.hpp"
#include "runtime/perfDataTypes.hpp"
#include "runtime/safepoint.hpp"
#include "services/memoryUsage.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/growableArray.hpp"

// A "CollectedHeap" is an implementation of a Java heap for HotSpot.  This
// is an abstract class: there may be many different kinds of heaps.  This
// class defines the functions that a heap must implement, and contains
// infrastructure common to all heaps.
48
49 class GCHeapLog;
50 class GCHeapSummary;
51 class GCTimer;
52 class GCTracer;
53 class GCMemoryManager;
54 class MemoryPool;
55 class MetaspaceSummary;
56 class ReservedHeapSpace;
57 class Thread;
58 class ThreadClosure;
59 class VirtualSpaceSummary;
60 class WorkerThreads;
61 class nmethod;
62
class ParallelObjectIteratorImpl : public CHeapObj<mtGC> {
public:
  virtual ~ParallelObjectIteratorImpl() {}
  virtual void object_iterate(ObjectClosure* cl, uint worker_id) = 0;
};

// User-facing parallel object iterator. This is a StackObj, which ensures that
// the _impl is allocated and deleted in the scope of this object. This ensures
// the life cycle of the implementation is as required by ThreadsListHandle,
// which is sometimes used by the root iterators.
class ParallelObjectIterator : public StackObj {
  ParallelObjectIteratorImpl* _impl;

public:
  ParallelObjectIterator(uint thread_num);
  ~ParallelObjectIterator();
  void object_iterate(ObjectClosure* cl, uint worker_id);
};
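
// Illustrative usage sketch (not part of this interface): CountClosure,
// num_workers and worker_id below are hypothetical names. Each worker
// thread drives the same ParallelObjectIterator with its own worker id:
//
//   class CountClosure : public ObjectClosure {
//    public:
//     size_t _count = 0;
//     void do_object(oop obj) override { _count++; }
//   };
//
//   ParallelObjectIterator poi(num_workers); // StackObj: impl lives in this scope
//   // Executed by each worker thread, with worker_id in [0, num_workers):
//   CountClosure cl;
//   poi.object_iterate(&cl, worker_id);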

//
// CollectedHeap
//   SerialHeap
//   G1CollectedHeap
//   ParallelScavengeHeap
//   ShenandoahHeap
//   ZCollectedHeap
//
class CollectedHeap : public CHeapObj<mtGC> {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class IsSTWGCActiveMark; // Block structured external access to _is_stw_gc_active
  friend class MemAllocator;

 private:
  GCHeapLog* _gc_heap_log;

  // Historic gc information
  size_t _capacity_at_last_gc;
  size_t _used_at_last_gc;

  SoftRefPolicy _soft_ref_policy;

  // Set to the java_lang_Object klass first; once loading of
  // FillerObject_klass is complete, it is replaced by that klass.
  static Klass* _filler_object_klass;

 protected:
  // Not used by all GCs
  MemRegion _reserved;

  bool _is_stw_gc_active;

  // (Minimum) Alignment reserve for TLABs and PLABs.
  static size_t _lab_alignment_reserve;
  // Used for filler objects (static, but initialized in ctor).
  static size_t _filler_array_max_size;

  static size_t _stack_chunk_max_size; // 0 for no limit

  // Last time the whole heap has been examined in support of RMI
  // MaxObjectInspectionAge.
  // This timestamp must be monotonically non-decreasing to avoid
  // time-warp warnings.
  jlong _last_whole_heap_examined_time_ns;

  unsigned int _total_collections;      // ... started
  unsigned int _total_full_collections; // ... started
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)

  // Reason for current garbage collection.  Should be set to
  // a value reflecting no collection between collections.
  GCCause::Cause _gc_cause;
  GCCause::Cause _gc_lastcause;
  PerfStringVariable* _perf_gc_cause;
  PerfStringVariable* _perf_gc_lastcause;

  // Constructor
  CollectedHeap();

  // Create a new TLAB. All TLAB allocations must go through this.
  // To allow more flexible TLAB allocations, min_size specifies
  // the minimum size needed, while requested_size is the requested
  // size based on ergonomics. The actually allocated size will be
  // returned in actual_size.
  virtual HeapWord* allocate_new_tlab(size_t min_size,
                                      size_t requested_size,
                                      size_t* actual_size) = 0;
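
  // Illustrative contract sketch (hedged; MyHeap and try_allocate() are
  // hypothetical names): a conforming override returns null, or a chunk of
  // at least min_size words, reporting the granted size via actual_size:
  //
  //   HeapWord* MyHeap::allocate_new_tlab(size_t min_size,
  //                                       size_t requested_size,
  //                                       size_t* actual_size) {
  //     assert(min_size <= requested_size, "sanity");
  //     size_t size = requested_size;
  //     HeapWord* mem = try_allocate(size);    // hypothetical helper
  //     if (mem == nullptr && min_size < requested_size) {
  //       size = min_size;                     // fall back to the minimum
  //       mem = try_allocate(size);
  //     }
  //     if (mem != nullptr) {
  //       *actual_size = size;
  //     }
  //     return mem;
  //   }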

  // Reinitialize TLABs before resuming mutators.
  virtual void resize_all_tlabs();

  // Raw memory allocation facilities.
  // The obj and array allocate methods are covers for these methods.
  // mem_allocate() should never be called to allocate TLABs, only
  // individual objects.
  virtual HeapWord* mem_allocate(size_t size,
                                 bool* gc_overhead_limit_was_exceeded) = 0;

  // Filler object utilities.
  static inline size_t filler_array_hdr_size();

  static size_t filler_array_min_size();

 protected:
  static inline void zap_filler_array_with(HeapWord* start, size_t words, juint value);
  DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
  DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);)

  // Fill with a single array; caller must ensure filler_array_min_size() <=
  // words <= filler_array_max_size().
  static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);

  // Fill with a single object (either an int array or a java.lang.Object).
  static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);

  virtual void trace_heap(GCWhen::Type when, const GCTracer* tracer);

  // Verification functions
  debug_only(static void check_for_valid_allocation_state();)

 public:
  enum Name {
    None,
    Serial,
    Parallel,
    G1,
    Epsilon,
    Z,
    Shenandoah
  };

 protected:
  // Get a pointer to the derived heap object.  Used to implement
  // derived class heap() functions rather than being called directly.
  template<typename T>
  static T* named_heap(Name kind) {
    CollectedHeap* heap = Universe::heap();
    assert(heap != nullptr, "Uninitialized heap");
    assert(kind == heap->kind(), "Heap kind %u should be %u",
           static_cast<uint>(heap->kind()), static_cast<uint>(kind));
    return static_cast<T*>(heap);
  }
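
  // Example (sketch; the actual definitions live in the subclasses): a
  // derived class's static heap() accessor is expected to be a thin
  // wrapper over named_heap(), along these lines:
  //
  //   SerialHeap* SerialHeap::heap() {
  //     return named_heap<SerialHeap>(CollectedHeap::Serial);
  //   }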

 public:

  static inline size_t filler_array_max_size() {
    return _filler_array_max_size;
  }

  static inline size_t stack_chunk_max_size() {
    return _stack_chunk_max_size;
  }

  static inline Klass* filler_object_klass() {
    return _filler_object_klass;
  }

  static inline void set_filler_object_klass(Klass* k) {
    _filler_object_klass = k;
  }

  virtual Name kind() const = 0;

  virtual const char* name() const = 0;

  /**
   * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
   * and JNI_OK on success.
   */
  virtual jint initialize() = 0;

  // Many heaps need to perform some initialization after the Universe is
  // fully formed, but before general heap allocation is allowed. Such
  // initialization belongs here.
  virtual void post_initialize();

  // Stop any ongoing concurrent work and prepare for exit.
  virtual void stop() {}

  // Stop and resume concurrent GC threads interfering with safepoint operations
  virtual void safepoint_synchronize_begin() {}
  virtual void safepoint_synchronize_end() {}

  void initialize_reserved_region(const ReservedHeapSpace& rs);

  virtual size_t capacity() const = 0;
  virtual size_t used() const = 0;

  // Returns unused capacity.
  virtual size_t unused() const;

  // Historic gc information
  size_t free_at_last_gc() const { return _capacity_at_last_gc - _used_at_last_gc; }
  size_t used_at_last_gc() const { return _used_at_last_gc; }
  void update_capacity_and_used_at_gc();

  // Return "true" if the part of the heap that allocates Java
  // objects has reached the maximal committed limit that it can
  // reach, without a garbage collection.
  virtual bool is_maximal_no_gc() const = 0;

  // Support for java.lang.Runtime.maxMemory():  return the maximum amount of
  // memory that the vm could make available for storing 'normal' java objects.
  // This is based on the reserved address space, but should not include space
  // that the vm uses internally for bookkeeping or temporary storage
  // (e.g., in the case of the young gen, one of the survivor
  // spaces).
  virtual size_t max_capacity() const = 0;

  // Returns "true" iff "p" points into the committed areas of the heap.
  // This method can be expensive, so avoid using it in performance-critical
  // code.
  virtual bool is_in(const void* p) const = 0;

  DEBUG_ONLY(bool is_in_or_null(const void* p) const { return p == nullptr || is_in(p); })

  void set_gc_cause(GCCause::Cause v);
  GCCause::Cause gc_cause() { return _gc_cause; }

  oop obj_allocate(Klass* klass, size_t size, TRAPS);
  oop obj_buffer_allocate(Klass* klass, size_t size, TRAPS); // doesn't clear memory
  virtual oop array_allocate(Klass* klass, size_t size, int length, bool do_zero, TRAPS);
  oop class_allocate(Klass* klass, size_t size, TRAPS);

  // Utilities for turning raw memory into filler objects.
  //
  // min_fill_size() is the smallest region that can be filled.
  // fill_with_objects() can fill arbitrary-sized regions of the heap using
  // multiple objects.  fill_with_object() is for regions known to be smaller
  // than the largest array of integers; it uses a single object to fill the
  // region and has slightly less overhead.
  static size_t min_fill_size() {
    return size_t(align_object_size(oopDesc::header_size()));
  }

  static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);

  static void fill_with_object(HeapWord* start, size_t words, bool zap = true);
  static void fill_with_object(MemRegion region, bool zap = true) {
    fill_with_object(region.start(), region.word_size(), zap);
  }
  static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true) {
    fill_with_object(start, pointer_delta(end, start), zap);
  }
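
  // Illustrative usage sketch (hedged; top and end are assumed to delimit a
  // dead region, e.g. the unused tail of a retired buffer): filling the gap
  // keeps the heap parsable for heap walkers.
  //
  //   size_t remaining = pointer_delta(end, top);  // size in words
  //   if (remaining >= CollectedHeap::min_fill_size()) {
  //     // fill_with_objects() handles arbitrary sizes, using multiple
  //     // filler objects when a single one would not suffice.
  //     CollectedHeap::fill_with_objects(top, remaining);
  //   }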

  virtual void fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap);
  static constexpr size_t min_dummy_object_size() {
    return oopDesc::header_size();
  }

  static size_t lab_alignment_reserve() {
    assert(_lab_alignment_reserve != SIZE_MAX, "uninitialized");
    return _lab_alignment_reserve;
  }

  // Some heaps may be in an unparsable state at certain times between
  // collections.  This may be necessary for efficient implementation of
  // certain allocation-related activities.  Calling this function before
  // attempting to parse a heap ensures that the heap is in a parsable
  // state (provided other concurrent activity does not introduce
  // unparsability).  It is normally expected, therefore, that this
  // method is invoked with the world stopped.
  // NOTE: if you override this method, make sure you call
  // super::ensure_parsability so that the non-generational
  // part of the work gets done.  See implementation of
  // CollectedHeap::ensure_parsability and, for instance,
  // that of ParallelScavengeHeap::ensure_parsability().
  // The argument "retire_tlabs" controls whether existing TLABs
  // are merely filled or also retired, thus preventing further
  // allocation from them and necessitating allocation of new TLABs.
  virtual void ensure_parsability(bool retire_tlabs);

  // The amount of space available for thread-local allocation buffers.
  virtual size_t tlab_capacity(Thread* thr) const = 0;

  // The amount of used space for thread-local allocation buffers for the given thread.
  virtual size_t tlab_used(Thread* thr) const = 0;

  virtual size_t max_tlab_size() const;

  // An estimate of the maximum allocation that could be performed
  // for thread-local allocation buffers without triggering any
  // collection or expansion activity.
  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const = 0;

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc".  This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause) = 0;

  // Perform a full collection
  virtual void do_full_collection(bool clear_all_soft_refs) = 0;

  // This interface assumes that it's being called by the
  // VM thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the VM thread.
  virtual void collect_as_vm_thread(GCCause::Cause cause);

  virtual MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                       size_t size,
                                                       Metaspace::MetadataType mdtype);

  // Returns true if accesses to the object would require barriers.
  // This is used by continuations to copy chunks of a thread stack into a
  // StackChunk object, or out of a StackChunk object back into the thread
  // stack.  These chunks may contain references to objects.  It is crucial
  // that the GC does not attempt to traverse the object while we modify it,
  // because its structure (oopmap) is changed when stack chunks are stored
  // into it.  StackChunk objects may be reused, so the GC must not assume
  // that a StackChunk object is always a freshly allocated object.
  virtual bool requires_barriers(stackChunkOop obj) const = 0;

  // Returns "true" iff there is a stop-world GC in progress.
  bool is_stw_gc_active() const { return _is_stw_gc_active; }

  // Total number of GC collections (started)
  unsigned int total_collections() const { return _total_collections; }
  unsigned int total_full_collections() const { return _total_full_collections; }

  // Increment total number of GC collections (started)
  void increment_total_collections(bool full = false) {
    _total_collections++;
    if (full) {
      increment_total_full_collections();
    }
  }

  void increment_total_full_collections() { _total_full_collections++; }

  // Return the SoftRefPolicy for the heap.
  SoftRefPolicy* soft_ref_policy() { return &_soft_ref_policy; }

  virtual MemoryUsage memory_usage();
  virtual GrowableArray<GCMemoryManager*> memory_managers() = 0;
  virtual GrowableArray<MemoryPool*> memory_pools() = 0;

  // Iterate over all objects, calling "cl.do_object" on each.
  virtual void object_iterate(ObjectClosure* cl) = 0;

  virtual ParallelObjectIteratorImpl* parallel_object_iterator(uint thread_num) {
    return nullptr;
  }

  // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
  virtual void keep_alive(oop obj) {}

  // Perform any cleanup actions necessary before allowing a verification.
  virtual void prepare_for_verify() = 0;

  // Returns the longest time (in ms) that has elapsed since the last time
  // the whole heap was examined by a garbage collection.
  jlong millis_since_last_whole_heap_examined();
  // GC should call this when the next whole-heap analysis has completed to
  // satisfy the above requirement.
  void record_whole_heap_examined_timestamp();

 private:
  // Generate any dumps preceding or following a full gc
  void full_gc_dump(GCTimer* timer, bool before);

  virtual void initialize_serviceability() = 0;

 public:
  void pre_full_gc_dump(GCTimer* timer);
  void post_full_gc_dump(GCTimer* timer);

  virtual VirtualSpaceSummary create_heap_space_summary();
  GCHeapSummary create_heap_summary();

  MetaspaceSummary create_metaspace_summary();

  // GCs are free to represent the bit representation for null differently in memory,
  // which is typically not observable when using the Access API. However, if for
  // some reason a context doesn't allow using the Access API, then this function
  // explicitly checks if the given memory location contains a null value.
  virtual bool contains_null(const oop* p) const;

  // Print heap information on the given outputStream.
  virtual void print_on(outputStream* st) const = 0;
  // The default behavior is to call print_on() on tty.
  virtual void print() const;

  // Print more detailed heap information on the given
  // outputStream. The default behavior is to call print_on(). It is
  // up to each subclass to override it and add any additional output
  // it needs.
  virtual void print_extended_on(outputStream* st) const {
    print_on(st);
  }

  virtual void print_on_error(outputStream* st) const;

  // Used to print information about locations in the hs_err file.
  virtual bool print_location(outputStream* st, void* addr) const = 0;

  // Iterator for all GC threads (other than VM thread)
  virtual void gc_threads_do(ThreadClosure* tc) const = 0;

  // Print any relevant tracing info that flags imply.
  // Default implementation does nothing.
  virtual void print_tracing_info() const = 0;

  void print_heap_before_gc();
  void print_heap_after_gc();

  // Registering and unregistering an nmethod (compiled code) with the heap.
  virtual void register_nmethod(nmethod* nm) = 0;
  virtual void unregister_nmethod(nmethod* nm) = 0;
  virtual void verify_nmethod(nmethod* nm) = 0;

  void trace_heap_before_gc(const GCTracer* gc_tracer);
  void trace_heap_after_gc(const GCTracer* gc_tracer);

  // Heap verification
  virtual void verify(VerifyOption option) = 0;

  // Return true if concurrent gc control via WhiteBox is supported by
  // this collector.  The default implementation returns false.
  virtual bool supports_concurrent_gc_breakpoints() const;

  // Workers used in non-GC safepoints for parallel safepoint cleanup. If this
  // method returns null, cleanup tasks are done serially in the VMThread. See
  // `SafepointSynchronize::do_cleanup_tasks` for details.
  // GCs using a GC worker thread pool inside GC safepoints may opt to share
  // that pool with non-GC safepoints, avoiding creating extraneous threads.
  // Such sharing is safe, because GC safepoints and non-GC safepoints never
  // overlap. For example, `G1CollectedHeap::workers()` (for GC safepoints) and
  // `G1CollectedHeap::safepoint_workers()` (for non-GC safepoints) return the
  // same thread-pool.
  virtual WorkerThreads* safepoint_workers() { return nullptr; }

  // Support for object pinning. This is used by JNI Get*Critical()
  // and Release*Critical() family of functions. The GC must guarantee
  // that pinned objects never move and don't get reclaimed as garbage.
  // These functions are potentially safepointing.
  virtual void pin_object(JavaThread* thread, oop obj) = 0;
  virtual void unpin_object(JavaThread* thread, oop obj) = 0;
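
  // Illustrative usage sketch (schematic; error handling omitted): a JNI
  // Get/Release*Critical-style access brackets the raw access with a
  // pin/unpin pair so the object can neither move nor be reclaimed between:
  //
  //   heap->pin_object(thread, obj);    // obj is now immovable and live
  //   // ... raw access to obj's payload ...
  //   heap->unpin_object(thread, obj);  // obj is again subject to GC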

  // Support for loading objects from CDS archive into the heap
  // (usually as a snapshot of the old generation).
  virtual bool can_load_archived_objects() const { return false; }
  virtual HeapWord* allocate_loaded_archive_space(size_t size) { return nullptr; }
  virtual void complete_loaded_archive_space(MemRegion archive_space) { }

  virtual bool is_oop(oop object) const;
  // Non-product verification and debugging.
#ifndef PRODUCT
  // Support for PromotionFailureALot.  Return true if it's time to cause a
  // promotion failure.  The no-argument version uses
  // this->_promotion_failure_alot_count as the counter.
  bool promotion_should_fail(volatile size_t* count);
  bool promotion_should_fail();

  // Reset the PromotionFailureALot counters.  Should be called at the end of a
  // GC in which promotion failure occurred.
  void reset_promotion_should_fail(volatile size_t* count);
  void reset_promotion_should_fail();
#endif // #ifndef PRODUCT
};

// Class to set and reset the GC cause for a CollectedHeap.

class GCCauseSetter : StackObj {
  CollectedHeap* _heap;
  GCCause::Cause _previous_cause;
 public:
  GCCauseSetter(CollectedHeap* heap, GCCause::Cause cause) {
    _heap = heap;
    _previous_cause = _heap->gc_cause();
    _heap->set_gc_cause(cause);
  }

  ~GCCauseSetter() {
    _heap->set_gc_cause(_previous_cause);
  }
};
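
// Example usage (sketch): scope a cause around a collection request; the
// previous cause is restored when the setter goes out of scope.
//
//   {
//     GCCauseSetter gcs(heap, GCCause::_java_lang_system_gc);
//     heap->do_full_collection(false /* clear_all_soft_refs */);
//   }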

#endif // SHARE_GC_SHARED_COLLECTEDHEAP_HPP
--- EOF ---