/*
 * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_COLLECTEDHEAP_HPP
#define SHARE_GC_SHARED_COLLECTEDHEAP_HPP

#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "gc/shared/verifyOption.hpp"
#include "memory/allocation.hpp"
#include "memory/metaspace.hpp"
#include "memory/universe.hpp"
#include "oops/stackChunkOop.hpp"
#include "runtime/handles.hpp"
#include "runtime/perfDataTypes.hpp"
#include "runtime/safepoint.hpp"
#include "services/memoryUsage.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/growableArray.hpp"

// A "CollectedHeap" is an implementation of a Java heap for HotSpot. This
// is an abstract class: there may be many different kinds of heaps. This
// class defines the functions that a heap must implement, and contains
// infrastructure common to all heaps.

class WorkerTask;
class AdaptiveSizePolicy;
class BarrierSet;
class GCHeapLog;
class GCHeapSummary;
class GCTimer;
class GCTracer;
class GCMemoryManager;
class MemoryPool;
class MetaspaceSummary;
class ReservedHeapSpace;
class Thread;
class ThreadClosure;
class VirtualSpaceSummary;
class WorkerThreads;
class nmethod;

class ParallelObjectIteratorImpl : public CHeapObj<mtGC> {
public:
  virtual ~ParallelObjectIteratorImpl() {}
  virtual void object_iterate(ObjectClosure* cl, uint worker_id) = 0;
};
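// A GC that supports parallel heap walks returns a subclass of
// ParallelObjectIteratorImpl from CollectedHeap::parallel_object_iterator().
// Minimal sketch of such a subclass (hypothetical names; illustration only,
// not part of this header):
//
//   class MyHeapParallelIterator : public ParallelObjectIteratorImpl {
//     void object_iterate(ObjectClosure* cl, uint worker_id) override {
//       // Claim disjoint parts of the heap keyed on worker_id and apply
//       // cl->do_object() to every object in each claimed part.
//     }
//   };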
// User facing parallel object iterator. This is a StackObj, which ensures that
// the _impl is allocated and deleted in the scope of this object. This ensures
// the life cycle of the implementation is as required by ThreadsListHandle,
// which is sometimes used by the root iterators.
class ParallelObjectIterator : public StackObj {
  ParallelObjectIteratorImpl* _impl;

public:
  ParallelObjectIterator(uint thread_num);
  ~ParallelObjectIterator();
  void object_iterate(ObjectClosure* cl, uint worker_id);
};

//
// CollectedHeap
//   SerialHeap
//   G1CollectedHeap
//   ParallelScavengeHeap
//   ShenandoahHeap
//   ZCollectedHeap
//
class CollectedHeap : public CHeapObj<mtGC> {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class IsGCActiveMark;        // Block structured external access to _is_gc_active
  friend class DisableIsGCActiveMark; // Disable current IsGCActiveMark
  friend class MemAllocator;
  friend class ParallelObjectIterator;

 private:
  GCHeapLog* _gc_heap_log;

  // Historic GC information
  size_t _capacity_at_last_gc;
  size_t _used_at_last_gc;

  SoftRefPolicy _soft_ref_policy;

  // First, set it to java_lang_Object.
  // Then, set it to FillerObject after the FillerObject_klass loading is complete.
  static Klass* _filler_object_klass;

 protected:
  // Not used by all GCs
  MemRegion _reserved;

  bool _is_gc_active;

  // (Minimum) Alignment reserve for TLABs and PLABs.
  static size_t _lab_alignment_reserve;
  // Used for filler objects (static, but initialized in ctor).
  static size_t _filler_array_max_size;

  static size_t _stack_chunk_max_size; // 0 for no limit

  // Last time the whole heap was examined in support of RMI
  // MaxObjectInspectionAge.
  // This timestamp must be monotonically non-decreasing to avoid
  // time-warp warnings.
  jlong _last_whole_heap_examined_time_ns;

  unsigned int _total_collections;      // ... started
  unsigned int _total_full_collections; // ... started
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)

  // Reason for the current garbage collection. Should be set to
  // a value reflecting no collection between collections.
  GCCause::Cause _gc_cause;
  GCCause::Cause _gc_lastcause;
  PerfStringVariable* _perf_gc_cause;
  PerfStringVariable* _perf_gc_lastcause;

  // Constructor
  CollectedHeap();

  // Create a new TLAB. All TLAB allocations must go through this.
  // To allow more flexible TLAB allocations, min_size specifies
  // the minimum size needed, while requested_size is the requested
  // size based on ergonomics. The actually allocated size will be
  // returned in actual_size.
  virtual HeapWord* allocate_new_tlab(size_t min_size,
                                      size_t requested_size,
                                      size_t* actual_size);

  // Reinitialize TLABs before resuming mutators.
  virtual void resize_all_tlabs();

  // Raw memory allocation facilities.
  // The obj and array allocate methods are covers for these methods.
  // mem_allocate() should never be called to allocate TLABs,
  // only individual objects.
  virtual HeapWord* mem_allocate(size_t size,
                                 bool* gc_overhead_limit_was_exceeded) = 0;
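  // Illustrative caller-side contract for allocate_new_tlab() above
  // (hypothetical sizes; sketch only, not part of this header):
  //
  //   size_t actual = 0;
  //   HeapWord* tlab = allocate_new_tlab(min_size, requested_size, &actual);
  //   // On success: tlab != nullptr and actual >= min_size.
  //   // On failure: tlab == nullptr.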
  // Filler object utilities.
  static inline size_t filler_array_hdr_size();

  static size_t filler_array_min_size();

 protected:
  static inline void zap_filler_array_with(HeapWord* start, size_t words, juint value);
  DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
  DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);)

  // Fill with a single array; caller must ensure filler_array_min_size() <=
  // words <= filler_array_max_size().
  static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);

  // Fill with a single object (either an int array or a java.lang.Object).
  static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);

  virtual void trace_heap(GCWhen::Type when, const GCTracer* tracer);

  // Verification functions
  debug_only(static void check_for_valid_allocation_state();)

 public:
  enum Name {
    None,
    Serial,
    Parallel,
    G1,
    Epsilon,
    Z,
    Shenandoah
  };

 protected:
  // Get a pointer to the derived heap object. Used to implement
  // derived class heap() functions rather than being called directly.
  template<typename T>
  static T* named_heap(Name kind) {
    CollectedHeap* heap = Universe::heap();
    assert(heap != nullptr, "Uninitialized heap");
    assert(kind == heap->kind(), "Heap kind %u should be %u",
           static_cast<uint>(heap->kind()), static_cast<uint>(kind));
    return static_cast<T*>(heap);
  }

 public:

  static inline size_t filler_array_max_size() {
    return _filler_array_max_size;
  }

  static inline size_t stack_chunk_max_size() {
    return _stack_chunk_max_size;
  }

  static inline Klass* filler_object_klass() {
    return _filler_object_klass;
  }

  static inline void set_filler_object_klass(Klass* k) {
    _filler_object_klass = k;
  }

  virtual Name kind() const = 0;

  virtual const char* name() const = 0;

  /**
   * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
   * and JNI_OK on success.
   */
  virtual jint initialize() = 0;

  // In many heaps, there will be a need to perform some initialization activities
  // after the Universe is fully formed, but before general heap allocation is allowed.
  // This is the correct place for such initialization methods.
  virtual void post_initialize();

  // Stop any ongoing concurrent work and prepare for exit.
  virtual void stop() {}

  // Stop and resume concurrent GC threads interfering with safepoint operations.
  virtual void safepoint_synchronize_begin() {}
  virtual void safepoint_synchronize_end() {}

  void initialize_reserved_region(const ReservedHeapSpace& rs);

  virtual size_t capacity() const = 0;
  virtual size_t used() const = 0;

  // Returns unused capacity.
  virtual size_t unused() const;

  // Historic GC information
  size_t free_at_last_gc() const { return _capacity_at_last_gc - _used_at_last_gc; }
  size_t used_at_last_gc() const { return _used_at_last_gc; }
  void update_capacity_and_used_at_gc();
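  // Illustrative use of named_heap<T>() above: each concrete heap implements
  // a static heap() accessor in terms of it (G1 shown as an example):
  //
  //   G1CollectedHeap* G1CollectedHeap::heap() {
  //     return named_heap<G1CollectedHeap>(CollectedHeap::G1);
  //   }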
  // Return "true" if the part of the heap that allocates Java
  // objects has reached the maximal committed limit that it can
  // reach, without a garbage collection.
  virtual bool is_maximal_no_gc() const = 0;

  // Support for java.lang.Runtime.maxMemory(): return the maximum amount of
  // memory that the vm could make available for storing 'normal' java objects.
  // This is based on the reserved address space, but should not include space
  // that the vm uses internally for bookkeeping or temporary storage
  // (e.g., in the case of the young gen, one of the survivor
  // spaces).
  virtual size_t max_capacity() const = 0;

  // Returns "TRUE" iff "p" points into the committed areas of the heap.
  // This method can be expensive, so avoid using it in performance-critical
  // code.
  virtual bool is_in(const void* p) const = 0;

  DEBUG_ONLY(bool is_in_or_null(const void* p) const { return p == nullptr || is_in(p); })

  void set_gc_cause(GCCause::Cause v);
  GCCause::Cause gc_cause() { return _gc_cause; }

  oop obj_allocate(Klass* klass, size_t size, TRAPS);
  oop obj_buffer_allocate(Klass* klass, size_t size, TRAPS); // doesn't clear memory
  virtual oop array_allocate(Klass* klass, size_t size, int length, bool do_zero, TRAPS);
  oop class_allocate(Klass* klass, size_t size, TRAPS);

  // Utilities for turning raw memory into filler objects.
  //
  // min_fill_size() is the smallest region that can be filled.
  // fill_with_objects() can fill arbitrary-sized regions of the heap using
  // multiple objects. fill_with_object() is for regions known to be smaller
  // than the largest array of integers; it uses a single object to fill the
  // region and has slightly less overhead.
  static size_t min_fill_size() {
    return size_t(align_object_size(oopDesc::header_size()));
  }

  static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);

  static void fill_with_object(HeapWord* start, size_t words, bool zap = true);
  static void fill_with_object(MemRegion region, bool zap = true) {
    fill_with_object(region.start(), region.word_size(), zap);
  }
  static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true) {
    fill_with_object(start, pointer_delta(end, start), zap);
  }

  virtual void fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap);
  static constexpr size_t min_dummy_object_size() {
    return oopDesc::header_size();
  }

  static size_t lab_alignment_reserve() {
    assert(_lab_alignment_reserve != SIZE_MAX, "uninitialized");
    return _lab_alignment_reserve;
  }
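  // Illustrative use of the filler utilities above when retiring a buffer
  // (hypothetical values; sketch only, not part of this header):
  //
  //   HeapWord* top = ...;  // first unused word of the retired buffer
  //   HeapWord* end = ...;  // one past the last word of the buffer
  //   if (pointer_delta(end, top) >= CollectedHeap::min_fill_size()) {
  //     // Overwrite the unused tail with a dead object so that heap
  //     // walkers can parse over it.
  //     CollectedHeap::fill_with_object(top, end);
  //   }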
  // Some heaps may be in an unparsable state at certain times between
  // collections. This may be necessary for efficient implementation of
  // certain allocation-related activities. Calling this function before
  // attempting to parse a heap ensures that the heap is in a parsable
  // state (provided other concurrent activity does not introduce
  // unparsability). It is normally expected, therefore, that this
  // method is invoked with the world stopped.
  // NOTE: if you override this method, make sure you call
  // super::ensure_parsability so that the non-generational
  // part of the work gets done. See implementation of
  // CollectedHeap::ensure_parsability and, for instance,
  // that of ParallelScavengeHeap::ensure_parsability().
  // The argument "retire_tlabs" controls whether existing TLABs
  // are merely filled or also retired, thus preventing further
  // allocation from them and necessitating allocation of new TLABs.
  virtual void ensure_parsability(bool retire_tlabs);

  // The amount of space available for thread-local allocation buffers.
  virtual size_t tlab_capacity(Thread* thr) const = 0;

  // The amount of used space for thread-local allocation buffers for the given thread.
  virtual size_t tlab_used(Thread* thr) const = 0;

  virtual size_t max_tlab_size() const;

  // An estimate of the maximum allocation that could be performed
  // for thread-local allocation buffers without triggering any
  // collection or expansion activity.
  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const {
    guarantee(false, "thread-local allocation buffers not supported");
    return 0;
  }

  // If a GC uses a stack watermark barrier, the stack processing is lazy, concurrent,
  // incremental and cooperative. In order for that to work well, mechanisms that stop
  // another thread might want to ensure its roots are in a sane state.
  virtual bool uses_stack_watermark_barrier() const { return false; }

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc". This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause) = 0;

  // Perform a full collection.
  virtual void do_full_collection(bool clear_all_soft_refs) = 0;

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  virtual void collect_as_vm_thread(GCCause::Cause cause);

  virtual MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                       size_t size,
                                                       Metaspace::MetadataType mdtype);

  // Return true if accesses to the object would require barriers.
  // This is used by continuations to copy chunks of a thread stack into a StackChunk
  // object or out of a StackChunk object back into the thread stack. These chunks may
  // contain references to objects. It is crucial that the GC does not attempt to
  // traverse the object while we modify it, because its structure (oopmap) is changed
  // when stack chunks are stored into it.
  // StackChunk objects may be reused; the GC must not assume that a StackChunk object
  // is always a freshly allocated object.
  virtual bool requires_barriers(stackChunkOop obj) const = 0;
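  // Illustrative explicit-GC trigger via collect() above (this mirrors the
  // System.gc() code path at a high level; sketch only, not part of this
  // header):
  //
  //   Universe::heap()->collect(GCCause::_java_lang_system_gc);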
  // Returns "true" iff there is a stop-world GC in progress. (I assume
  // that it should answer "false" for the concurrent part of a concurrent
  // collector -- dld).
  bool is_gc_active() const { return _is_gc_active; }

  // Total number of GC collections (started)
  unsigned int total_collections() const { return _total_collections; }
  unsigned int total_full_collections() const { return _total_full_collections; }

  // Increment total number of GC collections (started)
  void increment_total_collections(bool full = false) {
    _total_collections++;
    if (full) {
      increment_total_full_collections();
    }
  }

  void increment_total_full_collections() { _total_full_collections++; }

  // Return the SoftRefPolicy for the heap.
  SoftRefPolicy* soft_ref_policy() { return &_soft_ref_policy; }

  virtual MemoryUsage memory_usage();
  virtual GrowableArray<GCMemoryManager*> memory_managers() = 0;
  virtual GrowableArray<MemoryPool*> memory_pools() = 0;

  // Iterate over all objects, calling "cl.do_object" on each.
  virtual void object_iterate(ObjectClosure* cl) = 0;

 protected:
  virtual ParallelObjectIteratorImpl* parallel_object_iterator(uint thread_num) {
    return nullptr;
  }

 public:
  // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
  virtual void keep_alive(oop obj) {}

  // Perform any cleanup actions necessary before allowing a verification.
  virtual void prepare_for_verify() = 0;

  // Returns the longest time (in ms) that has elapsed since the last
  // time that the whole heap has been examined by a garbage collection.
  jlong millis_since_last_whole_heap_examined();
  // A GC should call this when the next whole-heap analysis has completed to
  // satisfy the above requirement.
  void record_whole_heap_examined_timestamp();

 private:
  // Generate any dumps preceding or following a full GC.
  void full_gc_dump(GCTimer* timer, bool before);

  virtual void initialize_serviceability() = 0;

 public:
  void pre_full_gc_dump(GCTimer* timer);
  void post_full_gc_dump(GCTimer* timer);

  virtual VirtualSpaceSummary create_heap_space_summary();
  GCHeapSummary create_heap_summary();

  MetaspaceSummary create_metaspace_summary();

  // GCs are free to represent the bit pattern for null differently in memory,
  // which is typically not observable when using the Access API. However, if for
  // some reason a context doesn't allow using the Access API, then this function
  // explicitly checks if the given memory location contains a null value.
  virtual bool contains_null(const oop* p) const;

  // Print heap information on the given outputStream.
  virtual void print_on(outputStream* st) const = 0;
  // The default behavior is to call print_on() on tty.
  virtual void print() const;

  // Print more detailed heap information on the given
  // outputStream. The default behavior is to call print_on(). It is
  // up to each subclass to override it and add any additional output
  // it needs.
  virtual void print_extended_on(outputStream* st) const {
    print_on(st);
  }

  virtual void print_on_error(outputStream* st) const;

  // Used to print information about locations in the hs_err file.
  virtual bool print_location(outputStream* st, void* addr) const = 0;

  // Iterator for all GC threads (other than the VM thread).
  virtual void gc_threads_do(ThreadClosure* tc) const = 0;
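  // Illustrative parallel heap walk using the user-facing
  // ParallelObjectIterator declared near the top of this file (hypothetical
  // closure and worker count; sketch only, not part of this header). With N
  // workers inside a WorkerTask, each worker passes its own id and is handed
  // a disjoint set of objects:
  //
  //   class CountObjects : public ObjectClosure {
  //    public:
  //     size_t _count = 0;
  //     void do_object(oop obj) override { _count++; }
  //   };
  //
  //   ParallelObjectIterator poi(N);       // one shared StackObj instance
  //   CountObjects cl;                     // typically one closure per worker
  //   poi.object_iterate(&cl, worker_id);  // called by each worker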
  // Print any relevant tracing info that flags imply.
  // Default implementation does nothing.
  virtual void print_tracing_info() const = 0;

  void print_heap_before_gc();
  void print_heap_after_gc();

  // Registering and unregistering an nmethod (compiled code) with the heap.
  virtual void register_nmethod(nmethod* nm) = 0;
  virtual void unregister_nmethod(nmethod* nm) = 0;
  virtual void verify_nmethod(nmethod* nm) = 0;

  void trace_heap_before_gc(const GCTracer* gc_tracer);
  void trace_heap_after_gc(const GCTracer* gc_tracer);

  // Heap verification
  virtual void verify(VerifyOption option) = 0;

  // Return true if concurrent GC control via WhiteBox is supported by
  // this collector. The default implementation returns false.
  virtual bool supports_concurrent_gc_breakpoints() const;

  // Workers used in non-GC safepoints for parallel safepoint cleanup. If this
  // method returns null, cleanup tasks are done serially in the VMThread. See
  // `SafepointSynchronize::do_cleanup_tasks` for details.
  // GCs using a GC worker thread pool inside GC safepoints may opt to share
  // that pool with non-GC safepoints, avoiding creating extraneous threads.
  // Such sharing is safe, because GC safepoints and non-GC safepoints never
  // overlap. For example, `G1CollectedHeap::workers()` (for GC safepoints) and
  // `G1CollectedHeap::safepoint_workers()` (for non-GC safepoints) return the
  // same thread pool.
  virtual WorkerThreads* safepoint_workers() { return nullptr; }

  // Support for object pinning. This is used by the JNI Get*Critical()
  // and Release*Critical() family of functions. The GC must guarantee
  // that pinned objects never move and don't get reclaimed as garbage.
  // These functions are potentially safepointing.
  virtual void pin_object(JavaThread* thread, oop obj) = 0;
  virtual void unpin_object(JavaThread* thread, oop obj) = 0;

  // Support for loading objects from a CDS archive into the heap
  // (usually as a snapshot of the old generation).
  virtual bool can_load_archived_objects() const { return false; }
  virtual HeapWord* allocate_loaded_archive_space(size_t size) { return nullptr; }
  virtual void complete_loaded_archive_space(MemRegion archive_space) { }

  virtual bool is_oop(oop object) const;

  // Non-product verification and debugging.
#ifndef PRODUCT
  // Support for PromotionFailureALot. Return true if it's time to cause a
  // promotion failure. The no-argument version uses
  // this->_promotion_failure_alot_count as the counter.
  bool promotion_should_fail(volatile size_t* count);
  bool promotion_should_fail();

  // Reset the PromotionFailureALot counters. Should be called at the end of a
  // GC in which promotion failure occurred.
  void reset_promotion_should_fail(volatile size_t* count);
  void reset_promotion_should_fail();
#endif // #ifndef PRODUCT
};

// Class to set and reset the GC cause for a CollectedHeap.

class GCCauseSetter : StackObj {
  CollectedHeap* _heap;
  GCCause::Cause _previous_cause;
 public:
  GCCauseSetter(CollectedHeap* heap, GCCause::Cause cause) {
    _heap = heap;
    _previous_cause = _heap->gc_cause();
    _heap->set_gc_cause(cause);
  }

  ~GCCauseSetter() {
    _heap->set_gc_cause(_previous_cause);
  }
};

#endif // SHARE_GC_SHARED_COLLECTEDHEAP_HPP