/*
 * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_COLLECTEDHEAP_HPP
#define SHARE_GC_SHARED_COLLECTEDHEAP_HPP

#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/verifyOption.hpp"
#include "memory/allocation.hpp"
#include "memory/metaspace.hpp"
#include "memory/universe.hpp"
#include "oops/stackChunkOop.hpp"
#include "runtime/handles.hpp"
#include "runtime/perfDataTypes.hpp"
#include "runtime/safepoint.hpp"
#include "services/memoryUsage.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/growableArray.hpp"

// A "CollectedHeap" is an implementation of a java heap for HotSpot.  This
// is an abstract class: there may be many different kinds of heaps.  This
// class defines the functions that a heap must implement, and contains
// infrastructure common to all heaps.

class WorkerTask;
class AdaptiveSizePolicy;
class BarrierSet;
class GCHeapLog;
class GCHeapSummary;
class GCTimer;
class GCTracer;
class GCMemoryManager;
class MemoryPool;
class MetaspaceSummary;
class ReservedHeapSpace;
class SoftRefPolicy;
class Thread;
class ThreadClosure;
class VirtualSpaceSummary;
class WorkerThreads;
class nmethod;

class ParallelObjectIteratorImpl : public CHeapObj<mtGC> {
public:
  virtual ~ParallelObjectIteratorImpl() {}
  virtual void object_iterate(ObjectClosure* cl, uint worker_id) = 0;
};

// User facing parallel object iterator. This is a StackObj, which ensures that
// the _impl is allocated and deleted in the scope of this object. This ensures
// the life cycle of the implementation is as required by ThreadsListHandle,
// which is sometimes used by the root iterators.
class ParallelObjectIterator : public StackObj {
  ParallelObjectIteratorImpl* _impl;

public:
  ParallelObjectIterator(uint thread_num);
  ~ParallelObjectIterator();
  void object_iterate(ObjectClosure* cl, uint worker_id);
};
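
// A minimal usage sketch (illustrative only; the worker dispatch and
// "MyObjectClosure" are hypothetical, not part of this API): construct the
// iterator once, then have each worker call object_iterate() with its id.
//
//   ParallelObjectIterator poi(num_workers);
//   // In the worker with id `worker_id`:
//   MyObjectClosure cl;
//   poi.object_iterate(&cl, worker_id);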

//
// CollectedHeap
//   GenCollectedHeap
//     SerialHeap
//   G1CollectedHeap
//   ParallelScavengeHeap
//   ShenandoahHeap
//   ZCollectedHeap
//
class CollectedHeap : public CHeapObj<mtGC> {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class IsGCActiveMark; // Block structured external access to _is_gc_active
  friend class DisableIsGCActiveMark; // Disable current IsGCActiveMark
  friend class MemAllocator;
  friend class ParallelObjectIterator;

 private:
  GCHeapLog* _gc_heap_log;

  // Historic gc information
  size_t _capacity_at_last_gc;
  size_t _used_at_last_gc;
  // Set to java_lang_Object initially; updated to FillerObject once
  // FillerObject_klass loading is complete.
  static Klass* _filler_object_klass;

 protected:
  // Not used by all GCs
  MemRegion _reserved;

  bool _is_gc_active;

  // (Minimum) Alignment reserve for TLABs and PLABs.
  static size_t _lab_alignment_reserve;
  // Used for filler objects (static, but initialized in ctor).
  static size_t _filler_array_max_size;

  static size_t _stack_chunk_max_size; // 0 for no limit

  // Last time the whole heap has been examined in support of RMI
  // MaxObjectInspectionAge.
  // This timestamp must be monotonically non-decreasing to avoid
  // time-warp warnings.
  jlong _last_whole_heap_examined_time_ns;

  unsigned int _total_collections;          // ... started
  unsigned int _total_full_collections;     // ... started
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)

  // Reason for current garbage collection.  Should be set to
  // a value reflecting no collection between collections.
  GCCause::Cause _gc_cause;
  GCCause::Cause _gc_lastcause;
  PerfStringVariable* _perf_gc_cause;
  PerfStringVariable* _perf_gc_lastcause;

  // Constructor
  CollectedHeap();
  // Create a new tlab. All TLAB allocations must go through this.
  // To allow more flexible TLAB allocations, min_size specifies
  // the minimum size needed, while requested_size is the requested
  // size based on ergonomics. The actually allocated size will be
  // returned in actual_size.
  virtual HeapWord* allocate_new_tlab(size_t min_size,
                                      size_t requested_size,
                                      size_t* actual_size);
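
  // Caller-side sketch of the contract above (illustrative; the exact upper
  // bound on *actual_size is collector-specific, but on success it is at
  // least min_size):
  //
  //   size_t actual = 0;
  //   HeapWord* mem = allocate_new_tlab(min_size, requested_size, &actual);
  //   if (mem != nullptr) {
  //     // Initialize a TLAB of `actual` words starting at `mem`.
  //   }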

  // Reinitialize tlabs before resuming mutators.
  virtual void resize_all_tlabs();

  // Raw memory allocation facilities.
  // The obj and array allocate methods are covers for these methods.
  // mem_allocate() should never be called to allocate TLABs,
  // only individual objects.
  virtual HeapWord* mem_allocate(size_t size,
                                 bool* gc_overhead_limit_was_exceeded) = 0;

  // Filler object utilities.
  static inline size_t filler_array_hdr_size();

  static size_t filler_array_min_size();

protected:
  static inline void zap_filler_array_with(HeapWord* start, size_t words, juint value);
  DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
  DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);)

  // Fill with a single array; caller must ensure filler_array_min_size() <=
  // words <= filler_array_max_size().
  static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);

  // Fill with a single object (either an int array or a java.lang.Object).
  static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);

  virtual void trace_heap(GCWhen::Type when, const GCTracer* tracer);

  // Verification functions
  debug_only(static void check_for_valid_allocation_state();)

 public:
  enum Name {
    None,
    Serial,
    Parallel,
    G1,
    Epsilon,
    Z,
    Shenandoah
  };

 protected:
  // Get a pointer to the derived heap object.  Used to implement
  // derived class heap() functions rather than being called directly.
  template<typename T>
  static T* named_heap(Name kind) {
    CollectedHeap* heap = Universe::heap();
    assert(heap != nullptr, "Uninitialized heap");
    assert(kind == heap->kind(), "Heap kind %u should be %u",
           static_cast<uint>(heap->kind()), static_cast<uint>(kind));
    return static_cast<T*>(heap);
  }
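
  // Typical derived-class usage, as described above (a sketch using G1 as
  // the example collector):
  //
  //   G1CollectedHeap* G1CollectedHeap::heap() {
  //     return named_heap<G1CollectedHeap>(CollectedHeap::G1);
  //   }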

 public:

  static inline size_t filler_array_max_size() {
    return _filler_array_max_size;
  }

  static inline size_t stack_chunk_max_size() {
    return _stack_chunk_max_size;
  }

  static inline Klass* filler_object_klass() {
    return _filler_object_klass;
  }

  static inline void set_filler_object_klass(Klass* k) {
    _filler_object_klass = k;
  }

  virtual Name kind() const = 0;

  virtual const char* name() const = 0;

  /**
   * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
   * and JNI_OK on success.
   */
  virtual jint initialize() = 0;
  // In many heaps, there will be a need to perform some initialization activities
  // after the Universe is fully formed, but before general heap allocation is allowed.
  // This is the correct place for such initialization methods.
  virtual void post_initialize();

  // Stop any ongoing concurrent work and prepare for exit.
  virtual void stop() {}

  // Stop and resume concurrent GC threads interfering with safepoint operations
  virtual void safepoint_synchronize_begin() {}
  virtual void safepoint_synchronize_end() {}

  void initialize_reserved_region(const ReservedHeapSpace& rs);

  virtual size_t capacity() const = 0;
  virtual size_t used() const = 0;

  // Returns unused capacity.
  virtual size_t unused() const;

  // Historic gc information
  size_t free_at_last_gc() const { return _capacity_at_last_gc - _used_at_last_gc; }
  size_t used_at_last_gc() const { return _used_at_last_gc; }
  void update_capacity_and_used_at_gc();

  // Return "true" if the part of the heap that allocates Java
  // objects has reached the maximal committed limit that it can
  // reach, without a garbage collection.
  virtual bool is_maximal_no_gc() const = 0;

  // Support for java.lang.Runtime.maxMemory():  return the maximum amount of
  // memory that the vm could make available for storing 'normal' java objects.
  // This is based on the reserved address space, but should not include space
  // that the vm uses internally for bookkeeping or temporary storage
  // (e.g., in the case of the young gen, one of the survivor
  // spaces).
  virtual size_t max_capacity() const = 0;

  // Returns "TRUE" iff "p" points into the committed areas of the heap.
  // This method can be expensive so avoid using it in performance critical
  // code.
  virtual bool is_in(const void* p) const = 0;

  DEBUG_ONLY(bool is_in_or_null(const void* p) const { return p == nullptr || is_in(p); })

  void set_gc_cause(GCCause::Cause v);
  GCCause::Cause gc_cause() { return _gc_cause; }

  oop obj_allocate(Klass* klass, size_t size, TRAPS);
  virtual oop array_allocate(Klass* klass, size_t size, int length, bool do_zero, TRAPS);
  oop class_allocate(Klass* klass, size_t size, TRAPS);

  // Utilities for turning raw memory into filler objects.
  //
  // min_fill_size() is the smallest region that can be filled.
  // fill_with_objects() can fill arbitrary-sized regions of the heap using
  // multiple objects.  fill_with_object() is for regions known to be smaller
  // than the largest array of integers; it uses a single object to fill the
  // region and has slightly less overhead.
  static size_t min_fill_size() {
    return size_t(align_object_size(oopDesc::header_size()));
  }

  static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);

  static void fill_with_object(HeapWord* start, size_t words, bool zap = true);
  static void fill_with_object(MemRegion region, bool zap = true) {
    fill_with_object(region.start(), region.word_size(), zap);
  }
  static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true) {
    fill_with_object(start, pointer_delta(end, start), zap);
  }
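
  // Sketch of how a collector might plug a gap when retiring a buffer
  // (illustrative; `top` and `hard_end` are hypothetical locals):
  //
  //   if (pointer_delta(hard_end, top) >= min_fill_size()) {
  //     fill_with_object(top, hard_end);  // region known to be small enough
  //   }
  //   // For arbitrary-sized regions, use fill_with_objects() instead.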

  virtual void fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap);
  static constexpr size_t min_dummy_object_size() {
    return oopDesc::header_size();
  }

  static size_t lab_alignment_reserve() {
    assert(_lab_alignment_reserve != SIZE_MAX, "uninitialized");
    return _lab_alignment_reserve;
  }

  // Some heaps may be in an unparseable state at certain times between
  // collections. This may be necessary for efficient implementation of
  // certain allocation-related activities. Calling this function before
  // attempting to parse a heap ensures that the heap is in a parsable
  // state (provided other concurrent activity does not introduce
  // unparsability). It is normally expected, therefore, that this
  // method is invoked with the world stopped.
  // NOTE: if you override this method, make sure you call
  // super::ensure_parsability so that the non-generational
  // part of the work gets done. See implementation of
  // CollectedHeap::ensure_parsability and, for instance,
  // that of ParallelScavengeHeap::ensure_parsability().
  // The argument "retire_tlabs" controls whether existing TLABs
  // are merely filled or also retired, thus preventing further
  // allocation from them and necessitating allocation of new TLABs.
  virtual void ensure_parsability(bool retire_tlabs);
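
  // Override sketch following the NOTE above (illustrative; "MyHeap" is
  // hypothetical, and since C++ has no `super`, the superclass is named
  // explicitly):
  //
  //   void MyHeap::ensure_parsability(bool retire_tlabs) {
  //     CollectedHeap::ensure_parsability(retire_tlabs);
  //     // ... collector-specific work to make the heap parsable ...
  //   }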

  // The amount of space available for thread-local allocation buffers.
  virtual size_t tlab_capacity(Thread *thr) const = 0;

  // The amount of used space for thread-local allocation buffers for the given thread.
  virtual size_t tlab_used(Thread *thr) const = 0;

  virtual size_t max_tlab_size() const;

  // An estimate of the maximum allocation that could be performed
  // for thread-local allocation buffers without triggering any
  // collection or expansion activity.
  virtual size_t unsafe_max_tlab_alloc(Thread *thr) const {
    guarantee(false, "thread-local allocation buffers not supported");
    return 0;
  }

  // If a GC uses a stack watermark barrier, the stack processing is lazy, concurrent,
  // incremental and cooperative. In order for that to work well, mechanisms that stop
  // another thread might want to ensure that the stopped thread's roots are in a sane state.
  virtual bool uses_stack_watermark_barrier() const { return false; }

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc".  This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause) = 0;

  // Perform a full collection
  virtual void do_full_collection(bool clear_all_soft_refs) = 0;

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  virtual void collect_as_vm_thread(GCCause::Cause cause);

  virtual MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                       size_t size,
                                                       Metaspace::MetadataType mdtype);

  // Return true if accesses to the object would require barriers.
  // This is used by continuations to copy chunks of a thread stack into a StackChunk
  // object, or out of a StackChunk object back into the thread stack. These chunks may
  // contain references to objects. It is crucial that the GC does not attempt to
  // traverse the object while we modify it, because its structure (oopmap) is changed
  // when stack chunks are stored into it.
  // StackChunk objects may be reused; the GC must not assume that a StackChunk object
  // is always a freshly allocated object.
  virtual bool requires_barriers(stackChunkOop obj) const = 0;

  // Returns "true" iff there is a stop-world GC in progress.  (I assume
  // that it should answer "false" for the concurrent part of a concurrent
  // collector -- dld).
  bool is_gc_active() const { return _is_gc_active; }

  // Total number of GC collections (started)
  unsigned int total_collections() const { return _total_collections; }
  unsigned int total_full_collections() const { return _total_full_collections;}

  // Increment total number of GC collections (started)
  void increment_total_collections(bool full = false) {
    _total_collections++;
    if (full) {
      increment_total_full_collections();
    }
  }

  void increment_total_full_collections() { _total_full_collections++; }

  // Return the SoftRefPolicy for the heap.
  virtual SoftRefPolicy* soft_ref_policy() = 0;

  virtual MemoryUsage memory_usage();
  virtual GrowableArray<GCMemoryManager*> memory_managers() = 0;
  virtual GrowableArray<MemoryPool*> memory_pools() = 0;

  // Iterate over all objects, calling "cl.do_object" on each.
  virtual void object_iterate(ObjectClosure* cl) = 0;

 protected:
  virtual ParallelObjectIteratorImpl* parallel_object_iterator(uint thread_num) {
    return nullptr;
  }

 public:
  // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
  virtual void keep_alive(oop obj) {}

  // Perform any cleanup actions necessary before allowing a verification.
  virtual void prepare_for_verify() = 0;

  // Returns the longest time (in ms) that has elapsed since the last
  // time that the whole heap has been examined by a garbage collection.
  jlong millis_since_last_whole_heap_examined();
  // A GC should call this when a whole-heap analysis has completed, to
  // satisfy the above requirement.
  void record_whole_heap_examined_timestamp();

 private:
  // Generate any dumps preceding or following a full gc
  void full_gc_dump(GCTimer* timer, bool before);

  virtual void initialize_serviceability() = 0;

 public:
  void pre_full_gc_dump(GCTimer* timer);
  void post_full_gc_dump(GCTimer* timer);

  virtual VirtualSpaceSummary create_heap_space_summary();
  GCHeapSummary create_heap_summary();

  MetaspaceSummary create_metaspace_summary();

  // GCs are free to represent the bit representation for null differently in memory,
  // which is typically not observable when using the Access API. However, if for
  // some reason a context doesn't allow using the Access API, then this function
  // explicitly checks if the given memory location contains a null value.
  virtual bool contains_null(const oop* p) const;

  // Print heap information on the given outputStream.
  virtual void print_on(outputStream* st) const = 0;
  // The default behavior is to call print_on() on tty.
  virtual void print() const;

  // Print more detailed heap information on the given
  // outputStream. The default behavior is to call print_on(). It is
  // up to each subclass to override it and add any additional output
  // it needs.
  virtual void print_extended_on(outputStream* st) const {
    print_on(st);
  }

  virtual void print_on_error(outputStream* st) const;

  // Used to print information about locations in the hs_err file.
  virtual bool print_location(outputStream* st, void* addr) const = 0;

  // Iterator for all GC threads (other than VM thread)
  virtual void gc_threads_do(ThreadClosure* tc) const = 0;

  // Print any relevant tracing info that flags imply.
  // Default implementation does nothing.
  virtual void print_tracing_info() const = 0;

  void print_heap_before_gc();
  void print_heap_after_gc();

  // Registering and unregistering an nmethod (compiled code) with the heap.
  virtual void register_nmethod(nmethod* nm) = 0;
  virtual void unregister_nmethod(nmethod* nm) = 0;
  virtual void verify_nmethod(nmethod* nm) = 0;

  void trace_heap_before_gc(const GCTracer* gc_tracer);
  void trace_heap_after_gc(const GCTracer* gc_tracer);

  // Heap verification
  virtual void verify(VerifyOption option) = 0;

  // Return true if concurrent gc control via WhiteBox is supported by
  // this collector.  The default implementation returns false.
  virtual bool supports_concurrent_gc_breakpoints() const;

  // Workers used in non-GC safepoints for parallel safepoint cleanup. If this
  // method returns null, cleanup tasks are done serially in the VMThread. See
  // `SafepointSynchronize::do_cleanup_tasks` for details.
  // GCs using a GC worker thread pool inside GC safepoints may opt to share
  // that pool with non-GC safepoints, avoiding creating extraneous threads.
  // Such sharing is safe, because GC safepoints and non-GC safepoints never
  // overlap. For example, `G1CollectedHeap::workers()` (for GC safepoints) and
  // `G1CollectedHeap::safepoint_workers()` (for non-GC safepoints) return the
  // same thread-pool.
  virtual WorkerThreads* safepoint_workers() { return nullptr; }
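
  // Sketch of the sharing described above (illustrative; assumes a
  // `_workers` pool that the collector already uses for GC safepoints):
  //
  //   WorkerThreads* G1CollectedHeap::safepoint_workers() { return _workers; }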

  // Support for object pinning. This is used by JNI Get*Critical()
  // and Release*Critical() family of functions. The GC must guarantee
  // that pinned objects never move and don't get reclaimed as garbage.
  // These functions are potentially safepointing.
  virtual void pin_object(JavaThread* thread, oop obj) = 0;
  virtual void unpin_object(JavaThread* thread, oop obj) = 0;

  // Support for loading objects from CDS archive into the heap
  // (usually as a snapshot of the old generation).
  virtual bool can_load_archived_objects() const { return false; }
  virtual HeapWord* allocate_loaded_archive_space(size_t size) { return nullptr; }
  virtual void complete_loaded_archive_space(MemRegion archive_space) { }

  virtual bool is_oop(oop object) const;
  // Non-product verification and debugging.
#ifndef PRODUCT
  // Support for PromotionFailureALot.  Return true if it's time to cause a
  // promotion failure.  The no-argument version uses
  // this->_promotion_failure_alot_count as the counter.
  bool promotion_should_fail(volatile size_t* count);
  bool promotion_should_fail();

  // Reset the PromotionFailureALot counters.  Should be called at the end of a
  // GC in which promotion failure occurred.
  void reset_promotion_should_fail(volatile size_t* count);
  void reset_promotion_should_fail();
#endif  // #ifndef PRODUCT
};

// Class to set and reset the GC cause for a CollectedHeap.

class GCCauseSetter : StackObj {
  CollectedHeap* _heap;
  GCCause::Cause _previous_cause;
 public:
  GCCauseSetter(CollectedHeap* heap, GCCause::Cause cause) {
    _heap = heap;
    _previous_cause = _heap->gc_cause();
    _heap->set_gc_cause(cause);
  }

  ~GCCauseSetter() {
    _heap->set_gc_cause(_previous_cause);
  }
};
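
// Typical usage sketch (illustrative; "VM_MyOperation" is hypothetical):
// set the cause for the duration of a scope; the destructor restores the
// previous cause even on early return.
//
//   void VM_MyOperation::doit() {
//     GCCauseSetter gccs(Universe::heap(), GCCause::_java_lang_system_gc);
//     // ... perform the collection ...
//   }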

#endif // SHARE_GC_SHARED_COLLECTEDHEAP_HPP