/*
 * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_COLLECTEDHEAP_HPP
#define SHARE_GC_SHARED_COLLECTEDHEAP_HPP

#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/verifyOption.hpp"
#include "memory/allocation.hpp"
#include "memory/metaspace.hpp"
#include "memory/universe.hpp"
#include "runtime/handles.hpp"
#include "runtime/perfDataTypes.hpp"
#include "runtime/safepoint.hpp"
#include "services/memoryUsage.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/growableArray.hpp"

// A "CollectedHeap" is an implementation of a Java heap for HotSpot.  This
// is an abstract class: there may be many different kinds of heaps.  This
// class defines the functions that a heap must implement, and contains
// infrastructure common to all heaps.

class WorkerTask;
class AdaptiveSizePolicy;
class BarrierSet;
class GCHeapLog;
class GCHeapSummary;
class GCTimer;
class GCTracer;
class GCMemoryManager;
class MemoryPool;
class MetaspaceSummary;
class ReservedHeapSpace;
class SoftRefPolicy;
class Thread;
class ThreadClosure;
class VirtualSpaceSummary;
class WorkerThreads;
class nmethod;

class ParallelObjectIterator : public CHeapObj<mtGC> {
public:
  virtual void object_iterate(ObjectClosure* cl, uint worker_id) = 0;
  virtual ~ParallelObjectIterator() {}
};

//
// CollectedHeap
//   GenCollectedHeap
//     SerialHeap
//   G1CollectedHeap
//   ParallelScavengeHeap
//   ShenandoahHeap
//   ZCollectedHeap
//
class CollectedHeap : public CHeapObj<mtInternal> {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class IsGCActiveMark; // Block-structured external access to _is_gc_active
  friend class MemAllocator;

 private:
  GCHeapLog* _gc_heap_log;

  // Historic gc information
  size_t _capacity_at_last_gc;
  size_t _used_at_last_gc;

 protected:
  // Not used by all GCs
  MemRegion _reserved;

  bool _is_gc_active;

  // Used for filler objects (static, but initialized in ctor).
  static size_t _filler_array_max_size;

  // Last time the whole heap has been examined in support of RMI
  // MaxObjectInspectionAge.
  // This timestamp must be monotonically non-decreasing to avoid
  // time-warp warnings.
  jlong _last_whole_heap_examined_time_ns;

  unsigned int _total_collections;          // ... started
  unsigned int _total_full_collections;     // ... started
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
  NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)

  // Reason for current garbage collection.  Should be set to
  // a value reflecting no collection between collections.
  GCCause::Cause _gc_cause;
  GCCause::Cause _gc_lastcause;
  PerfStringVariable* _perf_gc_cause;
  PerfStringVariable* _perf_gc_lastcause;

  // Constructor
  CollectedHeap();

  // Create a new TLAB. All TLAB allocations must go through this.
  // To allow more flexible TLAB allocations, min_size specifies
  // the minimum size needed, while requested_size is the requested
  // size based on ergonomics. The actually allocated size will be
  // returned in actual_size.
  virtual HeapWord* allocate_new_tlab(size_t min_size,
                                      size_t requested_size,
                                      size_t* actual_size);
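  //
  // A minimal sketch of how an implementation might honor this contract
  // (MyHeap and try_allocate() are hypothetical, purely for illustration):
  //
  //   HeapWord* MyHeap::allocate_new_tlab(size_t min_size,
  //                                       size_t requested_size,
  //                                       size_t* actual_size) {
  //     // Prefer the ergonomically requested size, then fall back to min_size.
  //     size_t size = requested_size;
  //     HeapWord* mem = try_allocate(size);
  //     if (mem == NULL && min_size < requested_size) {
  //       size = min_size;
  //       mem = try_allocate(size);
  //     }
  //     if (mem != NULL) {
  //       *actual_size = size;  // report what was actually granted
  //     }
  //     return mem;
  //   }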

  // Reinitialize tlabs before resuming mutators.
  virtual void resize_all_tlabs();

  // Raw memory allocation facilities
  // The obj and array allocate methods are covers for these methods.
  // mem_allocate() should never be called to allocate TLABs,
  // only individual objects.
  virtual HeapWord* mem_allocate(size_t size,
                                 bool* gc_overhead_limit_was_exceeded) = 0;

  // Filler object utilities.
  static inline size_t filler_array_hdr_size();
  static inline size_t filler_array_min_size();

  DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
  DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);)

  // Fill with a single array; caller must ensure filler_array_min_size() <=
  // words <= filler_array_max_size().
  static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);

  // Fill with a single object (either an int array or a java.lang.Object).
  static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);

  virtual void trace_heap(GCWhen::Type when, const GCTracer* tracer);

  // Verification functions
  virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
    PRODUCT_RETURN;
  debug_only(static void check_for_valid_allocation_state();)

 public:
  enum Name {
    None,
    Serial,
    Parallel,
    G1,
    Epsilon,
    Z,
    Shenandoah
  };

 protected:
  // Get a pointer to the derived heap object.  Used to implement
  // derived class heap() functions rather than being called directly.
  template<typename T>
  static T* named_heap(Name kind) {
    CollectedHeap* heap = Universe::heap();
    assert(heap != NULL, "Uninitialized heap");
    assert(kind == heap->kind(), "Heap kind %u should be %u",
           static_cast<uint>(heap->kind()), static_cast<uint>(kind));
    return static_cast<T*>(heap);
  }
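
  // A subclass typically wraps named_heap() in its own heap() accessor,
  // along these lines (sketch, using the Epsilon kind as an example):
  //
  //   EpsilonHeap* EpsilonHeap::heap() {
  //     return named_heap<EpsilonHeap>(CollectedHeap::Epsilon);
  //   }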

 public:

  static inline size_t filler_array_max_size() {
    return _filler_array_max_size;
  }

  virtual Name kind() const = 0;

  virtual const char* name() const = 0;

  /**
   * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
   * and JNI_OK on success.
   */
  virtual jint initialize() = 0;

  // In many heaps, there will be a need to perform some initialization activities
  // after the Universe is fully formed, but before general heap allocation is allowed.
  // This is the correct place to place such initialization methods.
  virtual void post_initialize();
  // Stop any ongoing concurrent work and prepare for exit.
  virtual void stop() {}

  // Stop and resume concurrent GC threads interfering with safepoint operations
  virtual void safepoint_synchronize_begin() {}
  virtual void safepoint_synchronize_end() {}

  void initialize_reserved_region(const ReservedHeapSpace& rs);

  virtual size_t capacity() const = 0;
  virtual size_t used() const = 0;

  // Returns unused capacity.
  virtual size_t unused() const;

  // Historic gc information
  size_t free_at_last_gc() const { return _capacity_at_last_gc - _used_at_last_gc; }
  size_t used_at_last_gc() const { return _used_at_last_gc; }
  void update_capacity_and_used_at_gc();

  // Return "true" if the part of the heap that allocates Java
  // objects has reached the maximal committed limit that it can
  // reach, without a garbage collection.
  virtual bool is_maximal_no_gc() const = 0;

  // Support for java.lang.Runtime.maxMemory():  return the maximum amount of
  // memory that the vm could make available for storing 'normal' java objects.
  // This is based on the reserved address space, but should not include space
  // that the vm uses internally for bookkeeping or temporary storage
  // (e.g., in the case of the young gen, one of the survivor
  // spaces).
  virtual size_t max_capacity() const = 0;

  // Returns "true" iff "p" points into the committed areas of the heap.
  // This method can be expensive so avoid using it in performance critical
  // code.
  virtual bool is_in(const void* p) const = 0;

  DEBUG_ONLY(bool is_in_or_null(const void* p) const { return p == NULL || is_in(p); })

  virtual uint32_t hash_oop(oop obj) const;

  void set_gc_cause(GCCause::Cause v);
  GCCause::Cause gc_cause() { return _gc_cause; }

  oop obj_allocate(Klass* klass, size_t size, TRAPS);
  virtual oop array_allocate(Klass* klass, size_t size, int length, bool do_zero, TRAPS);
  oop class_allocate(Klass* klass, size_t size, TRAPS);

  // Utilities for turning raw memory into filler objects.
  //
  // min_fill_size() is the smallest region that can be filled.
  // fill_with_objects() can fill arbitrary-sized regions of the heap using
  // multiple objects.  fill_with_object() is for regions known to be smaller
  // than the largest array of integers; it uses a single object to fill the
  // region and has slightly less overhead.
  static size_t min_fill_size() {
    return size_t(align_object_size(oopDesc::header_size()));
  }

  static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);

  static void fill_with_object(HeapWord* start, size_t words, bool zap = true);
  static void fill_with_object(MemRegion region, bool zap = true) {
    fill_with_object(region.start(), region.word_size(), zap);
  }
  static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true) {
    fill_with_object(start, pointer_delta(end, start), zap);
  }
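
  // For example, the unused remainder of a retired TLAB can be made
  // parsable with a single filler object (sketch; 'top' and 'end' here
  // stand for the TLAB's current top and end, not members of this class):
  //
  //   if (pointer_delta(end, top) >= min_fill_size()) {
  //     fill_with_object(top, end);
  //   }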

  virtual void fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap);
  virtual size_t min_dummy_object_size() const;
  size_t tlab_alloc_reserve() const;

  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region.)

  // This function returns "true" iff the heap supports this kind of
  // allocation.  (Default is "no".)
  virtual bool supports_inline_contig_alloc() const {
    return false;
  }
  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area.  (These fields should be
  // physically near to one another.)
  virtual HeapWord* volatile* top_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }
  virtual HeapWord** end_addr() const {
    guarantee(false, "inline contiguous allocation not supported");
    return NULL;
  }
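
  // Conceptually, a shared fast path can then bump *top_addr() against
  // *end_addr() with a CAS (illustrative sketch only; the real fast paths
  // are emitted by the interpreter and the JIT compilers):
  //
  //   HeapWord* volatile* top = Universe::heap()->top_addr();
  //   HeapWord** end = Universe::heap()->end_addr();
  //   HeapWord* obj = *top;
  //   if (obj + size <= *end &&
  //       Atomic::cmpxchg(top, obj, obj + size) == obj) {
  //     // success: initialize the new object at obj
  //   }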

  // Some heaps may be in an unparsable state at certain times between
  // collections. This may be necessary for efficient implementation of
  // certain allocation-related activities. Calling this function before
  // attempting to parse a heap ensures that the heap is in a parsable
  // state (provided other concurrent activity does not introduce
  // unparsability). It is normally expected, therefore, that this
  // method is invoked with the world stopped.
  // NOTE: if you override this method, make sure you call
  // super::ensure_parsability so that the non-generational
  // part of the work gets done. See the implementation of
  // CollectedHeap::ensure_parsability and, for instance,
  // that of GenCollectedHeap::ensure_parsability().
  // The argument "retire_tlabs" controls whether existing TLABs
  // are merely filled or also retired, thus preventing further
  // allocation from them and necessitating allocation of new TLABs.
  virtual void ensure_parsability(bool retire_tlabs);
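
  // An override is expected to delegate to the base class, e.g.
  // (MyHeap is hypothetical):
  //
  //   void MyHeap::ensure_parsability(bool retire_tlabs) {
  //     CollectedHeap::ensure_parsability(retire_tlabs);  // common TLAB work
  //     // ... then make any collector-private regions parsable ...
  //   }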

  // The amount of space available for thread-local allocation buffers.
  virtual size_t tlab_capacity(Thread *thr) const = 0;

  // The amount of used space for thread-local allocation buffers for the given thread.
  virtual size_t tlab_used(Thread *thr) const = 0;

  virtual size_t max_tlab_size() const;

  // An estimate of the maximum allocation that could be performed
  // for thread-local allocation buffers without triggering any
  // collection or expansion activity.
  virtual size_t unsafe_max_tlab_alloc(Thread *thr) const {
    guarantee(false, "thread-local allocation buffers not supported");
    return 0;
  }

  // If a GC uses a stack watermark barrier, the stack processing is lazy, concurrent,
  // incremental and cooperative. In order for that to work well, mechanisms that stop
  // another thread might want to ensure that the stopped thread's roots are in a sane state.
  virtual bool uses_stack_watermark_barrier() const { return false; }

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc".  This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause) = 0;

  // Perform a full collection
  virtual void do_full_collection(bool clear_all_soft_refs) = 0;

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  virtual void collect_as_vm_thread(GCCause::Cause cause);

  virtual MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                       size_t size,
                                                       Metaspace::MetadataType mdtype);

  // Continuation support
  virtual void collect_for_codecache();

  // Return true if accesses to the object would require barriers.
  // This is used by continuations to copy chunks of a thread stack into a StackChunk
  // object, or out of a StackChunk object back into the thread stack. These chunks may
  // contain references to objects. It is crucial that the GC does not attempt to traverse
  // the object while we modify it, because its structure (oopmap) is changed when stack
  // chunks are stored into it.
  // StackChunk objects may be reused; the GC must not assume that a StackChunk object is
  // always a freshly allocated object.
  virtual bool requires_barriers(oop obj) const = 0;

  // Returns "true" iff there is a stop-world GC in progress.  (I assume
  // that it should answer "false" for the concurrent part of a concurrent
  // collector -- dld).
  bool is_gc_active() const { return _is_gc_active; }

  // Total number of GC collections (started)
  unsigned int total_collections() const { return _total_collections; }
  unsigned int total_full_collections() const { return _total_full_collections; }

  // Increment total number of GC collections (started)
  void increment_total_collections(bool full = false) {
    _total_collections++;
    if (full) {
      increment_total_full_collections();
    }
  }

  void increment_total_full_collections() { _total_full_collections++; }

  // Return the SoftRefPolicy for the heap.
  virtual SoftRefPolicy* soft_ref_policy() = 0;

  virtual MemoryUsage memory_usage();
  virtual GrowableArray<GCMemoryManager*> memory_managers() = 0;
  virtual GrowableArray<MemoryPool*> memory_pools() = 0;

  // Iterate over all objects, calling "cl.do_object" on each.
  virtual void object_iterate(ObjectClosure* cl) = 0;

  virtual ParallelObjectIterator* parallel_object_iterator(uint thread_num) {
    return NULL;
  }
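
  // A caller can prefer the parallel form and fall back to the serial one
  // when it is not provided (sketch; worker dispatch elided):
  //
  //   ParallelObjectIterator* poi = heap->parallel_object_iterator(num_workers);
  //   if (poi != NULL) {
  //     // Each of the num_workers workers calls this with its own id:
  //     poi->object_iterate(cl, worker_id);
  //     // ... and once all workers have finished:
  //     delete poi;
  //   } else {
  //     heap->object_iterate(cl);
  //   }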

  // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
  virtual void keep_alive(oop obj) {}

  // Perform any cleanup actions necessary before allowing a verification.
  virtual void prepare_for_verify() = 0;

  // Returns the time (in ms) that has elapsed since the last
  // time the whole heap was examined by a garbage collection.
  jlong millis_since_last_whole_heap_examined();
  // A GC should call this when a whole-heap analysis has completed,
  // to satisfy the above requirement.
  void record_whole_heap_examined_timestamp();

 private:
  // Generate any dumps preceding or following a full gc
  void full_gc_dump(GCTimer* timer, bool before);

  virtual void initialize_serviceability() = 0;

 public:
  void pre_full_gc_dump(GCTimer* timer);
  void post_full_gc_dump(GCTimer* timer);

  virtual VirtualSpaceSummary create_heap_space_summary();
  GCHeapSummary create_heap_summary();

  MetaspaceSummary create_metaspace_summary();

  // Print heap information on the given outputStream.
  virtual void print_on(outputStream* st) const = 0;
  // The default behavior is to call print_on() on tty.
  virtual void print() const;

  // Print more detailed heap information on the given
  // outputStream. The default behavior is to call print_on(). It is
  // up to each subclass to override it and add any additional output
  // it needs.
  virtual void print_extended_on(outputStream* st) const {
    print_on(st);
  }

  virtual void print_on_error(outputStream* st) const;

  // Used to print information about locations in the hs_err file.
  virtual bool print_location(outputStream* st, void* addr) const = 0;

  // Iterator for all GC threads (other than VM thread)
  virtual void gc_threads_do(ThreadClosure* tc) const = 0;

  // Print any relevant tracing info that flags imply.
  // Default implementation does nothing.
  virtual void print_tracing_info() const = 0;

  void print_heap_before_gc();
  void print_heap_after_gc();

  // Registering and unregistering an nmethod (compiled code) with the heap.
  virtual void register_nmethod(nmethod* nm) = 0;
  virtual void unregister_nmethod(nmethod* nm) = 0;
  // Callback for when nmethod is about to be deleted.
  virtual void flush_nmethod(nmethod* nm) = 0;
  virtual void verify_nmethod(nmethod* nm) = 0;

  void trace_heap_before_gc(const GCTracer* gc_tracer);
  void trace_heap_after_gc(const GCTracer* gc_tracer);

  // Heap verification
  virtual void verify(VerifyOption option) = 0;

  // Return true if concurrent gc control via WhiteBox is supported by
  // this collector.  The default implementation returns false.
  virtual bool supports_concurrent_gc_breakpoints() const;

  // Workers used in non-GC safepoints for parallel safepoint cleanup. If this
  // method returns NULL, cleanup tasks are done serially in the VMThread. See
  // `SafepointSynchronize::do_cleanup_tasks` for details.
  // GCs using a GC worker thread pool inside GC safepoints may opt to share
  // that pool with non-GC safepoints, avoiding creating extraneous threads.
  // Such sharing is safe, because GC safepoints and non-GC safepoints never
  // overlap. For example, `G1CollectedHeap::workers()` (for GC safepoints) and
  // `G1CollectedHeap::safepoint_workers()` (for non-GC safepoints) return the
  // same thread-pool.
  virtual WorkerThreads* safepoint_workers() { return NULL; }

  // Support for object pinning. This is used by JNI Get*Critical()
  // and Release*Critical() family of functions. If supported, the GC
  // must guarantee that pinned objects never move.
  virtual bool supports_object_pinning() const;
  virtual oop pin_object(JavaThread* thread, oop obj);
  virtual void unpin_object(JavaThread* thread, oop obj);

  // Is the given object inside a CDS archive area?
  virtual bool is_archived_object(oop object) const;

  // Support for loading objects from CDS archive into the heap
  // (usually as a snapshot of the old generation).
  virtual bool can_load_archived_objects() const { return false; }
  virtual HeapWord* allocate_loaded_archive_space(size_t size) { return NULL; }
  virtual void complete_loaded_archive_space(MemRegion archive_space) { }

  virtual bool is_oop(oop object) const;

  // Non-product verification and debugging.
#ifndef PRODUCT
  // Support for PromotionFailureALot.  Return true if it's time to cause a
  // promotion failure.  The no-argument version uses
  // this->_promotion_failure_alot_count as the counter.
  bool promotion_should_fail(volatile size_t* count);
  bool promotion_should_fail();

  // Reset the PromotionFailureALot counters.  Should be called at the end of a
  // GC in which promotion failure occurred.
  void reset_promotion_should_fail(volatile size_t* count);
  void reset_promotion_should_fail();
#endif  // #ifndef PRODUCT
};

// Class to set and reset the GC cause for a CollectedHeap.

class GCCauseSetter : StackObj {
  CollectedHeap* _heap;
  GCCause::Cause _previous_cause;
 public:
  GCCauseSetter(CollectedHeap* heap, GCCause::Cause cause) {
    _heap = heap;
    _previous_cause = _heap->gc_cause();
    _heap->set_gc_cause(cause);
  }

  ~GCCauseSetter() {
    _heap->set_gc_cause(_previous_cause);
  }
};
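
// Typical use relies on scoping to restore the previous cause, e.g.
// (sketch; callers are assumed to satisfy any locking the heap requires):
//
//   {
//     GCCauseSetter gcs(heap, GCCause::_java_lang_system_gc);
//     heap->do_full_collection(false /* clear_all_soft_refs */);
//   } // destructor restores the previous cause here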

#endif // SHARE_GC_SHARED_COLLECTEDHEAP_HPP
--- EOF ---