1 /*
2 * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_GC_SHARED_COLLECTEDHEAP_HPP
26 #define SHARE_GC_SHARED_COLLECTEDHEAP_HPP
27
28 #include "gc/shared/gcCause.hpp"
29 #include "gc/shared/gcWhen.hpp"
30 #include "gc/shared/verifyOption.hpp"
31 #include "memory/allocation.hpp"
32 #include "memory/metaspace.hpp"
33 #include "memory/universe.hpp"
34 #include "runtime/handles.hpp"
35 #include "runtime/perfDataTypes.hpp"
36 #include "runtime/safepoint.hpp"
37 #include "services/memoryUsage.hpp"
38 #include "utilities/debug.hpp"
39 #include "utilities/formatBuffer.hpp"
40 #include "utilities/growableArray.hpp"
41
42 // A "CollectedHeap" is an implementation of a java heap for HotSpot. This
43 // is an abstract class: there may be many different kinds of heaps. This
44 // class defines the functions that a heap must implement, and contains
45 // infrastructure common to all heaps.
46
47 class AbstractGangTask;
48 class AdaptiveSizePolicy;
49 class BarrierSet;
50 class GCHeapLog;
51 class GCHeapSummary;
52 class GCTimer;
53 class GCTracer;
54 class GCMemoryManager;
55 class MemoryPool;
56 class MetaspaceSummary;
57 class ReservedHeapSpace;
58 class SoftRefPolicy;
59 class Thread;
60 class ThreadClosure;
61 class VirtualSpaceSummary;
62 class WorkGang;
63 class nmethod;
64
65 class ParallelObjectIteratorImpl : public CHeapObj<mtGC> {
66 public:
67 virtual ~ParallelObjectIteratorImpl() {}
68 virtual void object_iterate(ObjectClosure* cl, uint worker_id) = 0;
69 };
70
// User-facing parallel object iterator. This is a StackObj, which ensures
// that _impl is allocated and deleted within the scope of this object. That
// gives the implementation the life cycle required by ThreadsListHandle,
// which is sometimes used by the root iterators.
75 class ParallelObjectIterator : public StackObj {
76 ParallelObjectIteratorImpl* _impl;
77
78 public:
79 ParallelObjectIterator(uint thread_num);
80 ~ParallelObjectIterator();
81 void object_iterate(ObjectClosure* cl, uint worker_id);
82 };
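
// Usage sketch (illustrative only; how workers are dispatched is GC- and
// caller-specific, and "nworkers"/"cl" are hypothetical names):
//
//   ParallelObjectIterator poi(nworkers);  // StackObj: _impl lives for this scope
//   // each worker thread i in [0, nworkers) then calls:
//   //   poi.object_iterate(cl, i);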
83
//
// CollectedHeap
//   GenCollectedHeap
//     SerialHeap
//   G1CollectedHeap
//   ParallelScavengeHeap
//   ShenandoahHeap
//   ZCollectedHeap
//
93 class CollectedHeap : public CHeapObj<mtGC> {
94 friend class VMStructs;
95 friend class JVMCIVMStructs;
96 friend class IsGCActiveMark; // Block structured external access to _is_gc_active
97 friend class MemAllocator;
98 friend class ParallelObjectIterator;
99
100 private:
101 GCHeapLog* _gc_heap_log;
102
103 // Historic gc information
104 size_t _capacity_at_last_gc;
105 size_t _used_at_last_gc;
106
107 protected:
108 // Not used by all GCs
109 MemRegion _reserved;
110
111 bool _is_gc_active;
112
113 // Used for filler objects (static, but initialized in ctor).
114 static size_t _filler_array_max_size;
115
116 // Last time the whole heap has been examined in support of RMI
117 // MaxObjectInspectionAge.
118 // This timestamp must be monotonically non-decreasing to avoid
119 // time-warp warnings.
120 jlong _last_whole_heap_examined_time_ns;
121
122 unsigned int _total_collections; // ... started
123 unsigned int _total_full_collections; // ... started
124 NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;)
125 NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;)
126
127 // Reason for current garbage collection. Should be set to
128 // a value reflecting no collection between collections.
129 GCCause::Cause _gc_cause;
130 GCCause::Cause _gc_lastcause;
131 PerfStringVariable* _perf_gc_cause;
132 PerfStringVariable* _perf_gc_lastcause;
133
134 // Constructor
135 CollectedHeap();
136
  // Create a new TLAB. All TLAB allocations must go through this.
  // To allow more flexible TLAB allocations, min_size specifies
  // the minimum size needed, while requested_size is the requested
  // size based on ergonomics. The actually allocated size is
  // returned in actual_size.
142 virtual HeapWord* allocate_new_tlab(size_t min_size,
143 size_t requested_size,
144 size_t* actual_size);
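
  // Contract sketch (illustrative only, not a prescribed implementation;
  // "words" is a hypothetical result variable):
  //
  //   size_t words = 0;
  //   HeapWord* mem = allocate_new_tlab(min_size, requested_size, &words);
  //   // on success: mem != NULL and words >= min_size; words is what the
  //   // GC actually handed out, which need not equal requested_size.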
145
146 // Reinitialize tlabs before resuming mutators.
147 virtual void resize_all_tlabs();
148
  // Raw memory allocation facilities.
  // The obj and array allocate methods are covers for these methods.
  // mem_allocate() should never be called to allocate TLABs,
  // only individual objects.
153 virtual HeapWord* mem_allocate(size_t size,
154 bool* gc_overhead_limit_was_exceeded) = 0;
155
156 // Filler object utilities.
157 static inline size_t filler_array_hdr_size();
158 static inline size_t filler_array_min_size();
159
160 DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
161 DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);)
162
163 // Fill with a single array; caller must ensure filler_array_min_size() <=
164 // words <= filler_array_max_size().
165 static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true);
166
167 // Fill with a single object (either an int array or a java.lang.Object).
168 static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);
169
170 virtual void trace_heap(GCWhen::Type when, const GCTracer* tracer);
171
172 // Verification functions
173 debug_only(static void check_for_valid_allocation_state();)
174
175 public:
176 enum Name {
177 None,
178 Serial,
179 Parallel,
180 G1,
181 Epsilon,
182 Z,
183 Shenandoah
184 };
185
186 protected:
187 // Get a pointer to the derived heap object. Used to implement
188 // derived class heap() functions rather than being called directly.
189 template<typename T>
190 static T* named_heap(Name kind) {
191 CollectedHeap* heap = Universe::heap();
192 assert(heap != NULL, "Uninitialized heap");
193 assert(kind == heap->kind(), "Heap kind %u should be %u",
194 static_cast<uint>(heap->kind()), static_cast<uint>(kind));
195 return static_cast<T*>(heap);
196 }
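
  // For example, a concrete heap class would implement its own heap()
  // accessor in terms of this helper (sketch, under the assumption that
  // the subclass exposes such an accessor):
  //
  //   static SerialHeap* heap() {
  //     return named_heap<SerialHeap>(CollectedHeap::Serial);
  //   }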
197
198 public:
199
200 static inline size_t filler_array_max_size() {
201 return _filler_array_max_size;
202 }
203
204 virtual Name kind() const = 0;
205
206 virtual const char* name() const = 0;
207
208 /**
209 * Returns JNI error code JNI_ENOMEM if memory could not be allocated,
210 * and JNI_OK on success.
211 */
212 virtual jint initialize() = 0;
213
  // In many heaps, there will be a need to perform some initialization activities
  // after the Universe is fully formed, but before general heap allocation is allowed.
  // This is the correct place for such initialization methods.
217 virtual void post_initialize();
218
  // Stop any ongoing concurrent work and prepare for exit.
220 virtual void stop() {}
221
222 // Stop and resume concurrent GC threads interfering with safepoint operations
223 virtual void safepoint_synchronize_begin() {}
224 virtual void safepoint_synchronize_end() {}
225
226 void initialize_reserved_region(const ReservedHeapSpace& rs);
227
228 virtual size_t capacity() const = 0;
229 virtual size_t used() const = 0;
230
231 // Returns unused capacity.
232 virtual size_t unused() const;
233
234 // Historic gc information
235 size_t free_at_last_gc() const { return _capacity_at_last_gc - _used_at_last_gc; }
236 size_t used_at_last_gc() const { return _used_at_last_gc; }
237 void update_capacity_and_used_at_gc();
238
239 // Return "true" if the part of the heap that allocates Java
240 // objects has reached the maximal committed limit that it can
241 // reach, without a garbage collection.
242 virtual bool is_maximal_no_gc() const = 0;
243
244 // Support for java.lang.Runtime.maxMemory(): return the maximum amount of
245 // memory that the vm could make available for storing 'normal' java objects.
246 // This is based on the reserved address space, but should not include space
247 // that the vm uses internally for bookkeeping or temporary storage
248 // (e.g., in the case of the young gen, one of the survivor
249 // spaces).
250 virtual size_t max_capacity() const = 0;
251
252 // Returns "TRUE" iff "p" points into the committed areas of the heap.
253 // This method can be expensive so avoid using it in performance critical
254 // code.
255 virtual bool is_in(const void* p) const = 0;
256
257 DEBUG_ONLY(bool is_in_or_null(const void* p) const { return p == NULL || is_in(p); })
258
259 virtual uint32_t hash_oop(oop obj) const;
260
261 void set_gc_cause(GCCause::Cause v);
262 GCCause::Cause gc_cause() { return _gc_cause; }
263
264 oop obj_allocate(Klass* klass, int size, TRAPS);
265 virtual oop array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS);
266 oop class_allocate(Klass* klass, int size, TRAPS);
267
268 // Utilities for turning raw memory into filler objects.
269 //
270 // min_fill_size() is the smallest region that can be filled.
271 // fill_with_objects() can fill arbitrary-sized regions of the heap using
272 // multiple objects. fill_with_object() is for regions known to be smaller
273 // than the largest array of integers; it uses a single object to fill the
274 // region and has slightly less overhead.
275 static size_t min_fill_size() {
276 return size_t(align_object_size(oopDesc::header_size()));
277 }
278
279 static void fill_with_objects(HeapWord* start, size_t words, bool zap = true);
280
281 static void fill_with_object(HeapWord* start, size_t words, bool zap = true);
282 static void fill_with_object(MemRegion region, bool zap = true) {
283 fill_with_object(region.start(), region.word_size(), zap);
284 }
285 static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true) {
286 fill_with_object(start, pointer_delta(end, start), zap);
287 }
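
  // Usage sketch (illustrative; "top" and "end" stand for the hypothetical
  // bounds of a region being retired): overwrite the unused tail with a
  // dead, heap-parsable object so heap walkers can skip over it:
  //
  //   if (pointer_delta(end, top) >= CollectedHeap::min_fill_size()) {
  //     CollectedHeap::fill_with_object(top, end);
  //   }
  //
  // For regions that may exceed the largest filler array, use
  // fill_with_objects(), which may lay down several filler objects.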
288
289 virtual void fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap);
290 virtual size_t min_dummy_object_size() const;
291 size_t tlab_alloc_reserve() const;
292
  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the addresses of the top and
  // end fields defining the extent of the contiguous allocation region).
296
297 // This function returns "true" iff the heap supports this kind of
298 // allocation. (Default is "no".)
299 virtual bool supports_inline_contig_alloc() const {
300 return false;
301 }
302 // These functions return the addresses of the fields that define the
303 // boundaries of the contiguous allocation area. (These fields should be
304 // physically near to one another.)
305 virtual HeapWord* volatile* top_addr() const {
306 guarantee(false, "inline contiguous allocation not supported");
307 return NULL;
308 }
309 virtual HeapWord** end_addr() const {
310 guarantee(false, "inline contiguous allocation not supported");
311 return NULL;
312 }
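
  // The exported fields enable a bump-the-pointer fast path roughly like
  // the following sketch (illustrative only; "heap" and "size" are
  // hypothetical names, and the real consumers are the interpreter and the
  // compilers, which emit equivalent code inline):
  //
  //   HeapWord* volatile* top = heap->top_addr();
  //   HeapWord**          end = heap->end_addr();
  //   HeapWord* obj = *top;
  //   if (obj + size <= *end &&
  //       Atomic::cmpxchg(top, obj, obj + size) == obj) {
  //     // success: [obj, obj + size) is newly allocated
  //   }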
313
314 // Some heaps may be in an unparseable state at certain times between
315 // collections. This may be necessary for efficient implementation of
316 // certain allocation-related activities. Calling this function before
317 // attempting to parse a heap ensures that the heap is in a parsable
318 // state (provided other concurrent activity does not introduce
319 // unparsability). It is normally expected, therefore, that this
320 // method is invoked with the world stopped.
321 // NOTE: if you override this method, make sure you call
322 // super::ensure_parsability so that the non-generational
323 // part of the work gets done. See implementation of
324 // CollectedHeap::ensure_parsability and, for instance,
325 // that of GenCollectedHeap::ensure_parsability().
326 // The argument "retire_tlabs" controls whether existing TLABs
327 // are merely filled or also retired, thus preventing further
328 // allocation from them and necessitating allocation of new TLABs.
329 virtual void ensure_parsability(bool retire_tlabs);
330
331 // The amount of space available for thread-local allocation buffers.
332 virtual size_t tlab_capacity(Thread *thr) const = 0;
333
334 // The amount of used space for thread-local allocation buffers for the given thread.
335 virtual size_t tlab_used(Thread *thr) const = 0;
336
337 virtual size_t max_tlab_size() const;
338
339 // An estimate of the maximum allocation that could be performed
340 // for thread-local allocation buffers without triggering any
341 // collection or expansion activity.
342 virtual size_t unsafe_max_tlab_alloc(Thread *thr) const {
343 guarantee(false, "thread-local allocation buffers not supported");
344 return 0;
345 }
346
  // If a GC uses a stack watermark barrier, the stack processing is lazy, concurrent,
  // incremental and cooperative. In order for that to work well, mechanisms that stop
  // another thread might want to ensure that the stopped thread's roots are in a sane state.
350 virtual bool uses_stack_watermark_barrier() const { return false; }
351
352 // Perform a collection of the heap; intended for use in implementing
353 // "System.gc". This probably implies as full a collection as the
354 // "CollectedHeap" supports.
355 virtual void collect(GCCause::Cause cause) = 0;
356
357 // Perform a full collection
358 virtual void do_full_collection(bool clear_all_soft_refs) = 0;
359
  // This interface assumes that it's being called by the VM thread. It
  // collects the heap with the heap lock already held, executing in the
  // context of the VM thread.
364 virtual void collect_as_vm_thread(GCCause::Cause cause);
365
366 virtual MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
367 size_t size,
368 Metaspace::MetadataType mdtype);
369
  // Returns "true" iff there is a stop-the-world GC in progress. (I assume
  // that it should answer "false" for the concurrent part of a concurrent
  // collector -- dld).
373 bool is_gc_active() const { return _is_gc_active; }
374
375 // Total number of GC collections (started)
376 unsigned int total_collections() const { return _total_collections; }
377 unsigned int total_full_collections() const { return _total_full_collections;}
378
379 // Increment total number of GC collections (started)
380 void increment_total_collections(bool full = false) {
381 _total_collections++;
382 if (full) {
383 increment_total_full_collections();
384 }
385 }
386
387 void increment_total_full_collections() { _total_full_collections++; }
388
  // Return the SoftRefPolicy for the heap.
390 virtual SoftRefPolicy* soft_ref_policy() = 0;
391
392 virtual MemoryUsage memory_usage();
393 virtual GrowableArray<GCMemoryManager*> memory_managers() = 0;
394 virtual GrowableArray<MemoryPool*> memory_pools() = 0;
395
396 // Iterate over all objects, calling "cl.do_object" on each.
397 virtual void object_iterate(ObjectClosure* cl) = 0;
398
399 protected:
400 virtual ParallelObjectIteratorImpl* parallel_object_iterator(uint thread_num) {
401 return NULL;
402 }
403
404 public:
405 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
406 virtual void keep_alive(oop obj) {}
407
408 // Perform any cleanup actions necessary before allowing a verification.
409 virtual void prepare_for_verify() = 0;
410
411 // Returns the longest time (in ms) that has elapsed since the last
412 // time that the whole heap has been examined by a garbage collection.
413 jlong millis_since_last_whole_heap_examined();
  // The GC should call this when the next whole-heap analysis has completed
  // to satisfy the above requirement.
416 void record_whole_heap_examined_timestamp();
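
  // Pairing sketch (illustrative): a collector that has just examined every
  // live object publishes that fact so MaxObjectInspectionAge can be
  // answered from the timestamp above:
  //
  //   // at the end of a collection that walked the whole heap:
  //   Universe::heap()->record_whole_heap_examined_timestamp();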
417
418 private:
419 // Generate any dumps preceding or following a full gc
420 void full_gc_dump(GCTimer* timer, bool before);
421
422 virtual void initialize_serviceability() = 0;
423
424 public:
425 void pre_full_gc_dump(GCTimer* timer);
426 void post_full_gc_dump(GCTimer* timer);
427
428 virtual VirtualSpaceSummary create_heap_space_summary();
429 GCHeapSummary create_heap_summary();
430
431 MetaspaceSummary create_metaspace_summary();
432
433 // Print heap information on the given outputStream.
434 virtual void print_on(outputStream* st) const = 0;
435 // The default behavior is to call print_on() on tty.
436 virtual void print() const;
437
438 // Print more detailed heap information on the given
439 // outputStream. The default behavior is to call print_on(). It is
440 // up to each subclass to override it and add any additional output
441 // it needs.
442 virtual void print_extended_on(outputStream* st) const {
443 print_on(st);
444 }
445
446 virtual void print_on_error(outputStream* st) const;
447
448 // Used to print information about locations in the hs_err file.
449 virtual bool print_location(outputStream* st, void* addr) const = 0;
450
451 // Iterator for all GC threads (other than VM thread)
452 virtual void gc_threads_do(ThreadClosure* tc) const = 0;
453
454 // Print any relevant tracing info that flags imply.
455 // Default implementation does nothing.
456 virtual void print_tracing_info() const = 0;
457
458 void print_heap_before_gc();
459 void print_heap_after_gc();
460
461 // Registering and unregistering an nmethod (compiled code) with the heap.
462 virtual void register_nmethod(nmethod* nm) = 0;
463 virtual void unregister_nmethod(nmethod* nm) = 0;
  // Callback for when an nmethod is about to be deleted.
465 virtual void flush_nmethod(nmethod* nm) = 0;
466 virtual void verify_nmethod(nmethod* nm) = 0;
467
468 void trace_heap_before_gc(const GCTracer* gc_tracer);
469 void trace_heap_after_gc(const GCTracer* gc_tracer);
470
471 // Heap verification
472 virtual void verify(VerifyOption option) = 0;
473
474 // Return true if concurrent gc control via WhiteBox is supported by
475 // this collector. The default implementation returns false.
476 virtual bool supports_concurrent_gc_breakpoints() const;
477
478 // Provides a thread pool to SafepointSynchronize to use
479 // for parallel safepoint cleanup.
480 // GCs that use a GC worker thread pool may want to share
481 // it for use during safepoint cleanup. This is only possible
482 // if the GC can pause and resume concurrent work (e.g. G1
483 // concurrent marking) for an intermittent non-GC safepoint.
484 // If this method returns NULL, SafepointSynchronize will
485 // perform cleanup tasks serially in the VMThread.
486 virtual WorkGang* safepoint_workers() { return NULL; }
487
  // Support for object pinning. This is used by the JNI Get*Critical()
  // and Release*Critical() families of functions. If supported, the GC
  // must guarantee that pinned objects never move.
491 virtual bool supports_object_pinning() const;
492 virtual oop pin_object(JavaThread* thread, oop obj);
493 virtual void unpin_object(JavaThread* thread, oop obj);
494
495 // Is the given object inside a CDS archive area?
496 virtual bool is_archived_object(oop object) const;
497
498 virtual bool is_oop(oop object) const;
  // Non-product verification and debugging.
500 #ifndef PRODUCT
501 // Support for PromotionFailureALot. Return true if it's time to cause a
502 // promotion failure. The no-argument version uses
503 // this->_promotion_failure_alot_count as the counter.
504 bool promotion_should_fail(volatile size_t* count);
505 bool promotion_should_fail();
506
507 // Reset the PromotionFailureALot counters. Should be called at the end of a
508 // GC in which promotion failure occurred.
509 void reset_promotion_should_fail(volatile size_t* count);
510 void reset_promotion_should_fail();
511 #endif // #ifndef PRODUCT
512 };
513
514 // Class to set and reset the GC cause for a CollectedHeap.
515
516 class GCCauseSetter : StackObj {
517 CollectedHeap* _heap;
518 GCCause::Cause _previous_cause;
519 public:
520 GCCauseSetter(CollectedHeap* heap, GCCause::Cause cause) {
521 _heap = heap;
522 _previous_cause = _heap->gc_cause();
523 _heap->set_gc_cause(cause);
524 }
525
526 ~GCCauseSetter() {
527 _heap->set_gc_cause(_previous_cause);
528 }
529 };
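
// Usage sketch (illustrative; "heap" is a hypothetical CollectedHeap*):
// scope the cause around a collection so the previous cause is restored
// when the scope exits, even on an early return:
//
//   {
//     GCCauseSetter gccs(heap, GCCause::_allocation_failure);
//     heap->do_full_collection(false /* clear_all_soft_refs */);
//   }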
530
531 #endif // SHARE_GC_SHARED_COLLECTEDHEAP_HPP