/*
 * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_PARALLEL_PARALLELSCAVENGEHEAP_HPP
#define SHARE_GC_PARALLEL_PARALLELSCAVENGEHEAP_HPP

#include "gc/parallel/objectStartArray.hpp"
#include "gc/parallel/psGCAdaptivePolicyCounters.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/preGCValues.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/workerThread.hpp"
#include "logging/log.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/ostream.hpp"

class GCHeapSummary;
class HeapBlockClaimer;
class MemoryManager;
class MemoryPool;
class PSAdaptiveSizePolicy;
class PSCardTable;
class PSHeapSummary;

// ParallelScavengeHeap is the implementation of CollectedHeap for Parallel GC.
//
// The heap is reserved up-front in a single contiguous block, split into two
// parts, the old and young generation. The old generation resides at lower
// addresses, the young generation at higher addresses. The boundary address
// between the generations is fixed. Within a generation, committed memory
// grows towards higher addresses.
//
//
// low                                                                high
//
//                          +-- generation boundary (fixed after startup)
//                          |
// |<- old gen (reserved) ->|<-       young gen (reserved)             ->|
// +---------------+--------+-----------------+--------+--------+--------+
// |      old      |        |       eden      |  from  |   to   |        |
// |               |        |                 |  (to)  | (from) |        |
// +---------------+--------+-----------------+--------+--------+--------+
// |<- committed ->|        |<-          committed            ->|
//
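// A minimal usage sketch (illustrative only, not part of the original
// comment): because the generation boundary is fixed after startup,
// classifying a reserved heap address reduces to a single pointer
// comparison. The helper and its explicit boundary argument are hypothetical:
//
//   static bool is_old_gen_addr(const HeapWord* addr, const HeapWord* boundary) {
//     return addr < boundary;   // old gen occupies the lower addresses
//   }
//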
class ParallelScavengeHeap : public CollectedHeap {
  friend class VMStructs;
 private:
  static PSYoungGen* _young_gen;
  static PSOldGen*   _old_gen;

  // Sizing policy for entire heap
  static PSAdaptiveSizePolicy*       _size_policy;
  static PSGCAdaptivePolicyCounters* _gc_policy_counters;

  SoftRefPolicy _soft_ref_policy;

  unsigned int _death_march_count;

  GCMemoryManager* _young_manager;
  GCMemoryManager* _old_manager;

  MemoryPool* _eden_pool;
  MemoryPool* _survivor_pool;
  MemoryPool* _old_pool;

  WorkerThreads _workers;

  virtual void initialize_serviceability();

  void trace_actual_reserved_page_size(const size_t reserved_heap_size, const ReservedSpace rs);
  void trace_heap(GCWhen::Type when, const GCTracer* tracer);

  // Allocate in oldgen and record the allocation with the size_policy.
  HeapWord* allocate_old_gen_and_record(size_t word_size);

 protected:
  static inline size_t total_invocations();
  HeapWord* allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size);

  inline bool should_alloc_in_eden(size_t size) const;
  inline void death_march_check(HeapWord* const result, size_t size);
  HeapWord* mem_allocate_old_gen(size_t size);

 public:
  ParallelScavengeHeap() :
    CollectedHeap(),
    _death_march_count(0),
    _young_manager(NULL),
    _old_manager(NULL),
    _eden_pool(NULL),
    _survivor_pool(NULL),
    _old_pool(NULL),
    _workers("GC Thread", ParallelGCThreads) { }

  // For use by VM operations
  enum CollectionType {
    Scavenge,
    MarkSweep
  };

  virtual Name kind() const {
    return CollectedHeap::Parallel;
  }

  virtual const char* name() const {
    return "Parallel";
  }

  virtual SoftRefPolicy* soft_ref_policy() { return &_soft_ref_policy; }

  virtual GrowableArray<GCMemoryManager*> memory_managers();
  virtual GrowableArray<MemoryPool*> memory_pools();

  static PSYoungGen* young_gen() { return _young_gen; }
  static PSOldGen* old_gen()     { return _old_gen; }

  virtual PSAdaptiveSizePolicy* size_policy() { return _size_policy; }

  static PSGCAdaptivePolicyCounters* gc_policy_counters() { return _gc_policy_counters; }

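  // Canonical accessor for the single ParallelScavengeHeap instance.
  // Illustrative call pattern (hypothetical snippet, not from this file):
  //   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  //   size_t used = heap->used();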
  static ParallelScavengeHeap* heap() {
    return named_heap<ParallelScavengeHeap>(CollectedHeap::Parallel);
  }

  CardTableBarrierSet* barrier_set();
  PSCardTable* card_table();

  // Returns JNI_OK on success
  virtual jint initialize();

  virtual void safepoint_synchronize_begin();
  virtual void safepoint_synchronize_end();

  void post_initialize();
  void update_counters();

  size_t capacity() const;
  size_t used() const;

  // Return "true" if all generations have reached the maximal committed
  // capacity that they can reach without a garbage collection.
  virtual bool is_maximal_no_gc() const;

  virtual void register_nmethod(nmethod* nm);
  virtual void unregister_nmethod(nmethod* nm);
  virtual void verify_nmethod(nmethod* nm);
  virtual void flush_nmethod(nmethod* nm);

  void prune_scavengable_nmethods();

  size_t max_capacity() const;

  // Whether p is in the allocated part of the heap
  bool is_in(const void* p) const;

  bool is_in_reserved(const void* p) const;

  bool is_in_young(oop p);  // reserved part
  bool is_in_old(oop p);    // reserved part

  MemRegion reserved_region() const { return _reserved; }
  HeapWord* base() const { return _reserved.start(); }

  // Memory allocation.  "gc_overhead_limit_was_exceeded" will
  // be set to true if the adaptive size policy determines that
  // an excessive amount of time is being spent doing collections
  // and caused a NULL to be returned.  If a NULL is not returned,
  // "gc_overhead_limit_was_exceeded" has an undefined meaning.
  HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
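  // Illustrative caller-side sketch (hypothetical, not taken from the VM
  // sources): the flag only carries meaning when NULL is returned.
  //   bool gc_overhead_limit_was_exceeded = false;
  //   HeapWord* obj = heap->mem_allocate(word_size, &gc_overhead_limit_was_exceeded);
  //   if (obj == NULL && gc_overhead_limit_was_exceeded) {
  //     // The size policy judged that too much time is spent in GC; callers
  //     // typically surface this as "GC overhead limit exceeded".
  //   }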

  // Allocation attempt(s) during a safepoint. It should never be called
  // to allocate a new TLAB as this allocation might be satisfied out
  // of the old generation.
  HeapWord* failed_mem_allocate(size_t size);

  // Support for System.gc()
  void collect(GCCause::Cause cause);

  // This should also be called by the VM thread at a safepoint (e.g., from a
  // VM operation).
  //
  // It collects the young generation only; if the scavenge fails, it will
  // then attempt a full gc. A full collection of the entire heap is performed
  // via do_full_collection() below.
  inline void invoke_scavenge();

  // Perform a full collection
  virtual void do_full_collection(bool clear_all_soft_refs);

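  // Explanatory note (not in the original header): the accessors below expose
  // eden's allocation top/end so that code outside the heap can bump-allocate
  // inline in eden. With UseNUMA, eden is split into per-node spaces, so no
  // single top/end exists; inline contiguous allocation is then disabled and
  // the -1 sentinels are returned instead.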
  bool supports_inline_contig_alloc() const { return !UseNUMA; }

  HeapWord* volatile* top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord* volatile*)-1; }
  HeapWord** end_addr() const { return !UseNUMA ? young_gen()->end_addr() : (HeapWord**)-1; }

  void ensure_parsability(bool retire_tlabs);
  void resize_all_tlabs();

  size_t tlab_capacity(Thread* thr) const;
  size_t tlab_used(Thread* thr) const;
  size_t unsafe_max_tlab_alloc(Thread* thr) const;

  void object_iterate(ObjectClosure* cl);
  void object_iterate_parallel(ObjectClosure* cl, HeapBlockClaimer* claimer);
  virtual ParallelObjectIterator* parallel_object_iterator(uint thread_num);

  HeapWord* block_start(const void* addr) const;
  bool block_is_obj(const HeapWord* addr) const;

  void prepare_for_verify();
  PSHeapSummary create_ps_heap_summary();
  virtual void print_on(outputStream* st) const;
  virtual void print_on_error(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;
  virtual void print_tracing_info() const;

  virtual WorkerThreads* safepoint_workers() { return &_workers; }

  PreGenGCValues get_pre_gc_values() const;
  void print_heap_change(const PreGenGCValues& pre_gc_values) const;

  // Used to print information about locations in the hs_err file.
  virtual bool print_location(outputStream* st, void* addr) const;

  void verify(VerifyOption option /* ignored */);

  // Resize the young generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_young_gen(size_t eden_size, size_t survivor_size);

  // Resize the old generation.  The reserved space for the
  // generation may be expanded in preparation for the resize.
  void resize_old_gen(size_t desired_free_space);

  // Save the tops of the spaces in all generations
  void record_gen_tops_before_GC() PRODUCT_RETURN;

  // Mangle the unused parts of all spaces in the heap
  void gen_mangle_unused_area() PRODUCT_RETURN;

  GCMemoryManager* old_gc_manager() const { return _old_manager; }
  GCMemoryManager* young_gc_manager() const { return _young_manager; }

  WorkerThreads& workers() {
    return _workers;
  }
};

// Class that can be used to print information about the
// adaptive size policy at intervals specified by
// AdaptiveSizePolicyOutputInterval.  Only print information
// if an adaptive size policy is in use.
class AdaptiveSizePolicyOutput : AllStatic {
  static bool enabled() {
    return UseParallelGC &&
           UseAdaptiveSizePolicy &&
           log_is_enabled(Debug, gc, ergo);
  }
 public:
  static void print() {
    if (enabled()) {
      ParallelScavengeHeap::heap()->size_policy()->print();
    }
  }

  static void print(AdaptiveSizePolicy* size_policy, uint count) {
    bool do_print =
        enabled() &&
        (AdaptiveSizePolicyOutputInterval > 0) &&
        (count % AdaptiveSizePolicyOutputInterval) == 0;

    if (do_print) {
      size_policy->print();
    }
  }
};
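
// Illustrative usage sketch (hypothetical, not from this file): a collector
// could call the counted overload once per GC so that output appears every
// AdaptiveSizePolicyOutputInterval-th collection when UseAdaptiveSizePolicy
// and -Xlog:gc+ergo=debug are enabled.
//
//   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
//   AdaptiveSizePolicyOutput::print(heap->size_policy(), heap->total_collections());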

#endif // SHARE_GC_PARALLEL_PARALLELSCAVENGEHEAP_HPP