/*
 * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SERIAL_DEFNEWGENERATION_HPP
#define SHARE_GC_SERIAL_DEFNEWGENERATION_HPP

#include "gc/serial/cSpaceCounters.hpp"
#include "gc/serial/generation.hpp"
#include "gc/serial/tenuredGeneration.hpp"
#include "gc/shared/ageTable.hpp"
#include "gc/shared/copyFailedInfo.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/generationCounters.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "gc/shared/stringdedup/stringDedup.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "utilities/align.hpp"
#include "utilities/stack.hpp"

class ContiguousSpace;
class CSpaceCounters;
class OldGenScanClosure;
class YoungGenScanClosure;
class DefNewTracer;
class ScanWeakRefClosure;
class SerialHeap;
class STWGCTimer;

// DefNewGeneration is a young generation containing eden, from- and
// to-space.

class DefNewGeneration: public Generation {
  friend class VMStructs;

  TenuredGeneration* _old_gen;

  uint        _tenuring_threshold;   // Tenuring threshold for next collection.
  AgeTable    _age_table;
  // Size threshold (in words) above which objects are pretenured directly
  // into the old generation; the command-line option specifies it in bytes.
  size_t      _pretenure_size_threshold_words;

  // ("Weak") Reference processing support
  SpanSubjectToDiscoveryClosure _span_based_discoverer;
  ReferenceProcessor* _ref_processor;

  AgeTable*   age_table() { return &_age_table; }

  // Initialize state to optimistically assume no promotion failure will
  // happen.
  void   init_assuming_no_promotion_failure();
  // True iff a promotion has failed in the current collection.
  bool   _promotion_failed;
  bool   promotion_failed() { return _promotion_failed; }
  PromotionFailedInfo _promotion_failed_info;

  // Handling promotion failure.  A young generation collection
  // can fail if a live object cannot be copied out of its
  // location in eden or from-space during the collection.  If
  // a collection fails, the young generation is left in a
  // consistent state such that it can be collected by a
  // full collection.
  //   Before the collection
  //     Objects are in eden or from-space
  //     All roots into the young generation point into eden or from-space.
  //
  //   After a failed collection
  //     Objects may be in eden, from-space, or to-space
  //     An object A in eden or from-space may have a copy B
  //       in to-space.  If B exists, all roots that once pointed
  //       to A must now point to B.
  //     All objects in the young generation are unmarked.
  //     Eden, from-space, and to-space will all be collected by
  //       the full collection.
  void handle_promotion_failure(oop);
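
  // Illustrative sketch, not actual HotSpot code: after a failed collection,
  // a root-fixing pass conceptually redirects each root from the original
  // object A to its to-space copy B (when B exists) by following the
  // forwarding pointer installed during the attempted copy:
  //
  //   oop obj = *root;
  //   if (obj->is_forwarded()) {     // a copy B exists in to-space
  //     *root = obj->forwardee();    // make the root point at B instead of A
  //   }
  //
  // remove_forwarding_pointers() below then removes the leftover forwarding
  // state so the subsequent full collection sees consistent objects.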

  // In the absence of promotion failure, we wouldn't look at "from-space"
  // objects after a young-gen collection.  When promotion fails, however,
  // the subsequent full collection will look at from-space objects:
  // therefore we must remove their forwarding pointers.
  void remove_forwarding_pointers();

  virtual void restore_preserved_marks();

  // Preserved marks
  PreservedMarksSet _preserved_marks_set;

  Stack<oop, mtGC> _promo_failure_scan_stack;
  void drain_promo_failure_scan_stack(void);
  bool _promo_failure_drain_in_progress;

  // Performance Counters
  GenerationCounters*  _gen_counters;
  CSpaceCounters*      _eden_counters;
  CSpaceCounters*      _from_counters;
  CSpaceCounters*      _to_counters;

  // Sizing information
  size_t               _max_eden_size;
  size_t               _max_survivor_size;

  // Allocation support
  bool _should_allocate_from_space;
  bool should_allocate_from_space() const {
    return _should_allocate_from_space;
  }
  void clear_should_allocate_from_space() {
    _should_allocate_from_space = false;
  }
  void set_should_allocate_from_space() {
    _should_allocate_from_space = true;
  }

  // Tenuring
  void adjust_desired_tenuring_threshold();

  // Spaces
  ContiguousSpace* _eden_space;
  ContiguousSpace* _from_space;
  ContiguousSpace* _to_space;

  // Saved mark word, for to-space
  HeapWord* _saved_mark_word;

  STWGCTimer* _gc_timer;

  DefNewTracer* _gc_tracer;

  StringDedup::Requests _string_dedup_requests;

  // Return the size of a survivor space if this generation were of size
  // gen_size.
  size_t compute_survivor_size(size_t gen_size, size_t alignment) const {
    size_t n = gen_size / (SurvivorRatio + 2);
    return n > alignment ? align_down(n, alignment) : alignment;
  }
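
  // Worked example (illustrative, assuming the default SurvivorRatio of 8):
  // with gen_size = 10M the formula gives n = 10M / (8 + 2) = 1M, which is
  // then aligned down to the space alignment, so each survivor space gets
  // about 1M and eden gets the remaining ~8M of the young generation.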

 public:
  DefNewGeneration(ReservedSpace rs,
                   size_t initial_byte_size,
                   size_t min_byte_size,
                   size_t max_byte_size,
                   const char* policy="Serial young collection pauses");

  // Allocate and initialize ("weak") reference processing support.
  void ref_processor_init();
  ReferenceProcessor* ref_processor() { return _ref_processor; }

  // Accessing spaces
  ContiguousSpace* eden() const           { return _eden_space; }
  ContiguousSpace* from() const           { return _from_space; }
  ContiguousSpace* to()   const           { return _to_space;   }

  HeapWord* saved_mark_word()   const    { return _saved_mark_word; }
  void set_saved_mark_word()             { _saved_mark_word = to()->top(); }
  bool saved_mark_at_top()               { return _saved_mark_word == _to_space->top(); }

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;
  size_t max_capacity() const;
  size_t capacity_before_gc() const;

  // Returns true iff "p" points into the used area of any of the young-gen
  // spaces (eden, from-space, or to-space).
  bool is_in(const void* p) const;

  // Return an estimate of the maximum allocation that could be performed
  // in the generation without triggering any collection or expansion
  // activity.  It is "unsafe" because no locks are taken; the result
  // should be treated as an approximation, not a guarantee, for use in
  // heuristic resizing decisions.
  size_t unsafe_max_alloc_nogc() const;

  size_t contiguous_available() const;

  size_t max_eden_size() const              { return _max_eden_size; }
  size_t max_survivor_size() const          { return _max_survivor_size; }

  // Thread-local allocation buffers
  bool supports_tlab_allocation() const { return true; }
  size_t tlab_capacity() const;
  size_t tlab_used() const;
  size_t unsafe_max_tlab_alloc() const;

  // Grow the generation by the specified number of bytes.
  // The byte size is assumed to be properly aligned.
  // Return true if the expansion was successful.
  bool expand(size_t bytes);


  // Iteration
  void object_iterate(ObjectClosure* blk);

  HeapWord* block_start(const void* p) const;

  // Allocation support
  virtual bool should_allocate(size_t word_size, bool is_tlab) {
    assert(UseTLAB || !is_tlab, "Should not allocate tlab");

    size_t overflow_limit    = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);

    const bool non_zero      = word_size > 0;
    const bool overflows     = word_size >= overflow_limit;
    const bool check_too_big = _pretenure_size_threshold_words > 0;
    const bool not_too_big   = word_size < _pretenure_size_threshold_words;
    const bool size_ok       = is_tlab || !check_too_big || not_too_big;

    bool result = !overflows &&
                  non_zero   &&
                  size_ok;

    return result;
  }
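
  // Illustrative note (assumptions about a typical 64-bit build, not part of
  // the interface above): with BitsPerSize_t == 64 and LogHeapWordSize == 3,
  // overflow_limit is 2^61 words, i.e. the smallest word count whose size in
  // bytes no longer fits in a size_t.  A caller would typically pair the
  // check with the allocation itself, along the lines of
  //
  //   HeapWord* result = nullptr;
  //   if (young_gen->should_allocate(word_size, false /* is_tlab */)) {
  //     result = young_gen->allocate(word_size, false /* is_tlab */);
  //   }
  //
  // where young_gen is a DefNewGeneration*; a null result is then handled by
  // the heap's slow allocation path.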

  HeapWord* allocate(size_t word_size, bool is_tlab);
  HeapWord* allocate_from_space(size_t word_size);

  HeapWord* par_allocate(size_t word_size, bool is_tlab);

  void gc_epilogue(bool full);

  // Save the tops for eden, from, and to
  void record_spaces_top();

  // Accessing marks
  void save_marks();

  bool no_allocs_since_save_marks();

  // Need to declare the full complement of closures, whether we'll
  // override them or not, or get a message from the compiler:
  //   oop_since_save_marks_iterate_nv hides virtual function...
  template <typename OopClosureType>
  void oop_since_save_marks_iterate(OopClosureType* cl);
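
  // Illustrative sketch (an assumed call pattern, not a prescribed one): the
  // serial collector applies a scan closure to every object allocated since
  // the last save_marks(), e.g.
  //
  //   YoungGenScanClosure cl(/* ... */);   // constructor arguments elided
  //   young_gen->oop_since_save_marks_iterate(&cl);
  //
  // Making this a template over the closure type lets the closure's do_oop
  // calls be dispatched statically instead of through a virtual call.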

  // During an old-gen collection (as part of a full GC), DefNewGeneration can
  // contribute the free part of "to-space" as scratch space.
  void contribute_scratch(void*& scratch, size_t& num_words);

  // Reset the scratch contribution of "to-space".
  void reset_scratch();

  // GC support
  void compute_new_size();

  // Returns true if the collection is likely to be safely
  // completed. Even if this method returns true, the collection
  // is not guaranteed to succeed, and the system should be
  // able to safely unwind and recover from that failure, albeit
  // at some additional cost.
  bool collection_attempt_is_safe();

  virtual void collect(bool   full,
                       bool   clear_all_soft_refs,
                       size_t size,
                       bool   is_tlab);

  HeapWord* expand_and_allocate(size_t size, bool is_tlab);

  oop copy_to_survivor_space(oop old);
  uint tenuring_threshold() { return _tenuring_threshold; }
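
  // Illustrative sketch of the tenuring decision made during a scavenge (a
  // simplified view, not the exact implementation): an object whose age has
  // reached tenuring_threshold() is promoted into the old generation, while a
  // younger object is copied into to-space with its age incremented, roughly
  //
  //   if (old->age() >= tenuring_threshold()) {
  //     // promote the object into _old_gen (may trigger promotion failure)
  //   } else {
  //     // copy into to(), bump the age, and record it in age_table()
  //   }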

  // Performance Counter support
  void update_counters();

  // Printing
  virtual const char* name() const;
  virtual const char* short_name() const { return "DefNew"; }

  void print_on(outputStream* st) const;

  void verify();

  bool promo_failure_scan_is_complete() const {
    return _promo_failure_scan_stack.is_empty();
  }

  DefNewTracer* gc_tracer() const { return _gc_tracer; }

 protected:
  // If clear_space is true, clear the survivor spaces.  Eden is
  // cleared if the minimum size of eden is 0.  If mangle_space
  // is true, also mangle the space in debug mode.
  void compute_space_boundaries(uintx minimum_eden_size,
                                bool clear_space,
                                bool mangle_space);

  // Return the new size adjusted for NewSizeThreadIncrease.
  // If any overflow happens, revert to the previous new size.
  size_t adjust_for_thread_increase(size_t new_size_candidate,
                                    size_t new_size_before,
                                    size_t alignment,
                                    size_t thread_increase_size) const;

  size_t calculate_thread_increase_size(int threads_count) const;
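
  // Illustrative example (assuming NewSizeThreadIncrease is the per-thread
  // increment, e.g. 16K): with 100 application threads the thread increase is
  // about 1600K; adjust_for_thread_increase() adds that to new_size_candidate
  // and aligns the result, falling back to new_size_before if the arithmetic
  // would overflow.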


  // Scavenge support
  void swap_spaces();
};

#endif // SHARE_GC_SERIAL_DEFNEWGENERATION_HPP