/*
 * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/serial/cardTableRS.hpp"
#include "gc/serial/serialGcRefProcProxyTask.hpp"
#include "gc/serial/serialHeap.inline.hpp"
#include "gc/serial/serialStringDedup.inline.hpp"
#include "gc/serial/tenuredGeneration.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/ageTable.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/threads.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"

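// Scans the fields of objects whose promotion failed. Such objects remain in
// eden or from-space, forwarded to themselves, so their referents still need
// to be scavenged.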
class PromoteFailureClosure : public InHeapScanClosure {
  template <typename T>
  void do_oop_work(T* p) {
    assert(is_in_young_gen(p), "promote-fail objs must be in young-gen");
    assert(!SerialHeap::heap()->young_gen()->to()->is_in_reserved(p), "must not be in to-space");

    try_scavenge(p, [] (auto) {});
  }
public:
  PromoteFailureClosure(DefNewGeneration* g) : InHeapScanClosure(g) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

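// Scans root locations outside the heap (e.g. thread stacks and VM-internal
// roots), evacuating any young-gen objects they reference.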
class RootScanClosure : public OffHeapScanClosure {
  template <typename T>
  void do_oop_work(T* p) {
    assert(!SerialHeap::heap()->is_in_reserved(p), "outside the heap");

    try_scavenge(p, [] (auto) {});
  }
public:
  RootScanClosure(DefNewGeneration* g) : OffHeapScanClosure(g) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

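// Scans the oops of a ClassLoaderData. The embedded oop closure re-dirties the
// CLD if it still holds a reference into the young gen after scavenging, so
// that the next scavenge will scan it again.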
class CLDScanClosure: public CLDClosure {

  class CLDOopClosure : public OffHeapScanClosure {
    ClassLoaderData* _scanned_cld;

    template <typename T>
    void do_oop_work(T* p) {
      assert(!SerialHeap::heap()->is_in_reserved(p), "outside the heap");

      try_scavenge(p, [&] (oop new_obj) {
        assert(_scanned_cld != nullptr, "inv");
        if (is_in_young_gen(new_obj) && !_scanned_cld->has_modified_oops()) {
          _scanned_cld->record_modified_oops();
        }
      });
    }

  public:
    CLDOopClosure(DefNewGeneration* g) : OffHeapScanClosure(g),
      _scanned_cld(nullptr) {}

    void set_scanned_cld(ClassLoaderData* cld) {
      assert(cld == nullptr || _scanned_cld == nullptr, "Must be");
      _scanned_cld = cld;
    }

    void do_oop(oop* p)       { do_oop_work(p); }
    void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  };

  CLDOopClosure _oop_closure;
 public:
  CLDScanClosure(DefNewGeneration* g) : _oop_closure(g) {}

  void do_cld(ClassLoaderData* cld) {
    // If the cld has not been dirtied we know that there are
    // no references into the young gen and we can skip it.
    if (cld->has_modified_oops()) {

      // Tell the closure which CLD is being scanned so that it can be dirtied
      // if oops are left pointing into the young gen.
      _oop_closure.set_scanned_cld(cld);

      // Clean the cld since we're going to scavenge all the metadata.
      cld->oops_do(&_oop_closure, ClassLoaderData::_claim_none, /*clear_modified_oops*/true);

      _oop_closure.set_scanned_cld(nullptr);
    }
  }
};

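// Liveness predicate for this scavenge: objects outside the young gen are
// treated as live; a young-gen object is live iff it has already been
// forwarded (evacuated) by this collection.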
class IsAliveClosure: public BoolObjectClosure {
  HeapWord*         _young_gen_end;
public:
  IsAliveClosure(DefNewGeneration* g): _young_gen_end(g->reserved().end()) {}

  bool do_object_b(oop p) {
    return cast_from_oop<HeapWord*>(p) >= _young_gen_end || p->is_forwarded();
  }
};

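// Updates weak roots outside the heap that point into the young gen. Runs
// after evacuation and reference processing, so every live young-gen object
// has already been forwarded; the root is simply redirected to the forwardee.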
class AdjustWeakRootClosure: public OffHeapScanClosure {
  template <class T>
  void do_oop_work(T* p) {
    DEBUG_ONLY(SerialHeap* heap = SerialHeap::heap();)
    assert(!heap->is_in_reserved(p), "outside the heap");

    oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
    if (is_in_young_gen(obj)) {
      assert(!heap->young_gen()->to()->is_in_reserved(obj), "inv");
      assert(obj->is_forwarded(), "forwarded before weak-root-processing");
      oop new_obj = obj->forwardee();
      RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
    }
  }
 public:
  AdjustWeakRootClosure(DefNewGeneration* g): OffHeapScanClosure(g) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};

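// Keeps a referent alive during reference processing: the object is evacuated
// (or its existing forwardee is used) and the referring slot is updated. If an
// old-gen slot still points into the young gen afterwards, the corresponding
// card is dirtied so the reference is found by the next scavenge.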
class KeepAliveClosure: public OopClosure {
  DefNewGeneration* _young_gen;
  HeapWord*         _young_gen_end;
  CardTableRS* _rs;

  bool is_in_young_gen(void* p) const {
    return p < _young_gen_end;
  }

  template <class T>
  void do_oop_work(T* p) {
    oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);

    if (is_in_young_gen(obj)) {
      oop new_obj = obj->is_forwarded() ? obj->forwardee()
                                        : _young_gen->copy_to_survivor_space(obj);
      RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);

      if (is_in_young_gen(new_obj) && !is_in_young_gen(p)) {
        _rs->inline_write_ref_field_gc(p);
      }
    }
  }
public:
  KeepAliveClosure(DefNewGeneration* g) :
    _young_gen(g),
    _young_gen_end(g->reserved().end()),
    _rs(SerialHeap::heap()->rem_set()) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

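// Drives the transitive scan of objects evacuated during this collection, in
// both to-space and the old gen, until no unscanned evacuated objects remain.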
class FastEvacuateFollowersClosure: public VoidClosure {
  SerialHeap* _heap;
  YoungGenScanClosure* _young_cl;
  OldGenScanClosure* _old_cl;
public:
  FastEvacuateFollowersClosure(SerialHeap* heap,
                               YoungGenScanClosure* young_cl,
                               OldGenScanClosure* old_cl) :
    _heap(heap), _young_cl(young_cl), _old_cl(old_cl)
  {}

  void do_void() {
    _heap->scan_evacuated_objs(_young_cl, _old_cl);
  }
};

DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   size_t min_size,
                                   size_t max_size,
                                   const char* policy)
  : Generation(rs, initial_size),
    _promotion_failed(false),
    _preserved_marks_set(false /* in_c_heap */),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false),
    _string_dedup_requests()
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  SerialHeap* gch = SerialHeap::heap();

  gch->rem_set()->resize_covered_region(cmr);

  _eden_space = new ContiguousSpace();
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, SpaceAlignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3,
      min_size, max_size, &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _old_gen = nullptr;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;

  _ref_processor = nullptr;

  _gc_timer = new STWGCTimer();

  _gc_tracer = new DefNewTracer();
}

void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces,
  // so check that they are empty.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
    "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, SpaceAlignment);
  uintx eden_size = size - (2*survivor_size);
  if (eden_size > max_eden_size()) {
    // Need to reduce eden_size to satisfy the max constraint. The delta needs
    // to be 2*SpaceAlignment aligned so that both survivors are properly
    // aligned.
    uintx eden_delta = align_up(eden_size - max_eden_size(), 2*SpaceAlignment);
    eden_size     -= eden_delta;
    survivor_size += eden_delta/2;
  }
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding; if so, adjust eden size back up.
    minimum_eden_size = align_up(minimum_eden_size, SpaceAlignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_down(maximum_survivor_size, SpaceAlignment);
    survivor_size = MAX2(unaligned_survivor_size, SpaceAlignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(is_aligned(eden_start, SpaceAlignment), "checking alignment");
  assert(is_aligned(from_start, SpaceAlignment), "checking alignment");
  assert(is_aligned(to_start, SpaceAlignment),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);
}

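// Exchange the roles of from-space and to-space, along with their performance
// counters. Called at the end of a scavenge.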
void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space        = to();
  _to_space          = s;

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

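// Attempt to commit an additional `bytes` of the reserved young-gen space.
// Newly committed memory is mangled immediately when ZapUnusedHeapArea is set.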
bool DefNewGeneration::expand(size_t bytes) {
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt to expand to the reserve size.  The
  // request should properly observe the maximum size of
  // the generation, so an expand-to-reserve should be
  // unnecessary.  Also, a second call to expand to the
  // reserve value can potentially cause an undue expansion,
  // for example if the first expand fails for unknown reasons
  // but the second succeeds and expands the heap to its maximum
  // value.
  if (GCLocker::is_active()) {
    log_debug(gc)("Garbage collection disabled, expanded heap instead");
  }

  return success;
}

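// Returns threads_count * NewSizeThreadIncrease, or 0 if that product would
// overflow.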
size_t DefNewGeneration::calculate_thread_increase_size(int threads_count) const {
  size_t thread_increase_size = 0;
  // Check for overflow at 'threads_count * NewSizeThreadIncrease'.
  if (threads_count > 0 && NewSizeThreadIncrease <= max_uintx / threads_count) {
    thread_increase_size = threads_count * NewSizeThreadIncrease;
  }
  return thread_increase_size;
}

size_t DefNewGeneration::adjust_for_thread_increase(size_t new_size_candidate,
                                                    size_t new_size_before,
                                                    size_t alignment,
                                                    size_t thread_increase_size) const {
  size_t desired_new_size = new_size_before;

  if (NewSizeThreadIncrease > 0 && thread_increase_size > 0) {

    // 1. Check for overflow at 'new_size_candidate + thread_increase_size'.
    if (new_size_candidate <= max_uintx - thread_increase_size) {
      new_size_candidate += thread_increase_size;

      // 2. Check for overflow at 'align_up'.
      size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));
      if (new_size_candidate <= aligned_max) {
        desired_new_size = align_up(new_size_candidate, alignment);
      }
    }
  }

  return desired_new_size;
}

void DefNewGeneration::compute_new_size() {
  // This is called after a GC that includes the old generation, so from-space
  // will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If either is non-empty, we bail out (otherwise we would have to relocate
  // the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  SerialHeap* gch = SerialHeap::heap();

  size_t old_size = gch->old_gen()->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = NewSize;
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = calculate_thread_increase_size(threads_count);

  size_t new_size_candidate = old_size / NewRatio;
  // Compute the desired new generation size based on NewRatio and
  // NewSizeThreadIncrease, reverting to the previous value if any overflow happens.
  size_t desired_new_size = adjust_for_thread_increase(new_size_candidate, new_size_before,
                                                       alignment, thread_increase_size);

  // Adjust new generation size
  desired_new_size = clamp(desired_new_size, min_new_size, max_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
      changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false.  If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // Bail out of shrinking if there are objects in eden.
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    gch->rem_set()->resize_covered_region(cmr);

    log_debug(gc, ergo, heap)(
        "New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, _virtual_space.committed_size()/K,
        eden()->capacity()/K, from()->capacity()/K);
    log_trace(gc, ergo, heap)(
        "  [allowed " SIZE_FORMAT "K extra for %d threads]",
        thread_increase_size/K, threads_count);
  }
}

void DefNewGeneration::ref_processor_init() {
  assert(_ref_processor == nullptr, "a reference processor already exists");
  assert(!_reserved.is_empty(), "empty generation?");
  _span_based_discoverer.set_span(_reserved);
  _ref_processor = new ReferenceProcessor(&_span_based_discoverer);    // a vanilla reference processor
}

size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, SpaceAlignment);
}

bool DefNewGeneration::is_in(const void* p) const {
  return eden()->is_in(p)
      || from()->is_in(p)
      || to()  ->is_in(p);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}


void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}

// Returns the address of the start of the "block" that contains "p".
// We say "block" instead of "object" since some heaps may not pack
// objects densely; a chunk may either be an object or a non-object.
// "p" must be within the space.
// Very general, slow implementation.
static HeapWord* block_start_const(const ContiguousSpace* cs, const void* p) {
  assert(MemRegion(cs->bottom(), cs->end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(cs->bottom()), p2i(cs->end()));
  if (p >= cs->top()) {
    return cs->top();
  } else {
    HeapWord* last = cs->bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += cast_to_oop(cur)->size();
    }
    assert(oopDesc::is_oop(cast_to_oop(last)), PTR_FORMAT " should be an object start", p2i(last));
    return last;
  }
}

HeapWord* DefNewGeneration::block_start(const void* p) const {
  if (eden()->is_in_reserved(p)) {
    return block_start_const(eden(), p);
  }
  if (from()->is_in_reserved(p)) {
    return block_start_const(from(), p);
  }
  assert(to()->is_in_reserved(p), "inv");
  return block_start_const(to(), p);
}

// The last collection bailed out; we are running out of heap space,
// so we try to allocate from the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  bool should_try_alloc = should_allocate_from_space() || GCLocker::is_active_and_needs_gc();

  // If the Heap_lock is not locked by this thread, this will be called
  // again later with the Heap_lock held.
  bool do_alloc = should_try_alloc && (Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()));

  HeapWord* result = nullptr;
  if (do_alloc) {
    result = from()->allocate(size);
  }

  log_trace(gc, alloc)("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "):  will_fail: %s  heap_lock: %s  free: " SIZE_FORMAT "%s%s returns %s",
                        size,
                        SerialHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
                          "true" : "false",
                        Heap_lock->is_locked() ? "locked" : "unlocked",
                        from()->free(),
                        should_try_alloc ? "" : "  should_allocate_from_space: NOT",
                        do_alloc ? "" : "  Heap_lock is not owned by self",
                        result == nullptr ? "null" : "object");

  return result;
}

HeapWord* DefNewGeneration::expand_and_allocate(size_t size, bool is_tlab) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}

void DefNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to half the real survivor space
  size_t const survivor_capacity = to()->capacity() / HeapWordSize;
  size_t const desired_survivor_size = (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);

  _tenuring_threshold = age_table()->compute_tenuring_threshold(desired_survivor_size);

  if (UsePerfData) {
    GCPolicyCounters* gc_counters = SerialHeap::heap()->counters();
    gc_counters->tenuring_threshold()->set_value(_tenuring_threshold);
    gc_counters->desired_survivor_size()->set_value(desired_survivor_size * oopSize);
  }

  age_table()->print_age_table();
}

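// Perform a young-generation scavenge: evacuate live objects reachable from
// the roots and from the old gen into to-space (or promote them to the old
// gen), process discovered references and weak roots, then swap the survivor
// spaces. Returns false if a promotion failure occurred.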
bool DefNewGeneration::collect(bool clear_all_soft_refs) {
  SerialHeap* heap = SerialHeap::heap();

  assert(to()->is_empty(), "Else not collection_attempt_is_safe");
  _gc_timer->register_gc_start();
  _gc_tracer->report_gc_start(heap->gc_cause(), _gc_timer->gc_start());
  _ref_processor->start_discovery(clear_all_soft_refs);

  _old_gen = heap->old_gen();

  init_assuming_no_promotion_failure();

  GCTraceTime(Trace, gc, phases) tm("DefNew", nullptr, heap->gc_cause());

  heap->trace_heap_before_gc(_gc_tracer);

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);
  // The preserved marks should be empty at the start of the GC.
  _preserved_marks_set.init(1);

  YoungGenScanClosure young_gen_cl(this);
  OldGenScanClosure   old_gen_cl(this);

  FastEvacuateFollowersClosure evacuate_followers(heap,
                                                  &young_gen_cl,
                                                  &old_gen_cl);

  {
    StrongRootsScope srs(0);
    RootScanClosure root_cl{this};
    CLDScanClosure cld_cl{this};

    MarkingNMethodClosure code_cl(&root_cl,
                                  NMethodToOopClosure::FixRelocations,
                                  false /* keepalive_nmethods */);

    HeapWord* saved_top_in_old_gen = _old_gen->space()->top();
    heap->process_roots(SerialHeap::SO_ScavengeCodeCache,
                        &root_cl,
                        &cld_cl,
                        &cld_cl,
                        &code_cl);

    _old_gen->scan_old_to_young_refs(saved_top_in_old_gen);
  }

  // "evacuate followers".
  evacuate_followers.do_void();

  {
    // Reference processing
    KeepAliveClosure keep_alive(this);
    ReferenceProcessor* rp = ref_processor();
    ReferenceProcessorPhaseTimes pt(_gc_timer, rp->max_num_queues());
    SerialGCRefProcProxyTask task(is_alive, keep_alive, evacuate_followers);
    const ReferenceProcessorStats& stats = rp->process_discovered_references(task, pt);
    _gc_tracer->report_gc_reference_stats(stats);
    _gc_tracer->report_tenuring_threshold(tenuring_threshold());
    pt.print_all_references();
  }

  {
    AdjustWeakRootClosure cl{this};
    WeakProcessor::weak_oops_do(&is_alive, &cl);
  }

  _string_dedup_requests.flush();

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();

    assert(!heap->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    log_info(gc, promotion)("Promotion failed");
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    heap->set_incremental_collection_failed();

    _gc_tracer->report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(heap->reset_promotion_should_fail();)
  }
  // We should have processed and cleared all the preserved marks.
  _preserved_marks_set.reclaim();

  heap->trace_heap_after_gc(_gc_tracer);

  _gc_timer->register_gc_end();

  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());

  return !_promotion_failed;
}

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  _promotion_failed_info.reset();
}

void DefNewGeneration::remove_forwarding_pointers() {
  assert(_promotion_failed, "precondition");

  // Will enter Full GC soon due to failed promotion. Must reset the mark word
  // of objs in young-gen so that no objs are marked (forwarded) when Full GC
  // starts. (The mark word is overloaded: `is_marked()` == `is_forwarded()`.)
  struct ResetForwardedMarkWord : ObjectClosure {
    void do_object(oop obj) override {
      if (obj->is_forwarded()) {
        obj->init_mark();
      }
    }
  } cl;
  eden()->object_iterate(&cl);
  from()->object_iterate(&cl);

  restore_preserved_marks();
}

void DefNewGeneration::restore_preserved_marks() {
  _preserved_marks_set.restore(nullptr);
}

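// Called when the old gen cannot accommodate an object that did not fit in
// to-space. The object stays where it is, is forwarded to itself, and is
// pushed on the promo-failure scan stack so its fields still get scavenged.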
void DefNewGeneration::handle_promotion_failure(oop old) {
  log_debug(gc, promotion)("Promotion failure size = " SIZE_FORMAT, old->size());

  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());
  _preserved_marks_set.get()->push_if_necessary(old, old->mark());

  ContinuationGCSupport::transform_stack_chunk(old);

  // forward to self
  old->forward_to(old);

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}

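// Evacuate `old`: copy it into to-space if it is young enough and space is
// available, otherwise promote it into the old gen. On promotion failure the
// original object is returned, forwarded to itself.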
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = nullptr;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = cast_to_oop(to()->allocate(s));
  }

  bool new_obj_is_tenured = false;
  // Otherwise try allocating obj tenured
  if (obj == nullptr) {
    obj = _old_gen->promote(old, s);
    if (obj == nullptr) {
      handle_promotion_failure(old);
      return old;
    }

    ContinuationGCSupport::transform_stack_chunk(obj);

    new_obj_is_tenured = true;
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), cast_from_oop<HeapWord*>(obj), s);

    ContinuationGCSupport::transform_stack_chunk(obj);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);

  if (SerialStringDedup::is_candidate_from_evacuation(obj, new_obj_is_tenured)) {
    // Record old; request adds a new weak reference, which reference
    // processing expects to refer to a from-space object.
    _string_dedup_requests.add(old);
  }
  return obj;
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  PromoteFailureClosure cl{this};
  while (!_promo_failure_scan_stack.is_empty()) {
    oop obj = _promo_failure_scan_stack.pop();
    obj->oop_iterate(&cl);
  }
}

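// Offer the unused portion of to-space as scratch memory for a full
// collection. Skipped after a promotion failure, since to-space may then
// contain live objects.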
void DefNewGeneration::contribute_scratch(void*& scratch, size_t& num_words) {
  if (_promotion_failed) {
    return;
  }

  const size_t MinFreeScratchWords = 100;

  ContiguousSpace* to_space = to();
  const size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    scratch = to_space->top();
    num_words = free_words;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea.  This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area();
  }
}

bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    log_trace(gc)(":: to is not empty ::");
    return false;
  }
  if (_old_gen == nullptr) {
    _old_gen = SerialHeap::heap()->old_gen();
  }
  return _old_gen->promotion_attempt_is_safe(used());
}

void DefNewGeneration::gc_epilogue(bool full) {
  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)

  assert(!GCLocker::is_active(), "We should not be executing here");
  // Check if the heap is approaching full after a collection has
  // been done.  Generally the young generation is empty at the end
  // of a collection.  If it is not, then the heap is approaching full.
  SerialHeap* gch = SerialHeap::heap();
  if (full) {
    DEBUG_ONLY(seen_incremental_collection_failed = false;)
    if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
      log_trace(gc)("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
      set_should_allocate_from_space(); // we seem to be running out of space
    } else {
      log_trace(gc)("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      gch->clear_incremental_collection_failed(); // We just did a full collection
      clear_should_allocate_from_space(); // if set
    }
  } else {
#ifdef ASSERT
    // It is possible that incremental_collection_failed() == true
    // here, because an attempted scavenge did not succeed. The policy
    // is normally expected to cause a full collection which should
    // clear that condition, so we should not be here twice in a row
    // with incremental_collection_failed() == true without having done
    // a full collection in between.
    if (!seen_incremental_collection_failed &&
        gch->incremental_collection_failed()) {
      log_trace(gc)("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      seen_incremental_collection_failed = true;
    } else if (seen_incremental_collection_failed) {
      log_trace(gc)("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      seen_incremental_collection_failed = false;
    }
#endif // ASSERT
  }

  // update the generation and space performance counters
  update_counters();
  gch->counters()->update_counters();
}

void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify() {
  eden()->verify();
  from()->verify();
  to()->verify();
}

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print("  eden");
  eden()->print_on(st);
  st->print("  from");
  from()->print_on(st);
  st->print("  to  ");
  to()->print_on(st);
}


const char* DefNewGeneration::name() const {
  return "def new generation";
}

HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result == nullptr) {
    // If the eden is full and the last collection bailed out, we are running
    // out of heap space, and we try to allocate from the from-space, too.
    // allocate_from_space can't be inlined because that would introduce a
    // circular dependency at compile time.
    result = allocate_from_space(word_size);
  }
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  return eden()->par_allocate(word_size);
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::tlab_used() const {
  return eden()->used();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}