/*
 * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/serial/cardTableRS.hpp"
#include "gc/serial/defNewGeneration.inline.hpp"
#include "gc/serial/serialGcRefProcProxyTask.hpp"
#include "gc/serial/serialHeap.inline.hpp"
#include "gc/serial/serialStringDedup.inline.hpp"
#include "gc/serial/tenuredGeneration.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/ageTable.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/threads.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"

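// Scans the fields of objects that failed promotion and were left in place
// (self-forwarded) in the young generation, trying to scavenge whatever they
// reference. Used when draining the promo-failure scan stack.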
class PromoteFailureClosure : public InHeapScanClosure {
  template <typename T>
  void do_oop_work(T* p) {
    assert(is_in_young_gen(p), "promote-fail objs must be in young-gen");
    assert(!SerialHeap::heap()->young_gen()->to()->is_in_reserved(p), "must not be in to-space");

    try_scavenge(p, [] (auto) {});
  }
public:
  PromoteFailureClosure(DefNewGeneration* g) : InHeapScanClosure(g) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

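// Scans root locations outside the heap (e.g. thread stacks and other strong
// roots), scavenging any young-gen objects they refer to.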
class RootScanClosure : public OffHeapScanClosure {
  template <typename T>
  void do_oop_work(T* p) {
    assert(!SerialHeap::heap()->is_in_reserved(p), "outside the heap");

    try_scavenge(p, [] (auto) {});
  }
public:
  RootScanClosure(DefNewGeneration* g) : OffHeapScanClosure(g) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

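// Scans the oops of dirty ClassLoaderData and re-dirties a CLD if, after
// scavenging, it still holds references into the young generation.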
class CLDScanClosure: public CLDClosure {

  class CLDOopClosure : public OffHeapScanClosure {
    ClassLoaderData* _scanned_cld;

    template <typename T>
    void do_oop_work(T* p) {
      assert(!SerialHeap::heap()->is_in_reserved(p), "outside the heap");

      try_scavenge(p, [&] (oop new_obj) {
        assert(_scanned_cld != nullptr, "inv");
        if (is_in_young_gen(new_obj) && !_scanned_cld->has_modified_oops()) {
          _scanned_cld->record_modified_oops();
        }
      });
    }

  public:
    CLDOopClosure(DefNewGeneration* g) : OffHeapScanClosure(g),
      _scanned_cld(nullptr) {}

    void set_scanned_cld(ClassLoaderData* cld) {
      assert(cld == nullptr || _scanned_cld == nullptr, "Must be");
      _scanned_cld = cld;
    }

    void do_oop(oop* p)       { do_oop_work(p); }
    void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  };

  CLDOopClosure _oop_closure;
 public:
  CLDScanClosure(DefNewGeneration* g) : _oop_closure(g) {}

  void do_cld(ClassLoaderData* cld) {
    // If the cld has not been dirtied we know that there are
    // no references into the young gen and we can skip it.
    if (cld->has_modified_oops()) {

      // Tell the closure which CLD is being scanned so that it can be dirtied
      // if oops are left pointing into the young gen.
      _oop_closure.set_scanned_cld(cld);

      // Clean the cld since we're going to scavenge all the metadata.
      cld->oops_do(&_oop_closure, ClassLoaderData::_claim_none, /*clear_modified_oops*/true);

      _oop_closure.set_scanned_cld(nullptr);
    }
  }
};

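// Liveness predicate used during reference and weak-root processing: an object
// is live if it lies outside the young generation or has already been
// forwarded (copied) by this scavenge.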
class IsAliveClosure: public BoolObjectClosure {
  HeapWord*         _young_gen_end;
public:
  IsAliveClosure(DefNewGeneration* g): _young_gen_end(g->reserved().end()) {}

  bool do_object_b(oop p) {
    return cast_from_oop<HeapWord*>(p) >= _young_gen_end || p->is_forwarded();
  }
};

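// Updates weak roots outside the heap so that they point to the new copies of
// forwarded young-gen objects.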
class AdjustWeakRootClosure: public OffHeapScanClosure {
  template <class T>
  void do_oop_work(T* p) {
    DEBUG_ONLY(SerialHeap* heap = SerialHeap::heap();)
    assert(!heap->is_in_reserved(p), "outside the heap");

    oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
    if (is_in_young_gen(obj)) {
      assert(!heap->young_gen()->to()->is_in_reserved(obj), "inv");
      assert(obj->is_forwarded(), "forwarded before weak-root-processing");
      oop new_obj = obj->forwardee();
      RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
    }
  }
 public:
  AdjustWeakRootClosure(DefNewGeneration* g): OffHeapScanClosure(g) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};

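// Keep-alive closure for reference processing: copies a young-gen referent to
// the survivor space (or promotes it), updates the referring slot, and dirties
// the corresponding card if a slot outside the young gen still points into it.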
class KeepAliveClosure: public OopClosure {
  DefNewGeneration* _young_gen;
  HeapWord*         _young_gen_end;
  CardTableRS* _rs;

  bool is_in_young_gen(void* p) const {
    return p < _young_gen_end;
  }

  template <class T>
  void do_oop_work(T* p) {
    oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);

    if (is_in_young_gen(obj)) {
      oop new_obj = obj->is_forwarded() ? obj->forwardee()
                                        : _young_gen->copy_to_survivor_space(obj);
      RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);

      if (is_in_young_gen(new_obj) && !is_in_young_gen(p)) {
        _rs->inline_write_ref_field_gc(p);
      }
    }
  }
public:
  KeepAliveClosure(DefNewGeneration* g) :
    _young_gen(g),
    _young_gen_end(g->reserved().end()),
    _rs(SerialHeap::heap()->rem_set()) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

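// Transitively scans objects evacuated during the scavenge (in both the young
// and the old generation) until no unscanned evacuated objects remain.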
class FastEvacuateFollowersClosure: public VoidClosure {
  SerialHeap* _heap;
  YoungGenScanClosure* _young_cl;
  OldGenScanClosure* _old_cl;
public:
  FastEvacuateFollowersClosure(SerialHeap* heap,
                               YoungGenScanClosure* young_cl,
                               OldGenScanClosure* old_cl) :
    _heap(heap), _young_cl(young_cl), _old_cl(old_cl)
  {}

  void do_void() {
    _heap->scan_evacuated_objs(_young_cl, _old_cl);
  }
};

DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   size_t min_size,
                                   size_t max_size,
                                   const char* policy)
  : Generation(rs, initial_size),
    _promotion_failed(false),
    _promo_failure_drain_in_progress(false),
    _string_dedup_requests()
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  SerialHeap* gch = SerialHeap::heap();

  gch->rem_set()->resize_covered_region(cmr);

  _eden_space = new ContiguousSpace();
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, SpaceAlignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3,
      min_size, max_size, &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _old_gen = nullptr;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;

  _ref_processor = nullptr;

  _gc_timer = new STWGCTimer();

  _gc_tracer = new DefNewTracer();
}

275 
276 void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
277                                                 bool clear_space,
278                                                 bool mangle_space) {
279   // If the spaces are being cleared (only done at heap initialization
280   // currently), the survivor spaces need not be empty.
281   // Otherwise, no care is taken for used areas in the survivor spaces
282   // so check.
283   assert(clear_space || (to()->is_empty() && from()->is_empty()),
284     "Initialization of the survivor spaces assumes these are empty");
285 
286   // Compute sizes
287   uintx size = _virtual_space.committed_size();
288   uintx survivor_size = compute_survivor_size(size, SpaceAlignment);
289   uintx eden_size = size - (2*survivor_size);
290   if (eden_size > max_eden_size()) {
291     // Need to reduce eden_size to satisfy the max constraint. The delta needs
292     // to be 2*SpaceAlignment aligned so that both survivors are properly
293     // aligned.
294     uintx eden_delta = align_up(eden_size - max_eden_size(), 2*SpaceAlignment);
295     eden_size     -= eden_delta;
296     survivor_size += eden_delta/2;
297   }
298   assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
299 
300   if (eden_size < minimum_eden_size) {
301     // May happen due to 64Kb rounding, if so adjust eden size back up
302     minimum_eden_size = align_up(minimum_eden_size, SpaceAlignment);
303     uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
304     uintx unaligned_survivor_size =
305       align_down(maximum_survivor_size, SpaceAlignment);
306     survivor_size = MAX2(unaligned_survivor_size, SpaceAlignment);
307     eden_size = size - (2*survivor_size);
308     assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
309     assert(eden_size >= minimum_eden_size, "just checking");
310   }
311 
312   char *eden_start = _virtual_space.low();
313   char *from_start = eden_start + eden_size;
314   char *to_start   = from_start + survivor_size;
315   char *to_end     = to_start   + survivor_size;
316 
317   assert(to_end == _virtual_space.high(), "just checking");
318   assert(is_aligned(eden_start, SpaceAlignment), "checking alignment");
319   assert(is_aligned(from_start, SpaceAlignment), "checking alignment");
320   assert(is_aligned(to_start, SpaceAlignment),   "checking alignment");
321 
322   MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
323   MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
324   MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);
325 
326   // A minimum eden size implies that there is a part of eden that
327   // is being used and that affects the initialization of any
328   // newly formed eden.
329   bool live_in_eden = minimum_eden_size > 0;
330 
331   // Reset the spaces for their new regions.
332   eden()->initialize(edenMR,
333                      clear_space && !live_in_eden,
334                      SpaceDecorator::Mangle);
335   // If clear_space and live_in_eden, we will not have cleared any
336   // portion of eden above its top. This can cause newly
337   // expanded space not to be mangled if using ZapUnusedHeapArea.
338   // We explicitly do such mangling here.
339   if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
340     eden()->mangle_unused_area();
341   }
342   from()->initialize(fromMR, clear_space, mangle_space);
343   to()->initialize(toMR, clear_space, mangle_space);
344 }
345 
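// After a successful scavenge, the to-space holding the survivors becomes the
// new from-space and the emptied from-space becomes the new to-space; the
// space performance counters are swapped to match.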
void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space        = to();
  _to_space          = s;

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

bool DefNewGeneration::expand(size_t bytes) {
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt to expand to the reserve size.  The
  // request should properly observe the maximum size of
  // the generation so an expand-to-reserve should be
  // unnecessary.  Also, a second call to expand to the
  // reserve value could potentially cause an undue expansion,
  // for example if the first expand fails for unknown reasons
  // but the second succeeds and expands the heap to its
  // maximum value.
  if (GCLocker::is_active()) {
    log_debug(gc)("Garbage collection disabled, expanded heap instead");
  }

  return success;
}

size_t DefNewGeneration::calculate_thread_increase_size(int threads_count) const {
  size_t thread_increase_size = 0;
  // Check for overflow at 'threads_count * NewSizeThreadIncrease'.
  if (threads_count > 0 && NewSizeThreadIncrease <= max_uintx / threads_count) {
    thread_increase_size = threads_count * NewSizeThreadIncrease;
  }
  return thread_increase_size;
}

size_t DefNewGeneration::adjust_for_thread_increase(size_t new_size_candidate,
                                                    size_t new_size_before,
                                                    size_t alignment,
                                                    size_t thread_increase_size) const {
  size_t desired_new_size = new_size_before;

  if (NewSizeThreadIncrease > 0 && thread_increase_size > 0) {

    // 1. Check for overflow at 'new_size_candidate + thread_increase_size'.
    if (new_size_candidate <= max_uintx - thread_increase_size) {
      new_size_candidate += thread_increase_size;

      // 2. Check for overflow at 'align_up'.
      size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));
      if (new_size_candidate <= aligned_max) {
        desired_new_size = align_up(new_size_candidate, alignment);
      }
    }
  }

  return desired_new_size;
}

void DefNewGeneration::compute_new_size() {
  // This is called after a GC that includes the old generation, so from-space
  // will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If either is non-empty we bail out (otherwise we would have to relocate
  // the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  SerialHeap* gch = SerialHeap::heap();

  size_t old_size = gch->old_gen()->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = NewSize;
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = calculate_thread_increase_size(threads_count);

  size_t new_size_candidate = old_size / NewRatio;
  // Compute the desired new generation size based on NewRatio and
  // NewSizeThreadIncrease, reverting to the previous value if any overflow
  // happens.
  size_t desired_new_size = adjust_for_thread_increase(new_size_candidate, new_size_before,
                                                       alignment, thread_increase_size);

  // Adjust new generation size
  desired_new_size = clamp(desired_new_size, min_new_size, max_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
      changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false.  If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // Only shrink if eden is empty; bail out of shrinking if there are
    // objects in eden.
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    gch->rem_set()->resize_covered_region(cmr);

    log_debug(gc, ergo, heap)(
        "New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, _virtual_space.committed_size()/K,
        eden()->capacity()/K, from()->capacity()/K);
    log_trace(gc, ergo, heap)(
        "  [allowed " SIZE_FORMAT "K extra for %d threads]",
        thread_increase_size/K, threads_count);
  }
}

void DefNewGeneration::ref_processor_init() {
  assert(_ref_processor == nullptr, "a reference processor already exists");
  assert(!_reserved.is_empty(), "empty generation?");
  _span_based_discoverer.set_span(_reserved);
  _ref_processor = new ReferenceProcessor(&_span_based_discoverer);    // a vanilla reference processor
}

size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, SpaceAlignment);
}

bool DefNewGeneration::is_in(const void* p) const {
  return eden()->is_in(p)
      || from()->is_in(p)
      || to()  ->is_in(p);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}

// Returns the address of the start of the "block" that contains "p".
// We say "block" instead of "object" since some heaps may not pack
// objects densely; a chunk may either be an object or a non-object.
// Precondition: "p" must be within the space.
// Very general, slow implementation.
static HeapWord* block_start_const(const ContiguousSpace* cs, const void* p) {
  assert(MemRegion(cs->bottom(), cs->end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(cs->bottom()), p2i(cs->end()));
  if (p >= cs->top()) {
    return cs->top();
  } else {
    HeapWord* last = cs->bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += cast_to_oop(cur)->size();
    }
    assert(oopDesc::is_oop(cast_to_oop(last)), PTR_FORMAT " should be an object start", p2i(last));
    return last;
  }
}

HeapWord* DefNewGeneration::block_start(const void* p) const {
  if (eden()->is_in_reserved(p)) {
    return block_start_const(eden(), p);
  }
  if (from()->is_in_reserved(p)) {
    return block_start_const(from(), p);
  }
  assert(to()->is_in_reserved(p), "inv");
  return block_start_const(to(), p);
}

void DefNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to TargetSurvivorRatio percent of the real
  // survivor space (half of it by default).
  size_t const survivor_capacity = to()->capacity() / HeapWordSize;
  size_t const desired_survivor_size = (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);

  _tenuring_threshold = age_table()->compute_tenuring_threshold(desired_survivor_size);

  if (UsePerfData) {
    GCPolicyCounters* gc_counters = SerialHeap::heap()->counters();
    gc_counters->tenuring_threshold()->set_value(_tenuring_threshold);
    gc_counters->desired_survivor_size()->set_value(desired_survivor_size * oopSize);
  }

  age_table()->print_age_table();
}

bool DefNewGeneration::collect(bool clear_all_soft_refs) {
  SerialHeap* heap = SerialHeap::heap();

  assert(to()->is_empty(), "Else not collection_attempt_is_safe");
  _gc_timer->register_gc_start();
  _gc_tracer->report_gc_start(heap->gc_cause(), _gc_timer->gc_start());
  _ref_processor->start_discovery(clear_all_soft_refs);

  _old_gen = heap->old_gen();

  init_assuming_no_promotion_failure();

  GCTraceTime(Trace, gc, phases) tm("DefNew", nullptr, heap->gc_cause());

  heap->trace_heap_before_gc(_gc_tracer);

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  YoungGenScanClosure young_gen_cl(this);
  OldGenScanClosure   old_gen_cl(this);

  FastEvacuateFollowersClosure evacuate_followers(heap,
                                                  &young_gen_cl,
                                                  &old_gen_cl);

  {
    StrongRootsScope srs(0);
    RootScanClosure root_cl{this};
    CLDScanClosure cld_cl{this};

    MarkingNMethodClosure code_cl(&root_cl,
                                  NMethodToOopClosure::FixRelocations,
                                  false /* keepalive_nmethods */);

    HeapWord* saved_top_in_old_gen = _old_gen->space()->top();
    heap->process_roots(SerialHeap::SO_ScavengeCodeCache,
                        &root_cl,
                        &cld_cl,
                        &cld_cl,
                        &code_cl);

    _old_gen->scan_old_to_young_refs(saved_top_in_old_gen);
  }

  // "evacuate followers".
  evacuate_followers.do_void();

  {
    // Reference processing
    KeepAliveClosure keep_alive(this);
    ReferenceProcessor* rp = ref_processor();
    ReferenceProcessorPhaseTimes pt(_gc_timer, rp->max_num_queues());
    SerialGCRefProcProxyTask task(is_alive, keep_alive, evacuate_followers);
    const ReferenceProcessorStats& stats = rp->process_discovered_references(task, pt);
    _gc_tracer->report_gc_reference_stats(stats);
    _gc_tracer->report_tenuring_threshold(tenuring_threshold());
    pt.print_all_references();
  }

  {
    AdjustWeakRootClosure cl{this};
    WeakProcessor::weak_oops_do(&is_alive, &cl);
  }

  _string_dedup_requests.flush();

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    log_info(gc, promotion)("Promotion failed");

    _gc_tracer->report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(heap->reset_promotion_should_fail();)
  }

  heap->trace_heap_after_gc(_gc_tracer);

  _gc_timer->register_gc_end();

  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());

  return !_promotion_failed;
}

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  _promotion_failed_info.reset();
}

void DefNewGeneration::remove_forwarding_pointers() {
  assert(_promotion_failed, "precondition");

  // Will enter Full GC soon due to failed promotion. Must reset the mark word
  // of objs in young-gen so that no objs are marked (forwarded) when Full GC
  // starts. (The mark word is overloaded: `is_marked()` == `is_forwarded()`.)
  struct ResetForwardedMarkWord : ObjectClosure {
    void do_object(oop obj) override {
      if (obj->is_self_forwarded()) {
        obj->unset_self_forwarded();
      } else if (obj->is_forwarded()) {
        // Restore the klass bits in the header; needed for object
        // iteration to work properly.
        obj->set_mark(obj->forwardee()->prototype_mark());
      }
    }
  } cl;
  eden()->object_iterate(&cl);
  from()->object_iterate(&cl);
}

void DefNewGeneration::handle_promotion_failure(oop old) {
  log_debug(gc, promotion)("Promotion failure size = " SIZE_FORMAT, old->size());

  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());

  ContinuationGCSupport::transform_stack_chunk(old);

  // forward to self
  old->forward_to_self();

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}

oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = nullptr;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = cast_to_oop(to()->allocate(s));
  }

  bool new_obj_is_tenured = false;
  // Otherwise try allocating obj tenured
  if (obj == nullptr) {
    obj = _old_gen->allocate_for_promotion(old, s);
    if (obj == nullptr) {
      handle_promotion_failure(old);
      return old;
    }

    new_obj_is_tenured = true;
  }

  // Prefetch beyond obj
  const intx interval = PrefetchCopyIntervalInBytes;
  Prefetch::write(obj, interval);

  // Copy obj
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), cast_from_oop<HeapWord*>(obj), s);

  ContinuationGCSupport::transform_stack_chunk(obj);

  if (!new_obj_is_tenured) {
    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done; insert forwarding pointer to obj in old's mark word.
  old->forward_to(obj);

  if (SerialStringDedup::is_candidate_from_evacuation(obj, new_obj_is_tenured)) {
    // Record old; request adds a new weak reference, which reference
    // processing expects to refer to a from-space object.
    _string_dedup_requests.add(old);
  }
  return obj;
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  PromoteFailureClosure cl{this};
  while (!_promo_failure_scan_stack.is_empty()) {
    oop obj = _promo_failure_scan_stack.pop();
    obj->oop_iterate(&cl);
  }
}

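// Expose the unused tail of to-space as scratch memory for the caller, but
// only if no promotion failure occurred and at least MinFreeScratchWords
// words are free.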
void DefNewGeneration::contribute_scratch(void*& scratch, size_t& num_words) {
  if (_promotion_failed) {
    return;
  }

  const size_t MinFreeScratchWords = 100;

  ContiguousSpace* to_space = to();
  const size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    scratch = to_space->top();
    num_words = free_words;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea.  This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area();
  }
}

void DefNewGeneration::gc_epilogue(bool full) {
  assert(!GCLocker::is_active(), "We should not be executing here");
  // update the generation and space performance counters
  update_counters();
}

void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify() {
  eden()->verify();
  from()->verify();
  to()->verify();
}

void DefNewGeneration::print_on(outputStream* st) const {
  st->print(" %-10s", name());

  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
            capacity()/K, used()/K);
  st->print_cr(" [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ")",
               p2i(_virtual_space.low_boundary()),
               p2i(_virtual_space.high()),
               p2i(_virtual_space.high_boundary()));

  st->print("  eden");
  eden()->print_on(st);
  st->print("  from");
  from()->print_on(st);
  st->print("  to  ");
  to()->print_on(st);
}

HeapWord* DefNewGeneration::allocate(size_t word_size) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size) {
  return eden()->par_allocate(word_size);
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::tlab_used() const {
  return eden()->used();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}