1 /*
   2  * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/serial/cardTableRS.hpp"
  27 #include "gc/serial/defNewGeneration.inline.hpp"
  28 #include "gc/serial/serialGcRefProcProxyTask.hpp"
  29 #include "gc/serial/serialHeap.inline.hpp"
  30 #include "gc/serial/serialStringDedup.inline.hpp"
  31 #include "gc/serial/tenuredGeneration.hpp"
  32 #include "gc/shared/adaptiveSizePolicy.hpp"
  33 #include "gc/shared/ageTable.inline.hpp"
  34 #include "gc/shared/collectorCounters.hpp"
  35 #include "gc/shared/continuationGCSupport.inline.hpp"
  36 #include "gc/shared/gcArguments.hpp"
  37 #include "gc/shared/gcHeapSummary.hpp"
  38 #include "gc/shared/gcLocker.hpp"
  39 #include "gc/shared/gcPolicyCounters.hpp"
  40 #include "gc/shared/gcTimer.hpp"
  41 #include "gc/shared/gcTrace.hpp"
  42 #include "gc/shared/gcTraceTime.inline.hpp"
  43 #include "gc/shared/preservedMarks.inline.hpp"
  44 #include "gc/shared/referencePolicy.hpp"
  45 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  46 #include "gc/shared/space.inline.hpp"
  47 #include "gc/shared/spaceDecorator.inline.hpp"
  48 #include "gc/shared/strongRootsScope.hpp"
  49 #include "gc/shared/weakProcessor.hpp"
  50 #include "logging/log.hpp"
  51 #include "memory/iterator.inline.hpp"
  52 #include "memory/resourceArea.hpp"
  53 #include "oops/instanceRefKlass.hpp"
  54 #include "oops/oop.inline.hpp"
  55 #include "runtime/java.hpp"
  56 #include "runtime/javaThread.hpp"
  57 #include "runtime/prefetch.inline.hpp"
  58 #include "runtime/threads.hpp"
  59 #include "utilities/align.hpp"
  60 #include "utilities/copy.hpp"
  61 #include "utilities/globalDefinitions.hpp"
  62 #include "utilities/stack.inline.hpp"
  63 
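     // Scans the fields of objects that could not be promoted (and were
     // therefore forwarded to themselves) so that the young-gen objects they
     // reference are still evacuated.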
  64 class PromoteFailureClosure : public InHeapScanClosure {
  65   template <typename T>
  66   void do_oop_work(T* p) {
  67     assert(is_in_young_gen(p), "promote-fail objs must be in young-gen");
  68     assert(!SerialHeap::heap()->young_gen()->to()->is_in_reserved(p), "must not be in to-space");
  69 
  70     try_scavenge(p, [] (auto) {});
  71   }
  72 public:
  73   PromoteFailureClosure(DefNewGeneration* g) : InHeapScanClosure(g) {}
  74 
  75   void do_oop(oop* p)       { do_oop_work(p); }
  76   void do_oop(narrowOop* p) { do_oop_work(p); }
  77 };
  78 
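     // Applied to fields of objects already copied to to-space during the
     // "evacuate followers" phase; scavenges any young-gen objects they reference.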
  79 class YoungGenScanClosure : public InHeapScanClosure {
  80   template <typename T>
  81   void do_oop_work(T* p) {
  82     assert(SerialHeap::heap()->young_gen()->to()->is_in_reserved(p), "precondition");
  83 
  84     try_scavenge(p, [] (auto) {});
  85   }
  86 public:
  87   YoungGenScanClosure(DefNewGeneration* g) : InHeapScanClosure(g) {}
  88 
  89   void do_oop(oop* p)       { do_oop_work(p); }
  90   void do_oop(narrowOop* p) { do_oop_work(p); }
  91 };
  92 
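     // Applied to root pointers located outside the heap; scavenges the
     // young-gen objects they reference.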
  93 class RootScanClosure : public OffHeapScanClosure {
  94   template <typename T>
  95   void do_oop_work(T* p) {
  96     assert(!SerialHeap::heap()->is_in_reserved(p), "outside the heap");
  97 
  98     try_scavenge(p, [] (auto) {});
  99   }
 100 public:
 101   RootScanClosure(DefNewGeneration* g) : OffHeapScanClosure(g) {}
 102 
 103   void do_oop(oop* p)       { do_oop_work(p); }
 104   void do_oop(narrowOop* p) { do_oop_work(p); }
 105 };
 106 
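     // Scans a ClassLoaderData only if it has been marked as containing
     // modified oops, and re-dirties it if it still references young-gen
     // objects after the scan.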
 107 class CLDScanClosure: public CLDClosure {
 108 
 109   class CLDOopClosure : public OffHeapScanClosure {
 110     ClassLoaderData* _scanned_cld;
 111 
 112     template <typename T>
 113     void do_oop_work(T* p) {
 114       assert(!SerialHeap::heap()->is_in_reserved(p), "outside the heap");
 115 
 116       try_scavenge(p, [&] (oop new_obj) {
 117         assert(_scanned_cld != nullptr, "inv");
 118         if (is_in_young_gen(new_obj) && !_scanned_cld->has_modified_oops()) {
 119           _scanned_cld->record_modified_oops();
 120         }
 121       });
 122     }
 123 
 124   public:
 125     CLDOopClosure(DefNewGeneration* g) : OffHeapScanClosure(g),
 126       _scanned_cld(nullptr) {}
 127 
 128     void set_scanned_cld(ClassLoaderData* cld) {
 129       assert(cld == nullptr || _scanned_cld == nullptr, "Must be");
 130       _scanned_cld = cld;
 131     }
 132 
 133     void do_oop(oop* p)       { do_oop_work(p); }
 134     void do_oop(narrowOop* p) { ShouldNotReachHere(); }
 135   };
 136 
 137   CLDOopClosure _oop_closure;
 138  public:
 139   CLDScanClosure(DefNewGeneration* g) : _oop_closure(g) {}
 140 
 141   void do_cld(ClassLoaderData* cld) {
 142     // If the cld has not been dirtied we know that there are
 143     // no references into the young gen and we can skip it.
 144     if (cld->has_modified_oops()) {
 145 
 146       // Tell the closure which CLD is being scanned so that it can be dirtied
 147       // if oops are left pointing into the young gen.
 148       _oop_closure.set_scanned_cld(cld);
 149 
 150       // Clean the cld since we're going to scavenge all the metadata.
 151       cld->oops_do(&_oop_closure, ClassLoaderData::_claim_none, /*clear_modified_oops*/true);
 152 
 153       _oop_closure.set_scanned_cld(nullptr);
 154     }
 155   }
 156 };
 157 
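     // An object at or above the young-gen end is always considered live;
     // a young-gen object is live iff it has been forwarded.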
 158 class IsAliveClosure: public BoolObjectClosure {
 159   HeapWord*         _young_gen_end;
 160 public:
 161   IsAliveClosure(DefNewGeneration* g): _young_gen_end(g->reserved().end()) {}
 162 
 163   bool do_object_b(oop p) {
 164     return cast_from_oop<HeapWord*>(p) >= _young_gen_end || p->is_forwarded();
 165   }
 166 };
 167 
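     // Updates weak roots outside the heap: a pointer to a forwarded young-gen
     // object is redirected to the object's new location.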
 168 class AdjustWeakRootClosure: public OffHeapScanClosure {
 169   template <class T>
 170   void do_oop_work(T* p) {
 171     DEBUG_ONLY(SerialHeap* heap = SerialHeap::heap();)
 172     assert(!heap->is_in_reserved(p), "outside the heap");
 173 
 174     oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
 175     if (is_in_young_gen(obj)) {
 176       assert(!heap->young_gen()->to()->is_in_reserved(obj), "inv");
 177       assert(obj->is_forwarded(), "forwarded before weak-root-processing");
 178       oop new_obj = obj->forwardee();
 179       RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
 180     }
 181   }
 182  public:
 183   AdjustWeakRootClosure(DefNewGeneration* g): OffHeapScanClosure(g) {}
 184 
 185   void do_oop(oop* p)       { do_oop_work(p); }
 186   void do_oop(narrowOop* p) { ShouldNotReachHere(); }
 187 };
 188 
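     // Keeps referents alive during reference processing: copies an
     // unforwarded young-gen object to the survivor space (or promotes it),
     // and dirties the card when an old-to-young pointer remains.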
 189 class KeepAliveClosure: public OopClosure {
 190   DefNewGeneration* _young_gen;
 191   HeapWord*         _young_gen_end;
 192   CardTableRS* _rs;
 193 
 194   bool is_in_young_gen(void* p) const {
 195     return p < _young_gen_end;
 196   }
 197 
 198   template <class T>
 199   void do_oop_work(T* p) {
 200     oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
 201 
 202     if (is_in_young_gen(obj)) {
 203       oop new_obj = obj->is_forwarded() ? obj->forwardee()
 204                                         : _young_gen->copy_to_survivor_space(obj);
 205       RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
 206 
 207       if (is_in_young_gen(new_obj) && !is_in_young_gen(p)) {
 208         _rs->inline_write_ref_field_gc(p);
 209       }
 210     }
 211   }
 212 public:
 213   KeepAliveClosure(DefNewGeneration* g) :
 214     _young_gen(g),
 215     _young_gen_end(g->reserved().end()),
 216     _rs(SerialHeap::heap()->rem_set()) {}
 217 
 218   void do_oop(oop* p)       { do_oop_work(p); }
 219   void do_oop(narrowOop* p) { do_oop_work(p); }
 220 };
 221 
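     // Transitively evacuates everything reachable from the objects copied so
     // far, by re-scanning newly copied or promoted objects until no
     // allocations have happened since the last save-marks point.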
 222 class FastEvacuateFollowersClosure: public VoidClosure {
 223   SerialHeap* _heap;
 224   YoungGenScanClosure* _young_cl;
 225   OldGenScanClosure* _old_cl;
 226 public:
 227   FastEvacuateFollowersClosure(SerialHeap* heap,
 228                                YoungGenScanClosure* young_cl,
 229                                OldGenScanClosure* old_cl) :
 230     _heap(heap), _young_cl(young_cl), _old_cl(old_cl)
 231   {}
 232 
 233   void do_void() {
 234     do {
 235       _heap->oop_since_save_marks_iterate(_young_cl, _old_cl);
 236     } while (!_heap->no_allocs_since_save_marks());
 237     guarantee(_heap->young_gen()->promo_failure_scan_is_complete(), "Failed to finish scan");
 238   }
 239 };
 240 
 241 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
 242                                    size_t initial_size,
 243                                    size_t min_size,
 244                                    size_t max_size,
 245                                    const char* policy)
 246   : Generation(rs, initial_size),
 247     _preserved_marks_set(false /* in_c_heap */),
 248     _promo_failure_drain_in_progress(false),
 249     _should_allocate_from_space(false),
 250     _string_dedup_requests()
 251 {
 252   MemRegion cmr((HeapWord*)_virtual_space.low(),
 253                 (HeapWord*)_virtual_space.high());
 254   SerialHeap* gch = SerialHeap::heap();
 255 
 256   gch->rem_set()->resize_covered_region(cmr);
 257 
 258   _eden_space = new ContiguousSpace();
 259   _from_space = new ContiguousSpace();
 260   _to_space   = new ContiguousSpace();
 261 
 262   // Compute the maximum eden and survivor space sizes. These sizes
 263   // are computed assuming the entire reserved space is committed.
 264   // These values are exported as performance counters.
 265   uintx size = _virtual_space.reserved_size();
 266   _max_survivor_size = compute_survivor_size(size, SpaceAlignment);
 267   _max_eden_size = size - (2*_max_survivor_size);
 268 
 269   // allocate the performance counters
 270 
 271   // Generation counters -- generation 0, 3 subspaces
 272   _gen_counters = new GenerationCounters("new", 0, 3,
 273       min_size, max_size, &_virtual_space);
 274   _gc_counters = new CollectorCounters(policy, 0);
 275 
 276   _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
 277                                       _gen_counters);
 278   _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
 279                                       _gen_counters);
 280   _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
 281                                     _gen_counters);
 282 
 283   compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
 284   update_counters();
 285   _old_gen = nullptr;
 286   _tenuring_threshold = MaxTenuringThreshold;
 287   _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
 288 
 289   _ref_processor = nullptr;
 290 
 291   _gc_timer = new STWGCTimer();
 292 
 293   _gc_tracer = new DefNewTracer();
 294 }
 295 
 296 void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
 297                                                 bool clear_space,
 298                                                 bool mangle_space) {
 299   // If the spaces are being cleared (only done at heap initialization
 300   // currently), the survivor spaces need not be empty.
 301   // Otherwise, no care is taken for used areas in the survivor spaces,
 302   // so check that they are empty.
 303   assert(clear_space || (to()->is_empty() && from()->is_empty()),
 304     "Initialization of the survivor spaces assumes these are empty");
 305 
 306   // Compute sizes
 307   uintx size = _virtual_space.committed_size();
 308   uintx survivor_size = compute_survivor_size(size, SpaceAlignment);
 309   uintx eden_size = size - (2*survivor_size);
 310   if (eden_size > max_eden_size()) {
 311     // Need to reduce eden_size to satisfy the max constraint. The delta needs
 312     // to be 2*SpaceAlignment aligned so that both survivors are properly
 313     // aligned.
 314     uintx eden_delta = align_up(eden_size - max_eden_size(), 2*SpaceAlignment);
 315     eden_size     -= eden_delta;
 316     survivor_size += eden_delta/2;
 317   }
 318   assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
 319 
 320   if (eden_size < minimum_eden_size) {
 321     // May happen due to 64Kb rounding; if so, adjust eden size back up.
 322     minimum_eden_size = align_up(minimum_eden_size, SpaceAlignment);
 323     uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
 324     uintx unaligned_survivor_size =
 325       align_down(maximum_survivor_size, SpaceAlignment);
 326     survivor_size = MAX2(unaligned_survivor_size, SpaceAlignment);
 327     eden_size = size - (2*survivor_size);
 328     assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
 329     assert(eden_size >= minimum_eden_size, "just checking");
 330   }
 331 
 332   char *eden_start = _virtual_space.low();
 333   char *from_start = eden_start + eden_size;
 334   char *to_start   = from_start + survivor_size;
 335   char *to_end     = to_start   + survivor_size;
 336 
 337   assert(to_end == _virtual_space.high(), "just checking");
 338   assert(is_aligned(eden_start, SpaceAlignment), "checking alignment");
 339   assert(is_aligned(from_start, SpaceAlignment), "checking alignment");
 340   assert(is_aligned(to_start, SpaceAlignment),   "checking alignment");
 341 
 342   MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
 343   MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
 344   MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);
 345 
 346   // A minimum eden size implies that there is a part of eden that
 347   // is being used and that affects the initialization of any
 348   // newly formed eden.
 349   bool live_in_eden = minimum_eden_size > 0;
 350 
 351   // If not clearing the spaces, do some checking to verify that
 352   // the spaces are already mangled.
 353   if (!clear_space) {
 354     // Must check mangling before the spaces are reshaped.  Otherwise,
 355     // the bottom or end of one space may have moved into another, and
 356     // a failure of the check may not correctly indicate which space
 357     // is not properly mangled.
 358     if (ZapUnusedHeapArea) {
 359       HeapWord* limit = (HeapWord*) _virtual_space.high();
 360       eden()->check_mangled_unused_area(limit);
 361       from()->check_mangled_unused_area(limit);
 362         to()->check_mangled_unused_area(limit);
 363     }
 364   }
 365 
 366   // Reset the spaces for their new regions.
 367   eden()->initialize(edenMR,
 368                      clear_space && !live_in_eden,
 369                      SpaceDecorator::Mangle);
 370   // If clear_space and live_in_eden, we will not have cleared any
 371   // portion of eden above its top. This can cause newly
 372   // expanded space not to be mangled if using ZapUnusedHeapArea.
 373   // We explicitly do such mangling here.
 374   if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
 375     eden()->mangle_unused_area();
 376   }
 377   from()->initialize(fromMR, clear_space, mangle_space);
 378   to()->initialize(toMR, clear_space, mangle_space);
 379 
 380   // Set next compaction spaces.
 381   eden()->set_next_compaction_space(from());
 382   // The to-space is normally empty before a compaction so need
 383   // not be considered.  The exception is during promotion
 384   // failure handling when to-space can contain live objects.
 385   from()->set_next_compaction_space(nullptr);
 386 }
 387 
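     // Exchange the roles of from-space and to-space; after a successful
     // scavenge the former to-space holds the survivors and becomes from-space.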
 388 void DefNewGeneration::swap_spaces() {
 389   ContiguousSpace* s = from();
 390   _from_space        = to();
 391   _to_space          = s;
 392   eden()->set_next_compaction_space(from());
 393   // The to-space is normally empty before a compaction so need
 394   // not be considered.  The exception is during promotion
 395   // failure handling when to-space can contain live objects.
 396   from()->set_next_compaction_space(nullptr);
 397 
 398   if (UsePerfData) {
 399     CSpaceCounters* c = _from_counters;
 400     _from_counters = _to_counters;
 401     _to_counters = c;
 402   }
 403 }
 404 
 405 bool DefNewGeneration::expand(size_t bytes) {
 406   HeapWord* prev_high = (HeapWord*) _virtual_space.high();
 407   bool success = _virtual_space.expand_by(bytes);
 408   if (success && ZapUnusedHeapArea) {
 409     // Mangle newly committed space immediately because it
 410     // can be done here more simply than after the new
 411     // spaces have been computed.
 412     HeapWord* new_high = (HeapWord*) _virtual_space.high();
 413     MemRegion mangle_region(prev_high, new_high);
 414     SpaceMangler::mangle_region(mangle_region);
 415   }
 416 
 417   // Do not attempt to expand to the reserve size.  The
 418   // request should properly observe the maximum size of
 419   // the generation so an expand-to-reserve should be
 420   // unnecessary.  Also, a second call to expand-to-reserve
 421   // could potentially cause an undue expansion,
 422   // for example if the first expand fails for unknown reasons
 423   // but the second succeeds and expands the heap to its maximum
 424   // value.
 425   if (GCLocker::is_active()) {
 426     log_debug(gc)("Garbage collection disabled, expanded heap instead");
 427   }
 428 
 429   return success;
 430 }
 431 
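     // Returns threads_count * NewSizeThreadIncrease, or 0 if the product would
     // overflow.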
 432 size_t DefNewGeneration::calculate_thread_increase_size(int threads_count) const {
 433   size_t thread_increase_size = 0;
 434   // Check for overflow at 'threads_count * NewSizeThreadIncrease'.
 435   if (threads_count > 0 && NewSizeThreadIncrease <= max_uintx / threads_count) {
 436     thread_increase_size = threads_count * NewSizeThreadIncrease;
 437   }
 438   return thread_increase_size;
 439 }
 440 
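     // Adds the per-thread increase to the candidate size and aligns the result,
     // falling back to new_size_before if either step would overflow.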
 441 size_t DefNewGeneration::adjust_for_thread_increase(size_t new_size_candidate,
 442                                                     size_t new_size_before,
 443                                                     size_t alignment,
 444                                                     size_t thread_increase_size) const {
 445   size_t desired_new_size = new_size_before;
 446 
 447   if (NewSizeThreadIncrease > 0 && thread_increase_size > 0) {
 448 
 449     // 1. Check for overflow at 'new_size_candidate + thread_increase_size'.
 450     if (new_size_candidate <= max_uintx - thread_increase_size) {
 451       new_size_candidate += thread_increase_size;
 452 
 453       // 2. Check for overflow at 'align_up'.
 454       size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));
 455       if (new_size_candidate <= aligned_max) {
 456         desired_new_size = align_up(new_size_candidate, alignment);
 457       }
 458     }
 459   }
 460 
 461   return desired_new_size;
 462 }
 463 
 464 void DefNewGeneration::compute_new_size() {
 465   // This is called after a GC that includes the old generation, so from-space
 466   // will normally be empty.
 467   // Note that we check both spaces, since if scavenge failed they revert roles.
 468   // If they are not empty, we bail out (otherwise we would have to relocate the objects).
 469   if (!from()->is_empty() || !to()->is_empty()) {
 470     return;
 471   }
 472 
 473   SerialHeap* gch = SerialHeap::heap();
 474 
 475   size_t old_size = gch->old_gen()->capacity();
 476   size_t new_size_before = _virtual_space.committed_size();
 477   size_t min_new_size = NewSize;
 478   size_t max_new_size = reserved().byte_size();
 479   assert(min_new_size <= new_size_before &&
 480          new_size_before <= max_new_size,
 481          "just checking");
 482   // All space sizes must be multiples of Generation::GenGrain.
 483   size_t alignment = Generation::GenGrain;
 484 
 485   int threads_count = Threads::number_of_non_daemon_threads();
 486   size_t thread_increase_size = calculate_thread_increase_size(threads_count);
 487 
 488   size_t new_size_candidate = old_size / NewRatio;
 489   // Compute desired new generation size based on NewRatio and NewSizeThreadIncrease,
 490   // reverting to the previous value if any overflow happens.
 491   size_t desired_new_size = adjust_for_thread_increase(new_size_candidate, new_size_before,
 492                                                        alignment, thread_increase_size);
 493 
 494   // Adjust new generation size
 495   desired_new_size = clamp(desired_new_size, min_new_size, max_new_size);
 496   assert(desired_new_size <= max_new_size, "just checking");
 497 
 498   bool changed = false;
 499   if (desired_new_size > new_size_before) {
 500     size_t change = desired_new_size - new_size_before;
 501     assert(change % alignment == 0, "just checking");
 502     if (expand(change)) {
 503        changed = true;
 504     }
 505     // If the heap failed to expand to the desired size,
 506     // "changed" will be false.  If the expansion failed
 507     // (and at this point it was expected to succeed),
 508     // ignore the failure (leaving "changed" as false).
 509   }
 510   if (desired_new_size < new_size_before && eden()->is_empty()) {
 511     // Bail out of shrinking if there are objects in eden.
 512     size_t change = new_size_before - desired_new_size;
 513     assert(change % alignment == 0, "just checking");
 514     _virtual_space.shrink_by(change);
 515     changed = true;
 516   }
 517   if (changed) {
 518     // The spaces have already been mangled at this point but
 519     // may not have been cleared (set top = bottom) and should be.
 520     // Mangling was done when the heap was being expanded.
 521     compute_space_boundaries(eden()->used(),
 522                              SpaceDecorator::Clear,
 523                              SpaceDecorator::DontMangle);
 524     MemRegion cmr((HeapWord*)_virtual_space.low(),
 525                   (HeapWord*)_virtual_space.high());
 526     gch->rem_set()->resize_covered_region(cmr);
 527 
 528     log_debug(gc, ergo, heap)(
 529         "New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
 530         new_size_before/K, _virtual_space.committed_size()/K,
 531         eden()->capacity()/K, from()->capacity()/K);
 532     log_trace(gc, ergo, heap)(
 533         "  [allowed " SIZE_FORMAT "K extra for %d threads]",
 534         thread_increase_size/K, threads_count);
 535   }
 536 }
 537 
 538 void DefNewGeneration::ref_processor_init() {
 539   assert(_ref_processor == nullptr, "a reference processor already exists");
 540   assert(!_reserved.is_empty(), "empty generation?");
 541   _span_based_discoverer.set_span(_reserved);
 542   _ref_processor = new ReferenceProcessor(&_span_based_discoverer);    // a vanilla reference processor
 543 }
 544 
 545 size_t DefNewGeneration::capacity() const {
 546   return eden()->capacity()
 547        + from()->capacity();  // to() is only used during scavenge
 548 }
 549 
 550 
 551 size_t DefNewGeneration::used() const {
 552   return eden()->used()
 553        + from()->used();      // to() is only used during scavenge
 554 }
 555 
 556 
 557 size_t DefNewGeneration::free() const {
 558   return eden()->free()
 559        + from()->free();      // to() is only used during scavenge
 560 }
 561 
 562 size_t DefNewGeneration::max_capacity() const {
 563   const size_t reserved_bytes = reserved().byte_size();
 564   return reserved_bytes - compute_survivor_size(reserved_bytes, SpaceAlignment);
 565 }
 566 
 567 bool DefNewGeneration::is_in(const void* p) const {
 568   return eden()->is_in(p)
 569       || from()->is_in(p)
 570       || to()  ->is_in(p);
 571 }
 572 
 573 size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
 574   return eden()->free();
 575 }
 576 
 577 size_t DefNewGeneration::capacity_before_gc() const {
 578   return eden()->capacity();
 579 }
 580 
 581 size_t DefNewGeneration::contiguous_available() const {
 582   return eden()->free();
 583 }
 584 
 585 
 586 void DefNewGeneration::object_iterate(ObjectClosure* blk) {
 587   eden()->object_iterate(blk);
 588   from()->object_iterate(blk);
 589 }
 590 
 591 HeapWord* DefNewGeneration::block_start(const void* p) const {
 592   if (eden()->is_in_reserved(p)) {
 593     return eden()->block_start_const(p);
 594   }
 595   if (from()->is_in_reserved(p)) {
 596     return from()->block_start_const(p);
 597   }
 598   assert(to()->is_in_reserved(p), "inv");
 599   return to()->block_start_const(p);
 600 }
 601 
 602 // The last collection bailed out; we are running out of heap space,
 603 // so we try to allocate in the from-space, too.
 604 HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
 605   bool should_try_alloc = should_allocate_from_space() || GCLocker::is_active_and_needs_gc();
 606 
 607   // If the Heap_lock is not locked by this thread, this will be called
 608   // again later with the Heap_lock held.
 609   bool do_alloc = should_try_alloc && (Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()));
 610 
 611   HeapWord* result = nullptr;
 612   if (do_alloc) {
 613     result = from()->allocate(size);
 614   }
 615 
 616   log_trace(gc, alloc)("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "):  will_fail: %s  heap_lock: %s  free: " SIZE_FORMAT "%s%s returns %s",
 617                         size,
 618                         SerialHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
 619                           "true" : "false",
 620                         Heap_lock->is_locked() ? "locked" : "unlocked",
 621                         from()->free(),
 622                         should_try_alloc ? "" : "  should_allocate_from_space: NOT",
 623                         do_alloc ? "  Heap_lock is not owned by self" : "",
 624                         result == nullptr ? "null" : "object");
 625 
 626   return result;
 627 }
 628 
 629 HeapWord* DefNewGeneration::expand_and_allocate(size_t size, bool is_tlab) {
 630   // We don't attempt to expand the young generation (but perhaps we should).
 631   return allocate(size, is_tlab);
 632 }
 633 
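     // Recompute the tenuring threshold from the age table, aiming to keep the
     // survivors within TargetSurvivorRatio percent of the survivor space.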
 634 void DefNewGeneration::adjust_desired_tenuring_threshold() {
 635   // Set the desired survivor size to TargetSurvivorRatio percent of the real survivor space
 636   size_t const survivor_capacity = to()->capacity() / HeapWordSize;
 637   size_t const desired_survivor_size = (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
 638 
 639   _tenuring_threshold = age_table()->compute_tenuring_threshold(desired_survivor_size);
 640 
 641   if (UsePerfData) {
 642     GCPolicyCounters* gc_counters = SerialHeap::heap()->counters();
 643     gc_counters->tenuring_threshold()->set_value(_tenuring_threshold);
 644     gc_counters->desired_survivor_size()->set_value(desired_survivor_size * oopSize);
 645   }
 646 
 647   age_table()->print_age_table(_tenuring_threshold);
 648 }
 649 
 650 void DefNewGeneration::collect(bool   full,
 651                                bool   clear_all_soft_refs,
 652                                size_t size,
 653                                bool   is_tlab) {
 654   assert(full || size > 0, "otherwise we don't want to collect");
 655 
 656   SerialHeap* heap = SerialHeap::heap();
 657 
 658   // If the next generation is too full to accommodate promotion
 659   // from this generation, pass on collection; let the next generation
 660   // do it.
 661   if (!collection_attempt_is_safe()) {
 662     log_trace(gc)(":: Collection attempt not safe ::");
 663     heap->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
 664     return;
 665   }
 666   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 667   _gc_timer->register_gc_start();
 668   _gc_tracer->report_gc_start(heap->gc_cause(), _gc_timer->gc_start());
 669   _ref_processor->start_discovery(clear_all_soft_refs);
 670 
 671   _old_gen = heap->old_gen();
 672 
 673   init_assuming_no_promotion_failure();
 674 
 675   GCTraceTime(Trace, gc, phases) tm("DefNew", nullptr, heap->gc_cause());
 676 
 677   heap->trace_heap_before_gc(_gc_tracer);
 678 
 679   // These can be shared for all code paths
 680   IsAliveClosure is_alive(this);
 681 
 682   age_table()->clear();
 683   to()->clear(SpaceDecorator::Mangle);
 684   // The preserved marks should be empty at the start of the GC.
 685   _preserved_marks_set.init(1);
 686 
 687   assert(heap->no_allocs_since_save_marks(),
 688          "save marks have not been newly set.");
 689 
 690   YoungGenScanClosure young_gen_cl(this);
 691   OldGenScanClosure   old_gen_cl(this);
 692 
 693   FastEvacuateFollowersClosure evacuate_followers(heap,
 694                                                   &young_gen_cl,
 695                                                   &old_gen_cl);
 696 
 697   assert(heap->no_allocs_since_save_marks(),
 698          "save marks have not been newly set.");
 699 
 700   {
 701     StrongRootsScope srs(0);
 702     RootScanClosure root_cl{this};
 703     CLDScanClosure cld_cl{this};
 704 
 705     MarkingCodeBlobClosure code_cl(&root_cl,
 706                                    CodeBlobToOopClosure::FixRelocations,
 707                                    false /* keepalive_nmethods */);
 708 
 709     heap->process_roots(SerialHeap::SO_ScavengeCodeCache,
 710                         &root_cl,
 711                         &cld_cl,
 712                         &cld_cl,
 713                         &code_cl);
 714 
 715     _old_gen->scan_old_to_young_refs();
 716   }
 717 
 718   // "evacuate followers".
 719   evacuate_followers.do_void();
 720 
 721   {
 722     // Reference processing
 723     KeepAliveClosure keep_alive(this);
 724     ReferenceProcessor* rp = ref_processor();
 725     ReferenceProcessorPhaseTimes pt(_gc_timer, rp->max_num_queues());
 726     SerialGCRefProcProxyTask task(is_alive, keep_alive, evacuate_followers);
 727     const ReferenceProcessorStats& stats = rp->process_discovered_references(task, pt);
 728     _gc_tracer->report_gc_reference_stats(stats);
 729     _gc_tracer->report_tenuring_threshold(tenuring_threshold());
 730     pt.print_all_references();
 731   }
 732   assert(heap->no_allocs_since_save_marks(), "save marks have not been newly set.");
 733 
 734   {
 735     AdjustWeakRootClosure cl{this};
 736     WeakProcessor::weak_oops_do(&is_alive, &cl);
 737   }
 738 
 739   // Verify that the usage of keep_alive didn't copy any objects.
 740   assert(heap->no_allocs_since_save_marks(), "save marks have not been newly set.");
 741 
 742   _string_dedup_requests.flush();
 743 
 744   if (!_promotion_failed) {
 745     // Swap the survivor spaces.
 746     eden()->clear(SpaceDecorator::Mangle);
 747     from()->clear(SpaceDecorator::Mangle);
 748     if (ZapUnusedHeapArea) {
 749       // This is now done here because of the piece-meal mangling which
 750       // can check for valid mangling at intermediate points in the
 751       // collection(s).  When a young collection fails to collect
 752       // sufficient space, resizing of the young generation can occur
 753       // and redistribute the spaces in the young generation.  Mangle
 754       // here so that unzapped regions don't get distributed to
 755       // other spaces.
 756       to()->mangle_unused_area();
 757     }
 758     swap_spaces();
 759 
 760     assert(to()->is_empty(), "to space should be empty now");
 761 
 762     adjust_desired_tenuring_threshold();
 763 
 764     assert(!heap->incremental_collection_failed(), "Should be clear");
 765   } else {
 766     assert(_promo_failure_scan_stack.is_empty(), "post condition");
 767     _promo_failure_scan_stack.clear(true); // Clear cached segments.
 768 
 769     remove_forwarding_pointers();
 770     log_info(gc, promotion)("Promotion failed");
 771     // Add to-space to the list of spaces to compact
 772     // when a promotion failure has occurred.  In that
 773     // case there can be live objects in to-space
 774     // as a result of a partial evacuation of eden
 775     // and from-space.
 776     swap_spaces();   // For uniformity wrt ParNewGeneration.
 777     from()->set_next_compaction_space(to());
 778     heap->set_incremental_collection_failed();
 779 
 780     _gc_tracer->report_promotion_failed(_promotion_failed_info);
 781 
 782     // Reset the PromotionFailureALot counters.
 783     NOT_PRODUCT(heap->reset_promotion_should_fail();)
 784   }
 785   // We should have processed and cleared all the preserved marks.
 786   _preserved_marks_set.reclaim();
 787 
 788   heap->trace_heap_after_gc(_gc_tracer);
 789 
 790   _gc_timer->register_gc_end();
 791 
 792   _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
 793 }
 794 
 795 void DefNewGeneration::init_assuming_no_promotion_failure() {
 796   _promotion_failed = false;
 797   _promotion_failed_info.reset();
 798   from()->set_next_compaction_space(nullptr);
 799 }
 800 
 801 void DefNewGeneration::remove_forwarding_pointers() {
 802   assert(_promotion_failed, "precondition");
 803 
 804   // Will enter Full GC soon due to failed promotion. Must reset the mark word
 805   // of objs in young-gen so that no objs are marked (forwarded) when Full GC
 806   // starts. (The mark word is overloaded: `is_marked()` == `is_forwarded()`.)
 807   struct ResetForwardedMarkWord : ObjectClosure {
 808     void do_object(oop obj) override {
 809       if (obj->is_forwarded()) {
 810         obj->forward_safe_init_mark();
 811       }
 812     }
 813   } cl;
 814   eden()->object_iterate(&cl);
 815   from()->object_iterate(&cl);
 816 
 817   restore_preserved_marks();
 818 }
 819 
 820 void DefNewGeneration::restore_preserved_marks() {
 821   _preserved_marks_set.restore(nullptr);
 822 }
 823 
 824 void DefNewGeneration::handle_promotion_failure(oop old) {
 825   log_debug(gc, promotion)("Promotion failure size = " SIZE_FORMAT, old->size());
 826 
 827   _promotion_failed = true;
 828   _promotion_failed_info.register_copy_failure(old->size());
 829   _preserved_marks_set.get()->push_if_necessary(old, old->mark());
 830 
 831   ContinuationGCSupport::transform_stack_chunk(old);
 832 
 833   // forward to self
 834   old->forward_to_self();
 835 
 836   _promo_failure_scan_stack.push(old);
 837 
 838   if (!_promo_failure_drain_in_progress) {
 839     // prevent recursion in copy_to_survivor_space()
 840     _promo_failure_drain_in_progress = true;
 841     drain_promo_failure_scan_stack();
 842     _promo_failure_drain_in_progress = false;
 843   }
 844 }
 845 
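     // Copies a live young-gen object into to-space if it is below the tenuring
     // threshold and there is room, otherwise promotes it into the old
     // generation; on promotion failure the object is forwarded to itself and
     // queued for later re-scanning.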
 846 oop DefNewGeneration::copy_to_survivor_space(oop old) {
 847   assert(is_in_reserved(old) && !old->is_forwarded(),
 848          "shouldn't be scavenging this oop");
 849   size_t s = old->size();
 850   oop obj = nullptr;
 851 
 852   // Try allocating obj in to-space (unless too old)
 853   if (old->age() < tenuring_threshold()) {
 854     obj = cast_to_oop(to()->allocate(s));
 855   }
 856 
 857   bool new_obj_is_tenured = false;
 858   // Otherwise try allocating obj tenured
 859   if (obj == nullptr) {
 860     obj = _old_gen->promote(old, s);
 861     if (obj == nullptr) {
 862       handle_promotion_failure(old);
 863       return old;
 864     }
 865 
 866     ContinuationGCSupport::transform_stack_chunk(obj);
 867 
 868     new_obj_is_tenured = true;
 869   } else {
 870     // Prefetch beyond obj
 871     const intx interval = PrefetchCopyIntervalInBytes;
 872     Prefetch::write(obj, interval);
 873 
 874     // Copy obj
 875     Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), cast_from_oop<HeapWord*>(obj), s);
 876 
 877     ContinuationGCSupport::transform_stack_chunk(obj);
 878 
 879     // Increment age if obj still in new generation
 880     obj->incr_age();
 881     age_table()->add(obj, s);
 882   }
 883 
 884   // Done; install a forwarding pointer to obj in old's header.
 885   old->forward_to(obj);
 886 
 887   if (SerialStringDedup::is_candidate_from_evacuation(obj, new_obj_is_tenured)) {
 888     // Record old; request adds a new weak reference, which reference
 889     // processing expects to refer to a from-space object.
 890     _string_dedup_requests.add(old);
 891   }
 892   return obj;
 893 }
 894 
 895 void DefNewGeneration::drain_promo_failure_scan_stack() {
 896   PromoteFailureClosure cl{this};
 897   while (!_promo_failure_scan_stack.is_empty()) {
 898      oop obj = _promo_failure_scan_stack.pop();
 899      obj->oop_iterate(&cl);
 900   }
 901 }
 902 
 903 void DefNewGeneration::save_marks() {
 904   eden()->set_saved_mark();
 905   to()->set_saved_mark();
 906   from()->set_saved_mark();
 907 }
 908 
 909 
 910 bool DefNewGeneration::no_allocs_since_save_marks() {
 911   assert(eden()->saved_mark_at_top(), "Violated spec - alloc in eden");
 912   assert(from()->saved_mark_at_top(), "Violated spec - alloc in from");
 913   return to()->saved_mark_at_top();
 914 }
 915 
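     // Offers the unused part of to-space as scratch memory to the collector,
     // provided promotion did not fail and at least MinFreeScratchWords words
     // are free.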
 916 void DefNewGeneration::contribute_scratch(void*& scratch, size_t& num_words) {
 917   if (_promotion_failed) {
 918     return;
 919   }
 920 
 921   const size_t MinFreeScratchWords = 100;
 922 
 923   ContiguousSpace* to_space = to();
 924   const size_t free_words = pointer_delta(to_space->end(), to_space->top());
 925   if (free_words >= MinFreeScratchWords) {
 926     scratch = to_space->top();
 927     num_words = free_words;
 928   }
 929 }
 930 
 931 void DefNewGeneration::reset_scratch() {
 932   // If contributing scratch in to_space, mangle all of
 933   // to_space if ZapUnusedHeapArea.  This is needed because
 934   // top is not maintained while using to-space as scratch.
 935   if (ZapUnusedHeapArea) {
 936     to()->mangle_unused_area_complete();
 937   }
 938 }
 939 
 940 bool DefNewGeneration::collection_attempt_is_safe() {
 941   if (!to()->is_empty()) {
 942     log_trace(gc)(":: to is not empty ::");
 943     return false;
 944   }
 945   if (_old_gen == nullptr) {
 946     _old_gen = SerialHeap::heap()->old_gen();
 947   }
 948   return _old_gen->promotion_attempt_is_safe(used());
 949 }
 950 
 951 void DefNewGeneration::gc_epilogue(bool full) {
 952   DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)
 953 
 954   assert(!GCLocker::is_active(), "We should not be executing here");
 955   // Check if the heap is approaching full after a collection has
 956   // been done.  Generally the young generation is, at a minimum,
 957   // empty at the end of a collection.  If it is not, then
 958   // the heap is approaching full.
 959   SerialHeap* gch = SerialHeap::heap();
 960   if (full) {
 961     DEBUG_ONLY(seen_incremental_collection_failed = false;)
 962     if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
 963       log_trace(gc)("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
 964                             GCCause::to_string(gch->gc_cause()));
 965       gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
 966       set_should_allocate_from_space(); // we seem to be running out of space
 967     } else {
 968       log_trace(gc)("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
 969                             GCCause::to_string(gch->gc_cause()));
 970       gch->clear_incremental_collection_failed(); // We just did a full collection
 971       clear_should_allocate_from_space(); // if set
 972     }
 973   } else {
 974 #ifdef ASSERT
 975     // It is possible that incremental_collection_failed() == true
 976     // here, because an attempted scavenge did not succeed. The policy
 977     // is normally expected to cause a full collection which should
 978     // clear that condition, so we should not be here twice in a row
 979     // with incremental_collection_failed() == true without having done
 980     // a full collection in between.
 981     if (!seen_incremental_collection_failed &&
 982         gch->incremental_collection_failed()) {
 983       log_trace(gc)("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
 984                             GCCause::to_string(gch->gc_cause()));
 985       seen_incremental_collection_failed = true;
 986     } else if (seen_incremental_collection_failed) {
 987       log_trace(gc)("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
 988                             GCCause::to_string(gch->gc_cause()));
 989       seen_incremental_collection_failed = false;
 990     }
 991 #endif // ASSERT
 992   }
 993 
 994   if (ZapUnusedHeapArea) {
 995     eden()->check_mangled_unused_area_complete();
 996     from()->check_mangled_unused_area_complete();
 997     to()->check_mangled_unused_area_complete();
 998   }
 999 
1000   // update the generation and space performance counters
1001   update_counters();
1002   gch->counters()->update_counters();
1003 }
1004 
1005 void DefNewGeneration::record_spaces_top() {
1006   assert(ZapUnusedHeapArea, "Not mangling unused space");
1007   eden()->set_top_for_allocations();
1008   to()->set_top_for_allocations();
1009   from()->set_top_for_allocations();
1010 }
1011 
1012 void DefNewGeneration::update_counters() {
1013   if (UsePerfData) {
1014     _eden_counters->update_all();
1015     _from_counters->update_all();
1016     _to_counters->update_all();
1017     _gen_counters->update_all();
1018   }
1019 }
1020 
1021 void DefNewGeneration::verify() {
1022   eden()->verify();
1023   from()->verify();
1024     to()->verify();
1025 }
1026 
1027 void DefNewGeneration::print_on(outputStream* st) const {
1028   Generation::print_on(st);
1029   st->print("  eden");
1030   eden()->print_on(st);
1031   st->print("  from");
1032   from()->print_on(st);
1033   st->print("  to  ");
1034   to()->print_on(st);
1035 }
1036 
1037 
1038 const char* DefNewGeneration::name() const {
1039   return "def new generation";
1040 }
1041 
1042 HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) {
1043   // This is the slow-path allocation for the DefNewGeneration.
1044   // Most allocations are fast-path in compiled code.
1045   // We try to allocate from the eden.  If that works, we are happy.
1046   // Note that since DefNewGeneration supports lock-free allocation, we
1047   // have to use it here, as well.
1048   HeapWord* result = eden()->par_allocate(word_size);
1049   if (result == nullptr) {
1050     // If the eden is full and the last collection bailed out, we are running
1051     // out of heap space, and we try to allocate in the from-space, too.
1052     // allocate_from_space can't be inlined because that would introduce a
1053     // circular dependency at compile time.
1054     result = allocate_from_space(word_size);
1055   }
1056   return result;
1057 }
1058 
1059 HeapWord* DefNewGeneration::par_allocate(size_t word_size,
1060                                          bool is_tlab) {
1061   return eden()->par_allocate(word_size);
1062 }
1063 
1064 size_t DefNewGeneration::tlab_capacity() const {
1065   return eden()->capacity();
1066 }
1067 
1068 size_t DefNewGeneration::tlab_used() const {
1069   return eden()->used();
1070 }
1071 
1072 size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
1073   return unsafe_max_alloc_nogc();
1074 }