/*
 * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/serial/cardTableRS.hpp"
#include "gc/serial/defNewGeneration.inline.hpp"
#include "gc/serial/serialGcRefProcProxyTask.hpp"
#include "gc/serial/serialHeap.inline.hpp"
#include "gc/serial/serialStringDedup.inline.hpp"
#include "gc/serial/tenuredGeneration.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/ageTable.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/spaceDecorator.inline.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/threads.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"

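// Closure applied while draining _promo_failure_scan_stack: it scans the fields
// of an object whose promotion failed (the object stays in the young gen,
// forwarded to itself) and tries to scavenge every young object those fields
// reference.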
class PromoteFailureClosure : public InHeapScanClosure {
  template <typename T>
  void do_oop_work(T* p) {
    assert(is_in_young_gen(p), "promote-fail objs must be in young-gen");
    assert(!SerialHeap::heap()->young_gen()->to()->is_in_reserved(p), "must not be in to-space");

    try_scavenge(p, [] (auto) {});
  }
public:
  PromoteFailureClosure(DefNewGeneration* g) : InHeapScanClosure(g) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

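// Closure applied to root locations outside the heap (thread stacks, VM-internal
// roots, etc.): any young-gen object referenced from such a root is copied to
// to-space or promoted to the old generation.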
class RootScanClosure : public OffHeapScanClosure {
  template <typename T>
  void do_oop_work(T* p) {
    assert(!SerialHeap::heap()->is_in_reserved(p), "outside the heap");

    try_scavenge(p, [] (auto) {});
  }
public:
  RootScanClosure(DefNewGeneration* g) : OffHeapScanClosure(g) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

class CLDScanClosure: public CLDClosure {

  class CLDOopClosure : public OffHeapScanClosure {
    ClassLoaderData* _scanned_cld;

    template <typename T>
    void do_oop_work(T* p) {
      assert(!SerialHeap::heap()->is_in_reserved(p), "outside the heap");

      try_scavenge(p, [&] (oop new_obj) {
        assert(_scanned_cld != nullptr, "inv");
        if (is_in_young_gen(new_obj) && !_scanned_cld->has_modified_oops()) {
          _scanned_cld->record_modified_oops();
        }
      });
    }

  public:
    CLDOopClosure(DefNewGeneration* g) : OffHeapScanClosure(g),
      _scanned_cld(nullptr) {}

    void set_scanned_cld(ClassLoaderData* cld) {
      assert(cld == nullptr || _scanned_cld == nullptr, "Must be");
      _scanned_cld = cld;
    }

    void do_oop(oop* p)       { do_oop_work(p); }
    void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  };

  CLDOopClosure _oop_closure;
 public:
  CLDScanClosure(DefNewGeneration* g) : _oop_closure(g) {}

  void do_cld(ClassLoaderData* cld) {
    // If the cld has not been dirtied we know that there are
    // no references into the young gen and we can skip it.
    if (cld->has_modified_oops()) {

      // Tell the closure which CLD is being scanned so that it can be dirtied
      // if oops are left pointing into the young gen.
      _oop_closure.set_scanned_cld(cld);

      // Clean the cld since we're going to scavenge all the metadata.
      cld->oops_do(&_oop_closure, ClassLoaderData::_claim_none, /*clear_modified_oops*/true);

      _oop_closure.set_scanned_cld(nullptr);
    }
  }
};

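// Liveness predicate used during the scavenge: an object is live if it lies
// outside the young generation, or if it has already been forwarded (i.e.
// copied to to-space or promoted).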
class IsAliveClosure: public BoolObjectClosure {
  HeapWord*         _young_gen_end;
public:
  IsAliveClosure(DefNewGeneration* g): _young_gen_end(g->reserved().end()) {}

  bool do_object_b(oop p) {
    return cast_from_oop<HeapWord*>(p) >= _young_gen_end || p->is_forwarded();
  }
};

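// Fixes up weak roots (outside the heap) after evacuation: a slot that still
// points at a young-gen object is redirected to that object's forwardee.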
class AdjustWeakRootClosure: public OffHeapScanClosure {
  template <class T>
  void do_oop_work(T* p) {
    DEBUG_ONLY(SerialHeap* heap = SerialHeap::heap();)
    assert(!heap->is_in_reserved(p), "outside the heap");

    oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
    if (is_in_young_gen(obj)) {
      assert(!heap->young_gen()->to()->is_in_reserved(obj), "inv");
      assert(obj->is_forwarded(), "forwarded before weak-root-processing");
      oop new_obj = obj->forwardee();
      RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
    }
  }
 public:
  AdjustWeakRootClosure(DefNewGeneration* g): OffHeapScanClosure(g) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};

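// Used by reference processing to keep referents alive: an un-forwarded
// young-gen object is copied to survivor space (or promoted), and the card
// table is dirtied when an old-gen slot still refers to a young-gen object.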
class KeepAliveClosure: public OopClosure {
  DefNewGeneration* _young_gen;
  HeapWord*         _young_gen_end;
  CardTableRS* _rs;

  bool is_in_young_gen(void* p) const {
    return p < _young_gen_end;
  }

  template <class T>
  void do_oop_work(T* p) {
    oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);

    if (is_in_young_gen(obj)) {
      oop new_obj = obj->is_forwarded() ? obj->forwardee()
                                        : _young_gen->copy_to_survivor_space(obj);
      RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);

      if (is_in_young_gen(new_obj) && !is_in_young_gen(p)) {
        _rs->inline_write_ref_field_gc(p);
      }
    }
  }
public:
  KeepAliveClosure(DefNewGeneration* g) :
    _young_gen(g),
    _young_gen_end(g->reserved().end()),
    _rs(SerialHeap::heap()->rem_set()) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

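// Transitively scans objects that have been evacuated to to-space or promoted
// to the old generation until no unscanned evacuated objects remain
// (see SerialHeap::scan_evacuated_objs).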
class FastEvacuateFollowersClosure: public VoidClosure {
  SerialHeap* _heap;
  YoungGenScanClosure* _young_cl;
  OldGenScanClosure* _old_cl;
public:
  FastEvacuateFollowersClosure(SerialHeap* heap,
                               YoungGenScanClosure* young_cl,
                               OldGenScanClosure* old_cl) :
    _heap(heap), _young_cl(young_cl), _old_cl(old_cl)
  {}

  void do_void() {
    _heap->scan_evacuated_objs(_young_cl, _old_cl);
  }
};

DefNewGeneration::DefNewGeneration(ReservedSpace rs,
                                   size_t initial_size,
                                   size_t min_size,
                                   size_t max_size,
                                   const char* policy)
  : Generation(rs, initial_size),
    _preserved_marks_set(false /* in_c_heap */),
    _promo_failure_drain_in_progress(false),
    _should_allocate_from_space(false),
    _string_dedup_requests()
{
  MemRegion cmr((HeapWord*)_virtual_space.low(),
                (HeapWord*)_virtual_space.high());
  SerialHeap* gch = SerialHeap::heap();

  gch->rem_set()->resize_covered_region(cmr);

  _eden_space = new ContiguousSpace();
  _from_space = new ContiguousSpace();
  _to_space   = new ContiguousSpace();

  // Compute the maximum eden and survivor space sizes. These sizes
  // are computed assuming the entire reserved space is committed.
  // These values are exported as performance counters.
  uintx size = _virtual_space.reserved_size();
  _max_survivor_size = compute_survivor_size(size, SpaceAlignment);
  _max_eden_size = size - (2*_max_survivor_size);

  // allocate the performance counters

  // Generation counters -- generation 0, 3 subspaces
  _gen_counters = new GenerationCounters("new", 0, 3,
      min_size, max_size, &_virtual_space);
  _gc_counters = new CollectorCounters(policy, 0);

  _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
                                      _gen_counters);
  _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                      _gen_counters);
  _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                    _gen_counters);

  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  update_counters();
  _old_gen = nullptr;
  _tenuring_threshold = MaxTenuringThreshold;
  _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;

  _ref_processor = nullptr;

  _gc_timer = new STWGCTimer();

  _gc_tracer = new DefNewTracer();
}

void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
                                                bool clear_space,
                                                bool mangle_space) {
  // If the spaces are being cleared (only done at heap initialization
  // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces
  // so check.
  assert(clear_space || (to()->is_empty() && from()->is_empty()),
    "Initialization of the survivor spaces assumes these are empty");

  // Compute sizes
  uintx size = _virtual_space.committed_size();
  uintx survivor_size = compute_survivor_size(size, SpaceAlignment);
  uintx eden_size = size - (2*survivor_size);
  if (eden_size > max_eden_size()) {
    // Need to reduce eden_size to satisfy the max constraint. The delta needs
    // to be 2*SpaceAlignment aligned so that both survivors are properly
    // aligned.
    uintx eden_delta = align_up(eden_size - max_eden_size(), 2*SpaceAlignment);
    eden_size     -= eden_delta;
    survivor_size += eden_delta/2;
  }
  assert(eden_size > 0 && survivor_size <= eden_size, "just checking");

  if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding, if so adjust eden size back up
    minimum_eden_size = align_up(minimum_eden_size, SpaceAlignment);
    uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
    uintx unaligned_survivor_size =
      align_down(maximum_survivor_size, SpaceAlignment);
    survivor_size = MAX2(unaligned_survivor_size, SpaceAlignment);
    eden_size = size - (2*survivor_size);
    assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
    assert(eden_size >= minimum_eden_size, "just checking");
  }

  char *eden_start = _virtual_space.low();
  char *from_start = eden_start + eden_size;
  char *to_start   = from_start + survivor_size;
  char *to_end     = to_start   + survivor_size;

  assert(to_end == _virtual_space.high(), "just checking");
  assert(is_aligned(eden_start, SpaceAlignment), "checking alignment");
  assert(is_aligned(from_start, SpaceAlignment), "checking alignment");
  assert(is_aligned(to_start, SpaceAlignment),   "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
  MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);

  // A minimum eden size implies that there is a part of eden that
  // is being used and that affects the initialization of any
  // newly formed eden.
  bool live_in_eden = minimum_eden_size > 0;

  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.
  if (!clear_space) {
    // Must check mangling before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into another; a
    // failure of the check may then not correctly indicate which space
    // is not properly mangled.
    if (ZapUnusedHeapArea) {
      HeapWord* limit = (HeapWord*) _virtual_space.high();
      eden()->check_mangled_unused_area(limit);
      from()->check_mangled_unused_area(limit);
      to()->check_mangled_unused_area(limit);
    }
  }

  // Reset the spaces for their new regions.
  eden()->initialize(edenMR,
                     clear_space && !live_in_eden,
                     SpaceDecorator::Mangle);
  // If clear_space and live_in_eden, we will not have cleared any
  // portion of eden above its top. This can cause newly
  // expanded space not to be mangled if using ZapUnusedHeapArea.
  // We explicitly do such mangling here.
  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
    eden()->mangle_unused_area();
  }
  from()->initialize(fromMR, clear_space, mangle_space);
  to()->initialize(toMR, clear_space, mangle_space);
}

void DefNewGeneration::swap_spaces() {
  ContiguousSpace* s = from();
  _from_space        = to();
  _to_space          = s;

  if (UsePerfData) {
    CSpaceCounters* c = _from_counters;
    _from_counters = _to_counters;
    _to_counters = c;
  }
}

bool DefNewGeneration::expand(size_t bytes) {
  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
  bool success = _virtual_space.expand_by(bytes);
  if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
    HeapWord* new_high = (HeapWord*) _virtual_space.high();
    MemRegion mangle_region(prev_high, new_high);
    SpaceMangler::mangle_region(mangle_region);
  }

  // Do not attempt to expand to the reserve size.  The
  // request should properly observe the maximum size of
  // the generation so an expand-to-reserve should be
  // unnecessary.  Also a second call to expand-to-reserve
  // could potentially cause an undue expansion, for example
  // if the first expand fails for unknown reasons but the
  // second succeeds and expands the heap to its maximum value.
  if (GCLocker::is_active()) {
    log_debug(gc)("Garbage collection disabled, expanded heap instead");
  }

  return success;
}

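// Desired extra young-gen space: NewSizeThreadIncrease bytes per non-daemon
// Java thread; reported as 0 if the multiplication would overflow.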
size_t DefNewGeneration::calculate_thread_increase_size(int threads_count) const {
  size_t thread_increase_size = 0;
  // Check an overflow at 'threads_count * NewSizeThreadIncrease'.
  if (threads_count > 0 && NewSizeThreadIncrease <= max_uintx / threads_count) {
    thread_increase_size = threads_count * NewSizeThreadIncrease;
  }
  return thread_increase_size;
}

size_t DefNewGeneration::adjust_for_thread_increase(size_t new_size_candidate,
                                                    size_t new_size_before,
                                                    size_t alignment,
                                                    size_t thread_increase_size) const {
  size_t desired_new_size = new_size_before;

  if (NewSizeThreadIncrease > 0 && thread_increase_size > 0) {

    // 1. Check an overflow at 'new_size_candidate + thread_increase_size'.
    if (new_size_candidate <= max_uintx - thread_increase_size) {
      new_size_candidate += thread_increase_size;

      // 2. Check an overflow at 'align_up'.
      size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));
      if (new_size_candidate <= aligned_max) {
        desired_new_size = align_up(new_size_candidate, alignment);
      }
    }
  }

  return desired_new_size;
}

void DefNewGeneration::compute_new_size() {
  // This is called after a GC that includes the old generation, so from-space
  // will normally be empty.
  // Note that we check both spaces, since if scavenge failed they revert roles.
  // If either is non-empty, we bail out (otherwise we would have to relocate
  // the objects).
  if (!from()->is_empty() || !to()->is_empty()) {
    return;
  }

  SerialHeap* gch = SerialHeap::heap();

  size_t old_size = gch->old_gen()->capacity();
  size_t new_size_before = _virtual_space.committed_size();
  size_t min_new_size = NewSize;
  size_t max_new_size = reserved().byte_size();
  assert(min_new_size <= new_size_before &&
         new_size_before <= max_new_size,
         "just checking");
  // All space sizes must be multiples of Generation::GenGrain.
  size_t alignment = Generation::GenGrain;

  int threads_count = Threads::number_of_non_daemon_threads();
  size_t thread_increase_size = calculate_thread_increase_size(threads_count);

  size_t new_size_candidate = old_size / NewRatio;
  // Compute the desired new generation size based on NewRatio and
  // NewSizeThreadIncrease, reverting to the previous value if any overflow
  // happens.
  size_t desired_new_size = adjust_for_thread_increase(new_size_candidate, new_size_before,
                                                       alignment, thread_increase_size);

  // Adjust new generation size
  desired_new_size = clamp(desired_new_size, min_new_size, max_new_size);
  assert(desired_new_size <= max_new_size, "just checking");

  bool changed = false;
  if (desired_new_size > new_size_before) {
    size_t change = desired_new_size - new_size_before;
    assert(change % alignment == 0, "just checking");
    if (expand(change)) {
       changed = true;
    }
    // If the heap failed to expand to the desired size,
    // "changed" will be false.  If the expansion failed
    // (and at this point it was expected to succeed),
    // ignore the failure (leaving "changed" as false).
  }
  if (desired_new_size < new_size_before && eden()->is_empty()) {
    // Only shrink if eden is empty; bail out of shrinking if there are
    // objects in eden.
    size_t change = new_size_before - desired_new_size;
    assert(change % alignment == 0, "just checking");
    _virtual_space.shrink_by(change);
    changed = true;
  }
  if (changed) {
    // The spaces have already been mangled at this point but
    // may not have been cleared (set top = bottom) and should be.
    // Mangling was done when the heap was being expanded.
    compute_space_boundaries(eden()->used(),
                             SpaceDecorator::Clear,
                             SpaceDecorator::DontMangle);
    MemRegion cmr((HeapWord*)_virtual_space.low(),
                  (HeapWord*)_virtual_space.high());
    gch->rem_set()->resize_covered_region(cmr);

    log_debug(gc, ergo, heap)(
        "New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
        new_size_before/K, _virtual_space.committed_size()/K,
        eden()->capacity()/K, from()->capacity()/K);
    log_trace(gc, ergo, heap)(
        "  [allowed " SIZE_FORMAT "K extra for %d threads]",
          thread_increase_size/K, threads_count);
  }
}

void DefNewGeneration::ref_processor_init() {
  assert(_ref_processor == nullptr, "a reference processor already exists");
  assert(!_reserved.is_empty(), "empty generation?");
  _span_based_discoverer.set_span(_reserved);
  _ref_processor = new ReferenceProcessor(&_span_based_discoverer);    // a vanilla reference processor
}

size_t DefNewGeneration::capacity() const {
  return eden()->capacity()
       + from()->capacity();  // to() is only used during scavenge
}


size_t DefNewGeneration::used() const {
  return eden()->used()
       + from()->used();      // to() is only used during scavenge
}


size_t DefNewGeneration::free() const {
  return eden()->free()
       + from()->free();      // to() is only used during scavenge
}

size_t DefNewGeneration::max_capacity() const {
  const size_t reserved_bytes = reserved().byte_size();
  return reserved_bytes - compute_survivor_size(reserved_bytes, SpaceAlignment);
}

bool DefNewGeneration::is_in(const void* p) const {
  return eden()->is_in(p)
      || from()->is_in(p)
      || to()  ->is_in(p);
}

size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
  return eden()->free();
}

size_t DefNewGeneration::capacity_before_gc() const {
  return eden()->capacity();
}

size_t DefNewGeneration::contiguous_available() const {
  return eden()->free();
}


void DefNewGeneration::object_iterate(ObjectClosure* blk) {
  eden()->object_iterate(blk);
  from()->object_iterate(blk);
}

// If "p" is in the space, returns the address of the start of the
// "block" that contains "p".  We say "block" instead of "object" since
// some heaps may not pack objects densely; a chunk may either be an
// object or a non-object.  If "p" is not in the space, return null.
// Very general, slow implementation.
static HeapWord* block_start_const(const ContiguousSpace* cs, const void* p) {
  assert(MemRegion(cs->bottom(), cs->end()).contains(p),
         "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(p), p2i(cs->bottom()), p2i(cs->end()));
  if (p >= cs->top()) {
    return cs->top();
  } else {
    HeapWord* last = cs->bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += cast_to_oop(cur)->size();
    }
    assert(oopDesc::is_oop(cast_to_oop(last)), PTR_FORMAT " should be an object start", p2i(last));
    return last;
  }
}

HeapWord* DefNewGeneration::block_start(const void* p) const {
  if (eden()->is_in_reserved(p)) {
    return block_start_const(eden(), p);
  }
  if (from()->is_in_reserved(p)) {
    return block_start_const(from(), p);
  }
  assert(to()->is_in_reserved(p), "inv");
  return block_start_const(to(), p);
}

// The last collection bailed out, we are running out of heap space,
// so we try to allocate the from-space, too.
HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
  bool should_try_alloc = should_allocate_from_space() || GCLocker::is_active_and_needs_gc();

  // If the Heap_lock is not locked by this thread, this will be called
  // again later with the Heap_lock held.
  bool do_alloc = should_try_alloc && (Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()));

  HeapWord* result = nullptr;
  if (do_alloc) {
    result = from()->allocate(size);
  }

  log_trace(gc, alloc)("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "):  will_fail: %s  heap_lock: %s  free: " SIZE_FORMAT "%s%s returns %s",
                        size,
                        SerialHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
                          "true" : "false",
                        Heap_lock->is_locked() ? "locked" : "unlocked",
                        from()->free(),
                        should_try_alloc ? "" : "  should_allocate_from_space: NOT",
                        do_alloc ? "  Heap_lock is not owned by self" : "",
                        result == nullptr ? "null" : "object");

  return result;
}

HeapWord* DefNewGeneration::expand_and_allocate(size_t size, bool is_tlab) {
  // We don't attempt to expand the young generation (but perhaps we should.)
  return allocate(size, is_tlab);
}

void DefNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to TargetSurvivorRatio percent of the real
  // survivor space capacity.
  size_t const survivor_capacity = to()->capacity() / HeapWordSize;
  size_t const desired_survivor_size = (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);

  _tenuring_threshold = age_table()->compute_tenuring_threshold(desired_survivor_size);

  if (UsePerfData) {
    GCPolicyCounters* gc_counters = SerialHeap::heap()->counters();
    gc_counters->tenuring_threshold()->set_value(_tenuring_threshold);
    gc_counters->desired_survivor_size()->set_value(desired_survivor_size * oopSize);
  }

  age_table()->print_age_table();
}

void DefNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  SerialHeap* heap = SerialHeap::heap();

  // If the next generation is too full to accommodate promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    log_trace(gc)(":: Collection attempt not safe ::");
    heap->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");
  _gc_timer->register_gc_start();
  _gc_tracer->report_gc_start(heap->gc_cause(), _gc_timer->gc_start());
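  // Start discovering java.lang.ref references encountered during the
  // scavenge; clear_all_soft_refs requests that all soft references be
  // cleared rather than kept according to the soft-reference policy.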
  _ref_processor->start_discovery(clear_all_soft_refs);

  _old_gen = heap->old_gen();

  init_assuming_no_promotion_failure();

  GCTraceTime(Trace, gc, phases) tm("DefNew", nullptr, heap->gc_cause());

  heap->trace_heap_before_gc(_gc_tracer);

  // These can be shared for all code paths
  IsAliveClosure is_alive(this);

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);
  // The preserved marks should be empty at the start of the GC.
  _preserved_marks_set.init(1);

  assert(heap->no_allocs_since_save_marks(),
         "save marks have not been newly set.");

  YoungGenScanClosure young_gen_cl(this);
  OldGenScanClosure   old_gen_cl(this);

  FastEvacuateFollowersClosure evacuate_followers(heap,
                                                  &young_gen_cl,
                                                  &old_gen_cl);

  assert(heap->no_allocs_since_save_marks(),
         "save marks have not been newly set.");

  {
    StrongRootsScope srs(0);
    RootScanClosure root_cl{this};
    CLDScanClosure cld_cl{this};

    MarkingNMethodClosure code_cl(&root_cl,
                                  NMethodToOopClosure::FixRelocations,
                                  false /* keepalive_nmethods */);

    heap->process_roots(SerialHeap::SO_ScavengeCodeCache,
                        &root_cl,
                        &cld_cl,
                        &cld_cl,
                        &code_cl);

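    // Scan dirty cards in the old generation for old-to-young references and
    // scavenge the young objects they point to.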
    _old_gen->scan_old_to_young_refs();
  }

  // "evacuate followers".
  evacuate_followers.do_void();

  {
    // Reference processing
    KeepAliveClosure keep_alive(this);
    ReferenceProcessor* rp = ref_processor();
    ReferenceProcessorPhaseTimes pt(_gc_timer, rp->max_num_queues());
    SerialGCRefProcProxyTask task(is_alive, keep_alive, evacuate_followers);
    const ReferenceProcessorStats& stats = rp->process_discovered_references(task, pt);
    _gc_tracer->report_gc_reference_stats(stats);
    _gc_tracer->report_tenuring_threshold(tenuring_threshold());
    pt.print_all_references();
  }
  assert(heap->no_allocs_since_save_marks(), "save marks have not been newly set.");

  {
    AdjustWeakRootClosure cl{this};
    WeakProcessor::weak_oops_do(&is_alive, &cl);
  }

  // Verify that the usage of keep_alive didn't copy any objects.
  assert(heap->no_allocs_since_save_marks(), "save marks have not been newly set.");

  _string_dedup_requests.flush();

  if (!_promotion_failed) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a young collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();

    assert(!heap->incremental_collection_failed(), "Should be clear");
  } else {
    assert(_promo_failure_scan_stack.is_empty(), "post condition");
    _promo_failure_scan_stack.clear(true); // Clear cached segments.

    remove_forwarding_pointers();
    log_info(gc, promotion)("Promotion failed");
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
    swap_spaces();   // For uniformity wrt ParNewGeneration.
    heap->set_incremental_collection_failed();

    _gc_tracer->report_promotion_failed(_promotion_failed_info);

    // Reset the PromotionFailureALot counters.
    NOT_PRODUCT(heap->reset_promotion_should_fail();)
  }
  // We should have processed and cleared all the preserved marks.
  _preserved_marks_set.reclaim();

  heap->trace_heap_after_gc(_gc_tracer);

  _gc_timer->register_gc_end();

  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

void DefNewGeneration::init_assuming_no_promotion_failure() {
  _promotion_failed = false;
  _promotion_failed_info.reset();
}

void DefNewGeneration::remove_forwarding_pointers() {
  assert(_promotion_failed, "precondition");

  // Will enter Full GC soon due to failed promotion. Must reset the mark word
  // of objs in young-gen so that no objs are marked (forwarded) when Full GC
  // starts. (The mark word is overloaded: `is_marked()` == `is_forwarded()`.)
  struct ResetForwardedMarkWord : ObjectClosure {
    void do_object(oop obj) override {
      if (obj->is_forwarded()) {
        obj->init_mark();
      }
    }
  } cl;
  eden()->object_iterate(&cl);
  from()->object_iterate(&cl);

  restore_preserved_marks();
}

void DefNewGeneration::restore_preserved_marks() {
  _preserved_marks_set.restore(nullptr);
}

void DefNewGeneration::handle_promotion_failure(oop old) {
  log_debug(gc, promotion)("Promotion failure size = " SIZE_FORMAT, old->size());

  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());
  _preserved_marks_set.get()->push_if_necessary(old, old->mark());

  ContinuationGCSupport::transform_stack_chunk(old);

  // forward to self
  old->forward_to(old);

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}

oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = nullptr;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = cast_to_oop(to()->allocate(s));
  }

  bool new_obj_is_tenured = false;
  // Otherwise try allocating obj tenured
  if (obj == nullptr) {
    obj = _old_gen->promote(old, s);
    if (obj == nullptr) {
      handle_promotion_failure(old);
      return old;
    }

    ContinuationGCSupport::transform_stack_chunk(obj);

    new_obj_is_tenured = true;
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), cast_from_oop<HeapWord*>(obj), s);

    ContinuationGCSupport::transform_stack_chunk(obj);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in the old object's header
  old->forward_to(obj);

  if (SerialStringDedup::is_candidate_from_evacuation(obj, new_obj_is_tenured)) {
    // Record old; request adds a new weak reference, which reference
    // processing expects to refer to a from-space object.
    _string_dedup_requests.add(old);
  }
  return obj;
}

void DefNewGeneration::drain_promo_failure_scan_stack() {
  PromoteFailureClosure cl{this};
  while (!_promo_failure_scan_stack.is_empty()) {
    oop obj = _promo_failure_scan_stack.pop();
    obj->oop_iterate(&cl);
  }
}

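// Record the current allocation top; no_allocs_since_save_marks() later checks
// that nothing has been allocated past that point during phases of the
// collection that are expected not to allocate.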
void DefNewGeneration::save_marks() {
  set_saved_mark_word();
}


bool DefNewGeneration::no_allocs_since_save_marks() {
  return saved_mark_at_top();
}

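// Offer the free portion of to-space to the full GC as scratch space. Nothing
// is offered after a promotion failure, since to-space may then contain live
// objects.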
void DefNewGeneration::contribute_scratch(void*& scratch, size_t& num_words) {
  if (_promotion_failed) {
    return;
  }

  const size_t MinFreeScratchWords = 100;

  ContiguousSpace* to_space = to();
  const size_t free_words = pointer_delta(to_space->end(), to_space->top());
  if (free_words >= MinFreeScratchWords) {
    scratch = to_space->top();
    num_words = free_words;
  }
}

void DefNewGeneration::reset_scratch() {
  // If contributing scratch in to_space, mangle all of
  // to_space if ZapUnusedHeapArea.  This is needed because
  // top is not maintained while using to-space as scratch.
  if (ZapUnusedHeapArea) {
    to()->mangle_unused_area_complete();
  }
}

bool DefNewGeneration::collection_attempt_is_safe() {
  if (!to()->is_empty()) {
    log_trace(gc)(":: to is not empty ::");
    return false;
  }
  if (_old_gen == nullptr) {
    _old_gen = SerialHeap::heap()->old_gen();
  }
  return _old_gen->promotion_attempt_is_safe(used());
}

void DefNewGeneration::gc_epilogue(bool full) {
  DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)

  assert(!GCLocker::is_active(), "We should not be executing here");
  // Check if the heap is approaching full after a collection has
  // been done.  Generally, at a minimum, the young generation is empty
  // at the end of a collection.  If it is not, then the heap is
  // approaching full.
  SerialHeap* gch = SerialHeap::heap();
  if (full) {
    DEBUG_ONLY(seen_incremental_collection_failed = false;)
    if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
      log_trace(gc)("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
      set_should_allocate_from_space(); // we seem to be running out of space
    } else {
      log_trace(gc)("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
                            GCCause::to_string(gch->gc_cause()));
      gch->clear_incremental_collection_failed(); // We just did a full collection
      clear_should_allocate_from_space(); // if set
    }
  } else {
#ifdef ASSERT
    // It is possible that incremental_collection_failed() == true
    // here, because an attempted scavenge did not succeed. The policy
    // is normally expected to cause a full collection which should
    // clear that condition, so we should not be here twice in a row
    // with incremental_collection_failed() == true without having done
    // a full collection in between.
    if (!seen_incremental_collection_failed &&
        gch->incremental_collection_failed()) {
      log_trace(gc)("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      seen_incremental_collection_failed = true;
    } else if (seen_incremental_collection_failed) {
      log_trace(gc)("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
                            GCCause::to_string(gch->gc_cause()));
      seen_incremental_collection_failed = false;
    }
#endif // ASSERT
  }

  if (ZapUnusedHeapArea) {
    eden()->check_mangled_unused_area_complete();
    from()->check_mangled_unused_area_complete();
    to()->check_mangled_unused_area_complete();
  }

  // update the generation and space performance counters
  update_counters();
  gch->counters()->update_counters();
}

void DefNewGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  eden()->set_top_for_allocations();
  to()->set_top_for_allocations();
  from()->set_top_for_allocations();
}

void DefNewGeneration::update_counters() {
  if (UsePerfData) {
    _eden_counters->update_all();
    _from_counters->update_all();
    _to_counters->update_all();
    _gen_counters->update_all();
  }
}

void DefNewGeneration::verify() {
  eden()->verify();
  from()->verify();
  to()->verify();
}

void DefNewGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print("  eden");
  eden()->print_on(st);
  st->print("  from");
  from()->print_on(st);
  st->print("  to  ");
  to()->print_on(st);
}


const char* DefNewGeneration::name() const {
  return "def new generation";
}

HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) {
  // This is the slow-path allocation for the DefNewGeneration.
  // Most allocations are fast-path in compiled code.
  // We try to allocate from the eden.  If that works, we are happy.
  // Note that since DefNewGeneration supports lock-free allocation, we
  // have to use it here, as well.
  HeapWord* result = eden()->par_allocate(word_size);
  if (result == nullptr) {
    // If the eden is full and the last collection bailed out, we are running
    // out of heap space, and we try to allocate the from-space, too.
    // allocate_from_space can't be inlined because that would introduce a
    // circular dependency at compile time.
    result = allocate_from_space(word_size);
  }
  return result;
}

HeapWord* DefNewGeneration::par_allocate(size_t word_size,
                                         bool is_tlab) {
  return eden()->par_allocate(word_size);
}

size_t DefNewGeneration::tlab_capacity() const {
  return eden()->capacity();
}

size_t DefNewGeneration::tlab_used() const {
  return eden()->used();
}

size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
  return unsafe_max_alloc_nogc();
}