1 /*
   2  * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/serial/cardTableRS.hpp"
  27 #include "gc/serial/defNewGeneration.inline.hpp"
  28 #include "gc/serial/serialGcRefProcProxyTask.hpp"
  29 #include "gc/serial/serialHeap.inline.hpp"
  30 #include "gc/serial/serialStringDedup.inline.hpp"
  31 #include "gc/serial/tenuredGeneration.hpp"
  32 #include "gc/shared/adaptiveSizePolicy.hpp"
  33 #include "gc/shared/ageTable.inline.hpp"
  34 #include "gc/shared/collectorCounters.hpp"
  35 #include "gc/shared/continuationGCSupport.inline.hpp"
  36 #include "gc/shared/gcArguments.hpp"
  37 #include "gc/shared/gcHeapSummary.hpp"
  38 #include "gc/shared/gcLocker.hpp"
  39 #include "gc/shared/gcPolicyCounters.hpp"
  40 #include "gc/shared/gcTimer.hpp"
  41 #include "gc/shared/gcTrace.hpp"
  42 #include "gc/shared/gcTraceTime.inline.hpp"
  43 #include "gc/shared/referencePolicy.hpp"
  44 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  45 #include "gc/shared/space.inline.hpp"
  46 #include "gc/shared/spaceDecorator.inline.hpp"
  47 #include "gc/shared/strongRootsScope.hpp"
  48 #include "gc/shared/weakProcessor.hpp"
  49 #include "logging/log.hpp"
  50 #include "memory/iterator.inline.hpp"
  51 #include "memory/resourceArea.hpp"
  52 #include "oops/instanceRefKlass.hpp"
  53 #include "oops/oop.inline.hpp"
  54 #include "runtime/java.hpp"
  55 #include "runtime/javaThread.hpp"
  56 #include "runtime/prefetch.inline.hpp"
  57 #include "runtime/threads.hpp"
  58 #include "utilities/align.hpp"
  59 #include "utilities/copy.hpp"
  60 #include "utilities/globalDefinitions.hpp"
  61 #include "utilities/stack.inline.hpp"
  62 
  63 class PromoteFailureClosure : public InHeapScanClosure {
  64   template <typename T>
  65   void do_oop_work(T* p) {
  66     assert(is_in_young_gen(p), "promote-fail objs must be in young-gen");
  67     assert(!SerialHeap::heap()->young_gen()->to()->is_in_reserved(p), "must not be in to-space");
  68 
  69     try_scavenge(p, [] (auto) {});
  70   }
  71 public:
  72   PromoteFailureClosure(DefNewGeneration* g) : InHeapScanClosure(g) {}
  73 
  74   void do_oop(oop* p)       { do_oop_work(p); }
  75   void do_oop(narrowOop* p) { do_oop_work(p); }
  76 };
  77 
  78 class RootScanClosure : public OffHeapScanClosure {
  79   template <typename T>
  80   void do_oop_work(T* p) {
  81     assert(!SerialHeap::heap()->is_in_reserved(p), "outside the heap");
  82 
  83     try_scavenge(p,  [] (auto) {});
  84   }
  85 public:
  86   RootScanClosure(DefNewGeneration* g) : OffHeapScanClosure(g) {}
  87 
  88   void do_oop(oop* p)       { do_oop_work(p); }
  89   void do_oop(narrowOop* p) { do_oop_work(p); }
  90 };
  91 
  92 class CLDScanClosure: public CLDClosure {
  93 
  94   class CLDOopClosure : public OffHeapScanClosure {
  95     ClassLoaderData* _scanned_cld;
  96 
  97     template <typename T>
  98     void do_oop_work(T* p) {
  99       assert(!SerialHeap::heap()->is_in_reserved(p), "outside the heap");
 100 
 101       try_scavenge(p, [&] (oop new_obj) {
 102         assert(_scanned_cld != nullptr, "inv");
 103         if (is_in_young_gen(new_obj) && !_scanned_cld->has_modified_oops()) {
 104           _scanned_cld->record_modified_oops();
 105         }
 106       });
 107     }
 108 
 109   public:
 110     CLDOopClosure(DefNewGeneration* g) : OffHeapScanClosure(g),
 111       _scanned_cld(nullptr) {}
 112 
 113     void set_scanned_cld(ClassLoaderData* cld) {
 114       assert(cld == nullptr || _scanned_cld == nullptr, "Must be");
 115       _scanned_cld = cld;
 116     }
 117 
 118     void do_oop(oop* p)       { do_oop_work(p); }
 119     void do_oop(narrowOop* p) { ShouldNotReachHere(); }
 120   };
 121 
 122   CLDOopClosure _oop_closure;
 123  public:
 124   CLDScanClosure(DefNewGeneration* g) : _oop_closure(g) {}
 125 
 126   void do_cld(ClassLoaderData* cld) {
    // If the cld has not been dirtied we know that there are
    // no references into the young gen and we can skip it.
 129     if (cld->has_modified_oops()) {
 130 
 131       // Tell the closure which CLD is being scanned so that it can be dirtied
 132       // if oops are left pointing into the young gen.
 133       _oop_closure.set_scanned_cld(cld);
 134 
 135       // Clean the cld since we're going to scavenge all the metadata.
 136       cld->oops_do(&_oop_closure, ClassLoaderData::_claim_none, /*clear_modified_oops*/true);
 137 
 138       _oop_closure.set_scanned_cld(nullptr);
 139     }
 140   }
 141 };
 142 
 143 class IsAliveClosure: public BoolObjectClosure {
 144   HeapWord*         _young_gen_end;
 145 public:
 146   IsAliveClosure(DefNewGeneration* g): _young_gen_end(g->reserved().end()) {}
 147 
 148   bool do_object_b(oop p) {
 149     return cast_from_oop<HeapWord*>(p) >= _young_gen_end || p->is_forwarded();
 150   }
 151 };
 152 
 153 class AdjustWeakRootClosure: public OffHeapScanClosure {
 154   template <class T>
 155   void do_oop_work(T* p) {
 156     DEBUG_ONLY(SerialHeap* heap = SerialHeap::heap();)
 157     assert(!heap->is_in_reserved(p), "outside the heap");
 158 
 159     oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
 160     if (is_in_young_gen(obj)) {
 161       assert(!heap->young_gen()->to()->is_in_reserved(obj), "inv");
 162       assert(obj->is_forwarded(), "forwarded before weak-root-processing");
 163       oop new_obj = obj->forwardee();
 164       RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
 165     }
 166   }
 167  public:
 168   AdjustWeakRootClosure(DefNewGeneration* g): OffHeapScanClosure(g) {}
 169 
 170   void do_oop(oop* p)       { do_oop_work(p); }
 171   void do_oop(narrowOop* p) { ShouldNotReachHere(); }
 172 };
 173 
 174 class KeepAliveClosure: public OopClosure {
 175   DefNewGeneration* _young_gen;
 176   HeapWord*         _young_gen_end;
 177   CardTableRS* _rs;
 178 
 179   bool is_in_young_gen(void* p) const {
 180     return p < _young_gen_end;
 181   }
 182 
 183   template <class T>
 184   void do_oop_work(T* p) {
 185     oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
 186 
 187     if (is_in_young_gen(obj)) {
 188       oop new_obj = obj->is_forwarded() ? obj->forwardee()
 189                                         : _young_gen->copy_to_survivor_space(obj);
 190       RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
 191 
 192       if (is_in_young_gen(new_obj) && !is_in_young_gen(p)) {
 193         _rs->inline_write_ref_field_gc(p);
 194       }
 195     }
 196   }
 197 public:
 198   KeepAliveClosure(DefNewGeneration* g) :
 199     _young_gen(g),
 200     _young_gen_end(g->reserved().end()),
 201     _rs(SerialHeap::heap()->rem_set()) {}
 202 
 203   void do_oop(oop* p)       { do_oop_work(p); }
 204   void do_oop(narrowOop* p) { do_oop_work(p); }
 205 };
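
// KeepAliveClosure is handed to reference processing below; when it keeps a
// referent alive it must also preserve the generational invariant that every
// old-to-young pointer is covered by a dirty card. A minimal sketch of the
// per-slot decision made in do_oop_work() above (illustrative pseudo-code,
// not additional functionality):
//
//   obj = *p;                                     // non-null referent
//   if (obj is below the young-gen boundary) {
//     new_obj = obj->is_forwarded() ? obj->forwardee()
//                                   : copy_to_survivor_space(obj);
//     *p = new_obj;
//     if (new_obj stayed in the young gen && p itself is outside it) {
//       dirty the card covering p;                // found by the next scavenge
//     }
//   }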
 206 
 207 class FastEvacuateFollowersClosure: public VoidClosure {
 208   SerialHeap* _heap;
 209   YoungGenScanClosure* _young_cl;
 210   OldGenScanClosure* _old_cl;
 211 public:
 212   FastEvacuateFollowersClosure(SerialHeap* heap,
 213                                YoungGenScanClosure* young_cl,
 214                                OldGenScanClosure* old_cl) :
 215     _heap(heap), _young_cl(young_cl), _old_cl(old_cl)
 216   {}
 217 
 218   void do_void() {
 219     _heap->scan_evacuated_objs(_young_cl, _old_cl);
 220   }
 221 };
 222 
 223 DefNewGeneration::DefNewGeneration(ReservedSpace rs,
 224                                    size_t initial_size,
 225                                    size_t min_size,
 226                                    size_t max_size,
 227                                    const char* policy)
 228   : Generation(rs, initial_size),
 229     _promo_failure_drain_in_progress(false),
 230     _should_allocate_from_space(false),
 231     _string_dedup_requests()
 232 {
 233   MemRegion cmr((HeapWord*)_virtual_space.low(),
 234                 (HeapWord*)_virtual_space.high());
 235   SerialHeap* gch = SerialHeap::heap();
 236 
 237   gch->rem_set()->resize_covered_region(cmr);
 238 
 239   _eden_space = new ContiguousSpace();
 240   _from_space = new ContiguousSpace();
 241   _to_space   = new ContiguousSpace();
 242 
 243   // Compute the maximum eden and survivor space sizes. These sizes
 244   // are computed assuming the entire reserved space is committed.
 245   // These values are exported as performance counters.
 246   uintx size = _virtual_space.reserved_size();
 247   _max_survivor_size = compute_survivor_size(size, SpaceAlignment);
 248   _max_eden_size = size - (2*_max_survivor_size);
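
  // For illustration: assuming compute_survivor_size() reserves roughly
  // size / (SurvivorRatio + 2) for each survivor space (SurvivorRatio
  // defaults to 8), a 64M reserved young generation would give approximately
  // (ignoring SpaceAlignment rounding):
  //
  //   _max_survivor_size ~= 64M / 10        ~=  6.4M (per survivor space)
  //   _max_eden_size      = 64M - 2 * 6.4M  ~= 51.2M
  //
  // i.e. an eden:from:to split of roughly 8:1:1.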
 249 
 250   // allocate the performance counters
 251 
 252   // Generation counters -- generation 0, 3 subspaces
 253   _gen_counters = new GenerationCounters("new", 0, 3,
 254       min_size, max_size, &_virtual_space);
 255   _gc_counters = new CollectorCounters(policy, 0);
 256 
 257   _eden_counters = new CSpaceCounters("eden", 0, _max_eden_size, _eden_space,
 258                                       _gen_counters);
 259   _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
 260                                       _gen_counters);
 261   _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
 262                                     _gen_counters);
 263 
 264   compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
 265   update_counters();
 266   _old_gen = nullptr;
 267   _tenuring_threshold = MaxTenuringThreshold;
 268   _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
 269 
 270   _ref_processor = nullptr;
 271 
 272   _gc_timer = new STWGCTimer();
 273 
 274   _gc_tracer = new DefNewTracer();
 275 }
 276 
 277 void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
 278                                                 bool clear_space,
 279                                                 bool mangle_space) {
 280   // If the spaces are being cleared (only done at heap initialization
 281   // currently), the survivor spaces need not be empty.
  // Otherwise, no care is taken for used areas in the survivor spaces,
  // so check that they are empty.
 284   assert(clear_space || (to()->is_empty() && from()->is_empty()),
 285     "Initialization of the survivor spaces assumes these are empty");
 286 
 287   // Compute sizes
 288   uintx size = _virtual_space.committed_size();
 289   uintx survivor_size = compute_survivor_size(size, SpaceAlignment);
 290   uintx eden_size = size - (2*survivor_size);
 291   if (eden_size > max_eden_size()) {
 292     // Need to reduce eden_size to satisfy the max constraint. The delta needs
 293     // to be 2*SpaceAlignment aligned so that both survivors are properly
 294     // aligned.
 295     uintx eden_delta = align_up(eden_size - max_eden_size(), 2*SpaceAlignment);
 296     eden_size     -= eden_delta;
 297     survivor_size += eden_delta/2;
 298   }
 299   assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
 300 
 301   if (eden_size < minimum_eden_size) {
    // May happen due to 64Kb rounding; if so, adjust eden size back up.
 303     minimum_eden_size = align_up(minimum_eden_size, SpaceAlignment);
 304     uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
 305     uintx unaligned_survivor_size =
 306       align_down(maximum_survivor_size, SpaceAlignment);
 307     survivor_size = MAX2(unaligned_survivor_size, SpaceAlignment);
 308     eden_size = size - (2*survivor_size);
 309     assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
 310     assert(eden_size >= minimum_eden_size, "just checking");
 311   }
 312 
 313   char *eden_start = _virtual_space.low();
 314   char *from_start = eden_start + eden_size;
 315   char *to_start   = from_start + survivor_size;
 316   char *to_end     = to_start   + survivor_size;
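
  // The committed part of the virtual space is carved up contiguously, eden
  // first, followed by the two equally sized survivor spaces (a sketch, not
  // to scale):
  //
  //   _virtual_space.low()                             _virtual_space.high()
  //   |<-------- eden_size -------->|<- survivor_size ->|<- survivor_size ->|
  //   [            eden             ][       from       ][        to        ]
  //   eden_start                     from_start          to_start      to_end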
 317 
 318   assert(to_end == _virtual_space.high(), "just checking");
 319   assert(is_aligned(eden_start, SpaceAlignment), "checking alignment");
 320   assert(is_aligned(from_start, SpaceAlignment), "checking alignment");
 321   assert(is_aligned(to_start, SpaceAlignment),   "checking alignment");
 322 
 323   MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
 324   MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
 325   MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);
 326 
 327   // A minimum eden size implies that there is a part of eden that
 328   // is being used and that affects the initialization of any
 329   // newly formed eden.
 330   bool live_in_eden = minimum_eden_size > 0;
 331 
  // If not clearing the spaces, do some checking to verify that
  // the spaces are already mangled.
 334   if (!clear_space) {
    // Must check mangling before the spaces are reshaped.  Otherwise,
    // the bottom or end of one space may have moved into another, and
    // a failure of the check may not correctly indicate which space
    // is not properly mangled.
 339     if (ZapUnusedHeapArea) {
 340       HeapWord* limit = (HeapWord*) _virtual_space.high();
 341       eden()->check_mangled_unused_area(limit);
 342       from()->check_mangled_unused_area(limit);
 343         to()->check_mangled_unused_area(limit);
 344     }
 345   }
 346 
 347   // Reset the spaces for their new regions.
 348   eden()->initialize(edenMR,
 349                      clear_space && !live_in_eden,
 350                      SpaceDecorator::Mangle);
 351   // If clear_space and live_in_eden, we will not have cleared any
 352   // portion of eden above its top. This can cause newly
 353   // expanded space not to be mangled if using ZapUnusedHeapArea.
 354   // We explicitly do such mangling here.
 355   if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
 356     eden()->mangle_unused_area();
 357   }
 358   from()->initialize(fromMR, clear_space, mangle_space);
 359   to()->initialize(toMR, clear_space, mangle_space);
 360 }
 361 
 362 void DefNewGeneration::swap_spaces() {
 363   ContiguousSpace* s = from();
 364   _from_space        = to();
 365   _to_space          = s;
 366 
 367   if (UsePerfData) {
 368     CSpaceCounters* c = _from_counters;
 369     _from_counters = _to_counters;
 370     _to_counters = c;
 371   }
 372 }
 373 
 374 bool DefNewGeneration::expand(size_t bytes) {
 375   HeapWord* prev_high = (HeapWord*) _virtual_space.high();
 376   bool success = _virtual_space.expand_by(bytes);
 377   if (success && ZapUnusedHeapArea) {
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
 381     HeapWord* new_high = (HeapWord*) _virtual_space.high();
 382     MemRegion mangle_region(prev_high, new_high);
 383     SpaceMangler::mangle_region(mangle_region);
 384   }
 385 
  // Do not attempt an expand to the reserve size.  The
  // request should properly observe the maximum size of
  // the generation, so an expand to the reserve should be
  // unnecessary.  Also, a second expand-to-reserve call could
  // cause an undue expansion: for example, if the first expand
  // fails for unknown reasons but the second succeeds and
  // expands the heap to its maximum value.
 394   if (GCLocker::is_active()) {
 395     log_debug(gc)("Garbage collection disabled, expanded heap instead");
 396   }
 397 
 398   return success;
 399 }
 400 
 401 size_t DefNewGeneration::calculate_thread_increase_size(int threads_count) const {
 402     size_t thread_increase_size = 0;
    // Check for overflow at 'threads_count * NewSizeThreadIncrease'.
 404     if (threads_count > 0 && NewSizeThreadIncrease <= max_uintx / threads_count) {
 405       thread_increase_size = threads_count * NewSizeThreadIncrease;
 406     }
 407     return thread_increase_size;
 408 }
 409 
 410 size_t DefNewGeneration::adjust_for_thread_increase(size_t new_size_candidate,
 411                                                     size_t new_size_before,
 412                                                     size_t alignment,
 413                                                     size_t thread_increase_size) const {
 414   size_t desired_new_size = new_size_before;
 415 
 416   if (NewSizeThreadIncrease > 0 && thread_increase_size > 0) {
 417 
    // 1. Check for overflow at 'new_size_candidate + thread_increase_size'.
 419     if (new_size_candidate <= max_uintx - thread_increase_size) {
 420       new_size_candidate += thread_increase_size;
 421 
      // 2. Check for overflow at 'align_up'.
 423       size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));
 424       if (new_size_candidate <= aligned_max) {
 425         desired_new_size = align_up(new_size_candidate, alignment);
 426       }
 427     }
 428   }
 429 
 430   return desired_new_size;
 431 }
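
// A worked example of the two helpers above, using hypothetical values: with
// NewSizeThreadIncrease = 16K and 100 non-daemon threads,
//
//   thread_increase_size = 100 * 16K = 1600K                  (no overflow)
//   new_size_candidate   = old_size / NewRatio, say 20M
//   desired_new_size     = align_up(20M + 1600K, alignment)
//
// If the multiplication, the addition, or the align_up would overflow a
// size_t, the previous size (new_size_before) is kept instead.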
 432 
 433 void DefNewGeneration::compute_new_size() {
  // This is called after a GC that includes the old generation, so from-space
  // will normally be empty.
  // Note that we check both spaces, since if the scavenge failed the spaces
  // have swapped roles. If either is non-empty we bail out (otherwise we would
  // have to relocate the objects).
 438   if (!from()->is_empty() || !to()->is_empty()) {
 439     return;
 440   }
 441 
 442   SerialHeap* gch = SerialHeap::heap();
 443 
 444   size_t old_size = gch->old_gen()->capacity();
 445   size_t new_size_before = _virtual_space.committed_size();
 446   size_t min_new_size = NewSize;
 447   size_t max_new_size = reserved().byte_size();
 448   assert(min_new_size <= new_size_before &&
 449          new_size_before <= max_new_size,
 450          "just checking");
 451   // All space sizes must be multiples of Generation::GenGrain.
 452   size_t alignment = Generation::GenGrain;
 453 
 454   int threads_count = Threads::number_of_non_daemon_threads();
 455   size_t thread_increase_size = calculate_thread_increase_size(threads_count);
 456 
 457   size_t new_size_candidate = old_size / NewRatio;
  // Compute the desired new generation size based on NewRatio and
  // NewSizeThreadIncrease, reverting to the previous value if any overflow
  // happens.
 460   size_t desired_new_size = adjust_for_thread_increase(new_size_candidate, new_size_before,
 461                                                        alignment, thread_increase_size);
 462 
 463   // Adjust new generation size
 464   desired_new_size = clamp(desired_new_size, min_new_size, max_new_size);
 465   assert(desired_new_size <= max_new_size, "just checking");
 466 
 467   bool changed = false;
 468   if (desired_new_size > new_size_before) {
 469     size_t change = desired_new_size - new_size_before;
 470     assert(change % alignment == 0, "just checking");
 471     if (expand(change)) {
 472        changed = true;
 473     }
 474     // If the heap failed to expand to the desired size,
 475     // "changed" will be false.  If the expansion failed
 476     // (and at this point it was expected to succeed),
 477     // ignore the failure (leaving "changed" as false).
 478   }
 479   if (desired_new_size < new_size_before && eden()->is_empty()) {
    // We only shrink when eden is empty; bail out of shrinking if there are
    // objects in eden.
 481     size_t change = new_size_before - desired_new_size;
 482     assert(change % alignment == 0, "just checking");
 483     _virtual_space.shrink_by(change);
 484     changed = true;
 485   }
 486   if (changed) {
 487     // The spaces have already been mangled at this point but
 488     // may not have been cleared (set top = bottom) and should be.
 489     // Mangling was done when the heap was being expanded.
 490     compute_space_boundaries(eden()->used(),
 491                              SpaceDecorator::Clear,
 492                              SpaceDecorator::DontMangle);
 493     MemRegion cmr((HeapWord*)_virtual_space.low(),
 494                   (HeapWord*)_virtual_space.high());
 495     gch->rem_set()->resize_covered_region(cmr);
 496 
 497     log_debug(gc, ergo, heap)(
 498         "New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden=" SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
 499         new_size_before/K, _virtual_space.committed_size()/K,
 500         eden()->capacity()/K, from()->capacity()/K);
 501     log_trace(gc, ergo, heap)(
 502         "  [allowed " SIZE_FORMAT "K extra for %d threads]",
        thread_increase_size/K, threads_count);
  }
 505 }
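
// A worked example of the resize decision above, using hypothetical values:
// with NewRatio = 2 and an old generation capacity of 60M, the candidate new
// size is 60M / 2 = 30M (plus the per-thread allowance, then clamped to
// [NewSize, max_new_size]). If the young generation currently has 20M
// committed, it expands by 10M; if it had 40M committed and eden were empty,
// it would shrink by 10M instead.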
 506 
 507 void DefNewGeneration::ref_processor_init() {
 508   assert(_ref_processor == nullptr, "a reference processor already exists");
 509   assert(!_reserved.is_empty(), "empty generation?");
 510   _span_based_discoverer.set_span(_reserved);
 511   _ref_processor = new ReferenceProcessor(&_span_based_discoverer);    // a vanilla reference processor
 512 }
 513 
 514 size_t DefNewGeneration::capacity() const {
 515   return eden()->capacity()
 516        + from()->capacity();  // to() is only used during scavenge
 517 }
 518 
 519 
 520 size_t DefNewGeneration::used() const {
 521   return eden()->used()
 522        + from()->used();      // to() is only used during scavenge
 523 }
 524 
 525 
 526 size_t DefNewGeneration::free() const {
 527   return eden()->free()
 528        + from()->free();      // to() is only used during scavenge
 529 }
 530 
 531 size_t DefNewGeneration::max_capacity() const {
 532   const size_t reserved_bytes = reserved().byte_size();
 533   return reserved_bytes - compute_survivor_size(reserved_bytes, SpaceAlignment);
 534 }
 535 
 536 bool DefNewGeneration::is_in(const void* p) const {
 537   return eden()->is_in(p)
 538       || from()->is_in(p)
 539       || to()  ->is_in(p);
 540 }
 541 
 542 size_t DefNewGeneration::unsafe_max_alloc_nogc() const {
 543   return eden()->free();
 544 }
 545 
 546 size_t DefNewGeneration::capacity_before_gc() const {
 547   return eden()->capacity();
 548 }
 549 
 550 size_t DefNewGeneration::contiguous_available() const {
 551   return eden()->free();
 552 }
 553 
 554 
 555 void DefNewGeneration::object_iterate(ObjectClosure* blk) {
 556   eden()->object_iterate(blk);
 557   from()->object_iterate(blk);
 558 }
 559 
// Returns the address of the start of the "block" that contains "p".
// We say "block" instead of "object" since some heaps may not pack objects
// densely; a block may be either an object or a non-object.  "p" must be
// in the space (this is asserted).
// Very general, slow implementation.
 565 static HeapWord* block_start_const(const ContiguousSpace* cs, const void* p) {
 566   assert(MemRegion(cs->bottom(), cs->end()).contains(p),
 567          "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
 568          p2i(p), p2i(cs->bottom()), p2i(cs->end()));
 569   if (p >= cs->top()) {
 570     return cs->top();
 571   } else {
 572     HeapWord* last = cs->bottom();
 573     HeapWord* cur = last;
 574     while (cur <= p) {
 575       last = cur;
 576       cur += cast_to_oop(cur)->size();
 577     }
 578     assert(oopDesc::is_oop(cast_to_oop(last)), PTR_FORMAT " should be an object start", p2i(last));
 579     return last;
 580   }
 581 }
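
// A sketch of how the linear scan above behaves: for a space laid out as
//
//   bottom: [objA, 3 words][objB, 5 words][objC, ...]        ...  top
//
// a query for any address inside objB advances cur through bottom, bottom+3,
// bottom+8, stops as soon as cur moves past the query address, and returns
// the previous object start (bottom+3, the start of objB). Queries at or
// above top() simply return top().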
 582 
 583 HeapWord* DefNewGeneration::block_start(const void* p) const {
 584   if (eden()->is_in_reserved(p)) {
 585     return block_start_const(eden(), p);
 586   }
 587   if (from()->is_in_reserved(p)) {
 588     return block_start_const(from(), p);
 589   }
 590   assert(to()->is_in_reserved(p), "inv");
 591   return block_start_const(to(), p);
 592 }
 593 
// The last collection bailed out, we are running out of heap space,
// so we try to allocate from the from-space, too.
 596 HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
 597   bool should_try_alloc = should_allocate_from_space() || GCLocker::is_active_and_needs_gc();
 598 
 599   // If the Heap_lock is not locked by this thread, this will be called
 600   // again later with the Heap_lock held.
 601   bool do_alloc = should_try_alloc && (Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()));
 602 
 603   HeapWord* result = nullptr;
 604   if (do_alloc) {
 605     result = from()->allocate(size);
 606   }
 607 
 608   log_trace(gc, alloc)("DefNewGeneration::allocate_from_space(" SIZE_FORMAT "):  will_fail: %s  heap_lock: %s  free: " SIZE_FORMAT "%s%s returns %s",
 609                         size,
 610                         SerialHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
 611                           "true" : "false",
 612                         Heap_lock->is_locked() ? "locked" : "unlocked",
 613                         from()->free(),
 614                         should_try_alloc ? "" : "  should_allocate_from_space: NOT",
 615                         do_alloc ? "  Heap_lock is not owned by self" : "",
 616                         result == nullptr ? "null" : "object");
 617 
 618   return result;
 619 }
 620 
 621 HeapWord* DefNewGeneration::expand_and_allocate(size_t size, bool is_tlab) {
  // We don't attempt to expand the young generation (but perhaps we should).
 623   return allocate(size, is_tlab);
 624 }
 625 
 626 void DefNewGeneration::adjust_desired_tenuring_threshold() {
  // Set the desired survivor size to TargetSurvivorRatio percent of the real
  // survivor space (half, with the default value of 50).
 628   size_t const survivor_capacity = to()->capacity() / HeapWordSize;
 629   size_t const desired_survivor_size = (size_t)((((double)survivor_capacity) * TargetSurvivorRatio) / 100);
 630 
 631   _tenuring_threshold = age_table()->compute_tenuring_threshold(desired_survivor_size);
 632 
 633   if (UsePerfData) {
 634     GCPolicyCounters* gc_counters = SerialHeap::heap()->counters();
 635     gc_counters->tenuring_threshold()->set_value(_tenuring_threshold);
 636     gc_counters->desired_survivor_size()->set_value(desired_survivor_size * oopSize);
 637   }
 638 
 639   age_table()->print_age_table();
 640 }
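
// For illustration of the computation above, using hypothetical values: with
// TargetSurvivorRatio = 50 and an 8M to-space, the desired survivor size is
// about 4M worth of words. compute_tenuring_threshold() then (roughly) picks
// the smallest age N at which the cumulative volume of surviving objects aged
// 1..N exceeds that target, capped at MaxTenuringThreshold; objects whose age
// has reached the resulting threshold are promoted by copy_to_survivor_space()
// instead of being copied into the survivor space again.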
 641 
 642 void DefNewGeneration::collect(bool   full,
 643                                bool   clear_all_soft_refs,
 644                                size_t size,
 645                                bool   is_tlab) {
 646   assert(full || size > 0, "otherwise we don't want to collect");
 647 
 648   SerialHeap* heap = SerialHeap::heap();
 649 
 650   // If the next generation is too full to accommodate promotion
 651   // from this generation, pass on collection; let the next generation
 652   // do it.
 653   if (!collection_attempt_is_safe()) {
 654     log_trace(gc)(":: Collection attempt not safe ::");
 655     heap->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
 656     return;
 657   }
 658   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 659   _gc_timer->register_gc_start();
 660   _gc_tracer->report_gc_start(heap->gc_cause(), _gc_timer->gc_start());
 661   _ref_processor->start_discovery(clear_all_soft_refs);
 662 
 663   _old_gen = heap->old_gen();
 664 
 665   init_assuming_no_promotion_failure();
 666 
 667   GCTraceTime(Trace, gc, phases) tm("DefNew", nullptr, heap->gc_cause());
 668 
 669   heap->trace_heap_before_gc(_gc_tracer);
 670 
 671   // These can be shared for all code paths
 672   IsAliveClosure is_alive(this);
 673 
 674   age_table()->clear();
 675   to()->clear(SpaceDecorator::Mangle);
 676 
 677   assert(heap->no_allocs_since_save_marks(),
 678          "save marks have not been newly set.");
 679 
 680   YoungGenScanClosure young_gen_cl(this);
 681   OldGenScanClosure   old_gen_cl(this);
 682 
 683   FastEvacuateFollowersClosure evacuate_followers(heap,
 684                                                   &young_gen_cl,
 685                                                   &old_gen_cl);
 686 
 687   assert(heap->no_allocs_since_save_marks(),
 688          "save marks have not been newly set.");
 689 
 690   {
 691     StrongRootsScope srs(0);
 692     RootScanClosure root_cl{this};
 693     CLDScanClosure cld_cl{this};
 694 
 695     MarkingNMethodClosure code_cl(&root_cl,
 696                                   NMethodToOopClosure::FixRelocations,
 697                                   false /* keepalive_nmethods */);
 698 
 699     heap->process_roots(SerialHeap::SO_ScavengeCodeCache,
 700                         &root_cl,
 701                         &cld_cl,
 702                         &cld_cl,
 703                         &code_cl);
 704 
 705     _old_gen->scan_old_to_young_refs();
 706   }
 707 
 708   // "evacuate followers".
 709   evacuate_followers.do_void();
 710 
 711   {
 712     // Reference processing
 713     KeepAliveClosure keep_alive(this);
 714     ReferenceProcessor* rp = ref_processor();
 715     ReferenceProcessorPhaseTimes pt(_gc_timer, rp->max_num_queues());
 716     SerialGCRefProcProxyTask task(is_alive, keep_alive, evacuate_followers);
 717     const ReferenceProcessorStats& stats = rp->process_discovered_references(task, pt);
 718     _gc_tracer->report_gc_reference_stats(stats);
 719     _gc_tracer->report_tenuring_threshold(tenuring_threshold());
 720     pt.print_all_references();
 721   }
 722   assert(heap->no_allocs_since_save_marks(), "save marks have not been newly set.");
 723 
 724   {
 725     AdjustWeakRootClosure cl{this};
 726     WeakProcessor::weak_oops_do(&is_alive, &cl);
 727   }
 728 
 729   // Verify that the usage of keep_alive didn't copy any objects.
 730   assert(heap->no_allocs_since_save_marks(), "save marks have not been newly set.");
 731 
 732   _string_dedup_requests.flush();
 733 
 734   if (!_promotion_failed) {
 735     // Swap the survivor spaces.
 736     eden()->clear(SpaceDecorator::Mangle);
 737     from()->clear(SpaceDecorator::Mangle);
 738     if (ZapUnusedHeapArea) {
      // This is now done here because of the piece-meal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a young collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
 746       to()->mangle_unused_area();
 747     }
 748     swap_spaces();
 749 
 750     assert(to()->is_empty(), "to space should be empty now");
 751 
 752     adjust_desired_tenuring_threshold();
 753 
 754     assert(!heap->incremental_collection_failed(), "Should be clear");
 755   } else {
 756     assert(_promo_failure_scan_stack.is_empty(), "post condition");
 757     _promo_failure_scan_stack.clear(true); // Clear cached segments.
 758 
 759     remove_forwarding_pointers();
 760     log_info(gc, promotion)("Promotion failed");
    // Add to-space to the list of spaces to compact
    // when a promotion failure has occurred.  In that
    // case there can be live objects in to-space
    // as a result of a partial evacuation of eden
    // and from-space.
 766     swap_spaces();   // For uniformity wrt ParNewGeneration.
 767     heap->set_incremental_collection_failed();
 768 
 769     _gc_tracer->report_promotion_failed(_promotion_failed_info);
 770 
 771     // Reset the PromotionFailureALot counters.
 772     NOT_PRODUCT(heap->reset_promotion_should_fail();)
 773   }
 774 
 775   heap->trace_heap_after_gc(_gc_tracer);
 776 
 777   _gc_timer->register_gc_end();
 778 
 779   _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
 780 }
 781 
 782 void DefNewGeneration::init_assuming_no_promotion_failure() {
 783   _promotion_failed = false;
 784   _promotion_failed_info.reset();
 785 }
 786 
 787 void DefNewGeneration::remove_forwarding_pointers() {
 788   assert(_promotion_failed, "precondition");
 789 
 790   // Will enter Full GC soon due to failed promotion. Must reset the mark word
 791   // of objs in young-gen so that no objs are marked (forwarded) when Full GC
 792   // starts. (The mark word is overloaded: `is_marked()` == `is_forwarded()`.)
 793   struct ResetForwardedMarkWord : ObjectClosure {
 794     void do_object(oop obj) override {
 795       if (obj->is_self_forwarded()) {
 796         obj->unset_self_forwarded();
 797       } else if (obj->is_forwarded()) {
 798         obj->forward_safe_init_mark();
 799       }
 800     }
 801   } cl;
 802   eden()->object_iterate(&cl);
 803   from()->object_iterate(&cl);
 804 }
 805 
 806 void DefNewGeneration::handle_promotion_failure(oop old) {
  log_debug(gc, promotion)("Promotion failure size = " SIZE_FORMAT, old->size());
 808 
 809   _promotion_failed = true;
 810   _promotion_failed_info.register_copy_failure(old->size());
 811 
 812   ContinuationGCSupport::transform_stack_chunk(old);
 813 
 814   // forward to self
 815   old->forward_to_self();
 816 
 817   _promo_failure_scan_stack.push(old);
 818 
 819   if (!_promo_failure_drain_in_progress) {
 820     // prevent recursion in copy_to_survivor_space()
 821     _promo_failure_drain_in_progress = true;
 822     drain_promo_failure_scan_stack();
 823     _promo_failure_drain_in_progress = false;
 824   }
 825 }
 826 
 827 oop DefNewGeneration::copy_to_survivor_space(oop old) {
 828   assert(is_in_reserved(old) && !old->is_forwarded(),
 829          "shouldn't be scavenging this oop");
 830   size_t s = old->size();
 831   oop obj = nullptr;
 832 
 833   // Try allocating obj in to-space (unless too old)
 834   if (old->age() < tenuring_threshold()) {
 835     obj = cast_to_oop(to()->allocate(s));
 836   }
 837 
 838   bool new_obj_is_tenured = false;
 839   // Otherwise try allocating obj tenured
 840   if (obj == nullptr) {
 841     obj = _old_gen->promote(old, s);
 842     if (obj == nullptr) {
 843       handle_promotion_failure(old);
 844       return old;
 845     }
 846 
 847     ContinuationGCSupport::transform_stack_chunk(obj);
 848 
 849     new_obj_is_tenured = true;
 850   } else {
 851     // Prefetch beyond obj
 852     const intx interval = PrefetchCopyIntervalInBytes;
 853     Prefetch::write(obj, interval);
 854 
 855     // Copy obj
 856     Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), cast_from_oop<HeapWord*>(obj), s);
 857 
 858     ContinuationGCSupport::transform_stack_chunk(obj);
 859 
 860     // Increment age if obj still in new generation
 861     obj->incr_age();
 862     age_table()->add(obj, s);
 863   }
 864 
  // Done, insert forwarding pointer to obj in old's header
 866   old->forward_to(obj);
 867 
 868   if (SerialStringDedup::is_candidate_from_evacuation(obj, new_obj_is_tenured)) {
    // Record old; the request adds a new weak reference, which reference
    // processing expects to refer to a from-space object.
 871     _string_dedup_requests.add(old);
 872   }
 873   return obj;
 874 }
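
// A compact restatement of the copy policy above (sketch only):
//
//   1. If old->age() < tenuring_threshold(), try to allocate a copy in
//      to-space.
//   2. If that fails, or the object is old enough, ask _old_gen->promote(),
//      which copies the object into the old generation.
//   3. If promotion also fails, handle_promotion_failure() forwards the
//      object to itself and it stays where it is (a full collection will
//      follow).
//   4. For successful to-space copies the payload is copied and the age
//      incremented here; in all successful cases old's mark word ends up
//      holding a forwarding pointer to the new copy, and string-dedup
//      candidates are queued on _string_dedup_requests.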
 875 
 876 void DefNewGeneration::drain_promo_failure_scan_stack() {
 877   PromoteFailureClosure cl{this};
 878   while (!_promo_failure_scan_stack.is_empty()) {
 879      oop obj = _promo_failure_scan_stack.pop();
 880      obj->oop_iterate(&cl);
 881   }
 882 }
 883 
 884 void DefNewGeneration::save_marks() {
 885   set_saved_mark_word();
 886 }
 887 
 888 
 889 bool DefNewGeneration::no_allocs_since_save_marks() {
 890   return saved_mark_at_top();
 891 }
 892 
 893 void DefNewGeneration::contribute_scratch(void*& scratch, size_t& num_words) {
 894   if (_promotion_failed) {
 895     return;
 896   }
 897 
 898   const size_t MinFreeScratchWords = 100;
 899 
 900   ContiguousSpace* to_space = to();
 901   const size_t free_words = pointer_delta(to_space->end(), to_space->top());
 902   if (free_words >= MinFreeScratchWords) {
 903     scratch = to_space->top();
 904     num_words = free_words;
 905   }
 906 }
 907 
 908 void DefNewGeneration::reset_scratch() {
 909   // If contributing scratch in to_space, mangle all of
 910   // to_space if ZapUnusedHeapArea.  This is needed because
 911   // top is not maintained while using to-space as scratch.
 912   if (ZapUnusedHeapArea) {
 913     to()->mangle_unused_area_complete();
 914   }
 915 }
 916 
 917 bool DefNewGeneration::collection_attempt_is_safe() {
 918   if (!to()->is_empty()) {
 919     log_trace(gc)(":: to is not empty ::");
 920     return false;
 921   }
 922   if (_old_gen == nullptr) {
 923     _old_gen = SerialHeap::heap()->old_gen();
 924   }
 925   return _old_gen->promotion_attempt_is_safe(used());
 926 }
 927 
 928 void DefNewGeneration::gc_epilogue(bool full) {
 929   DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)
 930 
 931   assert(!GCLocker::is_active(), "We should not be executing here");
  // Check if the heap is approaching full after a collection has
  // been done.  Generally, at a minimum, the young generation is empty
  // at the end of a collection.  If it is not, then the heap is
  // approaching full.
 936   SerialHeap* gch = SerialHeap::heap();
 937   if (full) {
 938     DEBUG_ONLY(seen_incremental_collection_failed = false;)
 939     if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
 940       log_trace(gc)("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
 941                             GCCause::to_string(gch->gc_cause()));
 942       gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
 943       set_should_allocate_from_space(); // we seem to be running out of space
 944     } else {
 945       log_trace(gc)("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
 946                             GCCause::to_string(gch->gc_cause()));
 947       gch->clear_incremental_collection_failed(); // We just did a full collection
 948       clear_should_allocate_from_space(); // if set
 949     }
 950   } else {
 951 #ifdef ASSERT
 952     // It is possible that incremental_collection_failed() == true
 953     // here, because an attempted scavenge did not succeed. The policy
 954     // is normally expected to cause a full collection which should
 955     // clear that condition, so we should not be here twice in a row
 956     // with incremental_collection_failed() == true without having done
 957     // a full collection in between.
 958     if (!seen_incremental_collection_failed &&
 959         gch->incremental_collection_failed()) {
 960       log_trace(gc)("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
 961                             GCCause::to_string(gch->gc_cause()));
 962       seen_incremental_collection_failed = true;
 963     } else if (seen_incremental_collection_failed) {
 964       log_trace(gc)("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
 965                             GCCause::to_string(gch->gc_cause()));
 966       seen_incremental_collection_failed = false;
 967     }
 968 #endif // ASSERT
 969   }
 970 
 971   if (ZapUnusedHeapArea) {
 972     eden()->check_mangled_unused_area_complete();
 973     from()->check_mangled_unused_area_complete();
 974     to()->check_mangled_unused_area_complete();
 975   }
 976 
 977   // update the generation and space performance counters
 978   update_counters();
 979   gch->counters()->update_counters();
 980 }
 981 
 982 void DefNewGeneration::record_spaces_top() {
 983   assert(ZapUnusedHeapArea, "Not mangling unused space");
 984   eden()->set_top_for_allocations();
 985   to()->set_top_for_allocations();
 986   from()->set_top_for_allocations();
 987 }
 988 
 989 void DefNewGeneration::update_counters() {
 990   if (UsePerfData) {
 991     _eden_counters->update_all();
 992     _from_counters->update_all();
 993     _to_counters->update_all();
 994     _gen_counters->update_all();
 995   }
 996 }
 997 
 998 void DefNewGeneration::verify() {
 999   eden()->verify();
1000   from()->verify();
1001     to()->verify();
1002 }
1003 
1004 void DefNewGeneration::print_on(outputStream* st) const {
1005   Generation::print_on(st);
1006   st->print("  eden");
1007   eden()->print_on(st);
1008   st->print("  from");
1009   from()->print_on(st);
1010   st->print("  to  ");
1011   to()->print_on(st);
1012 }
1013 
1014 
1015 const char* DefNewGeneration::name() const {
1016   return "def new generation";
1017 }
1018 
1019 HeapWord* DefNewGeneration::allocate(size_t word_size, bool is_tlab) {
1020   // This is the slow-path allocation for the DefNewGeneration.
1021   // Most allocations are fast-path in compiled code.
1022   // We try to allocate from the eden.  If that works, we are happy.
1023   // Note that since DefNewGeneration supports lock-free allocation, we
1024   // have to use it here, as well.
1025   HeapWord* result = eden()->par_allocate(word_size);
1026   if (result == nullptr) {
    // If the eden is full and the last collection bailed out, we are running
    // out of heap space, and we try to allocate from the from-space, too.
    // allocate_from_space can't be inlined because that would introduce a
    // circular dependency at compile time.
1031     result = allocate_from_space(word_size);
1032   }
1033   return result;
1034 }
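
// The lock-free allocation mentioned above is the usual bump-the-pointer
// scheme in which racing threads claim memory with a compare-and-exchange on
// the space's top. A minimal sketch of the idea (illustrative only, not the
// actual ContiguousSpace implementation; top_addr() stands for the address of
// the space's top field):
//
//   for (;;) {
//     HeapWord* old_top = top();
//     HeapWord* new_top = old_top + word_size;
//     if (new_top > end()) {
//       return nullptr;                       // not enough room left
//     }
//     if (Atomic::cmpxchg(top_addr(), old_top, new_top) == old_top) {
//       return old_top;                       // we own [old_top, new_top)
//     }
//     // lost the race to another allocator; retry with the updated top
//   }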
1035 
1036 HeapWord* DefNewGeneration::par_allocate(size_t word_size,
1037                                          bool is_tlab) {
1038   return eden()->par_allocate(word_size);
1039 }
1040 
1041 size_t DefNewGeneration::tlab_capacity() const {
1042   return eden()->capacity();
1043 }
1044 
1045 size_t DefNewGeneration::tlab_used() const {
1046   return eden()->used();
1047 }
1048 
1049 size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
1050   return unsafe_max_alloc_nogc();
1051 }