/*
 * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "compiler/oopMap.hpp"
#include "gc/serial/cardTableRS.hpp"
#include "gc/serial/defNewGeneration.hpp"
#include "gc/serial/genMarkSweep.hpp"
#include "gc/serial/markSweep.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcInitLogger.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageParState.inline.hpp"
#include "gc/shared/oopStorageSet.inline.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/workerThread.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/threads.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/autoRestore.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/vmError.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

GenCollectedHeap::GenCollectedHeap(Generation::Name young,
                                   Generation::Name old,
                                   const char* policy_counters_name) :
  CollectedHeap(),
  _young_gen(nullptr),
  _old_gen(nullptr),
  _young_gen_spec(new GenerationSpec(young,
                                     NewSize,
                                     MaxNewSize,
                                     GenAlignment)),
  _old_gen_spec(new GenerationSpec(old,
                                   OldSize,
                                   MaxOldSize,
                                   GenAlignment)),
  _rem_set(nullptr),
  _soft_ref_policy(),
  _gc_policy_counters(new GCPolicyCounters(policy_counters_name, 2, 2)),
  _incremental_collection_failed(false),
  _full_collections_completed(0),
  _young_manager(nullptr),
  _old_manager(nullptr) {
}

jint GenCollectedHeap::initialize() {
  // Allocate space for the heap.

  ReservedHeapSpace heap_rs = allocate(HeapAlignment);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  initialize_reserved_region(heap_rs);

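  // The reservation is split contiguously: the young generation occupies the
  // low end of the reserved space and the old generation the remainder. This
  // layout is what lets is_in_young() be a single pointer comparison against
  // the start of the old generation's reserved region.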
  ReservedSpace young_rs = heap_rs.first_part(_young_gen_spec->max_size());
  ReservedSpace old_rs = heap_rs.last_part(_young_gen_spec->max_size());

  _rem_set = create_rem_set(heap_rs.region());
  _rem_set->initialize(young_rs.base(), old_rs.base());

  CardTableBarrierSet *bs = new CardTableBarrierSet(_rem_set);
  bs->initialize();
  BarrierSet::set_barrier_set(bs);

  _young_gen = _young_gen_spec->init(young_rs, rem_set());
  _old_gen = _old_gen_spec->init(old_rs, rem_set());

  GCInitLogger::print();

  return JNI_OK;
}

CardTableRS* GenCollectedHeap::create_rem_set(const MemRegion& reserved_region) {
  return new CardTableRS(reserved_region);
}

ReservedHeapSpace GenCollectedHeap::allocate(size_t alignment) {
  // Now figure out the total size.
  const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
  assert(alignment % pageSize == 0, "Must be");

  // Check for overflow.
  size_t total_reserved = _young_gen_spec->max_size() + _old_gen_spec->max_size();
  if (total_reserved < _young_gen_spec->max_size()) {
    vm_exit_during_initialization("The size of the object heap + VM data exceeds "
                                  "the maximum representable size");
  }
  assert(total_reserved % alignment == 0,
         "Gen size; total_reserved=" SIZE_FORMAT ", alignment="
         SIZE_FORMAT, total_reserved, alignment);

  ReservedHeapSpace heap_rs = Universe::reserve_heap(total_reserved, alignment);
  size_t used_page_size = heap_rs.page_size();

  os::trace_page_sizes("Heap",
                       MinHeapSize,
                       total_reserved,
                       heap_rs.base(),
                       heap_rs.size(),
                       used_page_size);

  return heap_rs;
}

class GenIsScavengable : public BoolObjectClosure {
public:
  bool do_object_b(oop obj) {
    return GenCollectedHeap::heap()->is_in_young(obj);
  }
};

static GenIsScavengable _is_scavengable;

void GenCollectedHeap::post_initialize() {
  CollectedHeap::post_initialize();

  DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;

  def_new_gen->ref_processor_init();

  MarkSweep::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
}

PreGenGCValues GenCollectedHeap::get_pre_gc_values() const {
  const DefNewGeneration* const def_new_gen = (DefNewGeneration*) young_gen();

  return PreGenGCValues(def_new_gen->used(),
                        def_new_gen->capacity(),
                        def_new_gen->eden()->used(),
                        def_new_gen->eden()->capacity(),
                        def_new_gen->from()->used(),
                        def_new_gen->from()->capacity(),
                        old_gen()->used(),
                        old_gen()->capacity());
}

GenerationSpec* GenCollectedHeap::young_gen_spec() const {
  return _young_gen_spec;
}

GenerationSpec* GenCollectedHeap::old_gen_spec() const {
  return _old_gen_spec;
}

size_t GenCollectedHeap::capacity() const {
  return _young_gen->capacity() + _old_gen->capacity();
}

size_t GenCollectedHeap::used() const {
  return _young_gen->used() + _old_gen->used();
}

void GenCollectedHeap::save_used_regions() {
  _old_gen->save_used_region();
  _young_gen->save_used_region();
}

size_t GenCollectedHeap::max_capacity() const {
  return _young_gen->max_capacity() + _old_gen->max_capacity();
}

// Update the _full_collections_completed counter
// at the end of a stop-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  return _full_collections_completed;
}

// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)
// . heap memory is tight -- the most recent previous collection
//   was a full collection because a partial collection (would
//   have) failed and is likely to fail again
bool GenCollectedHeap::should_try_older_generation_allocation(size_t word_size) const {
  size_t young_capacity = _young_gen->capacity_before_gc();
  return    (word_size > heap_word_size(young_capacity))
         || GCLocker::is_active_and_needs_gc()
         || incremental_collection_failed();
}

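// Expansion fallback used when a GC cannot be performed right now (e.g. while
// the GCLocker is active): try to expand-and-allocate in the old generation
// first, then the young generation, returning null if neither succeeds.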
HeapWord* GenCollectedHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
  HeapWord* result = nullptr;
  if (_old_gen->should_allocate(size, is_tlab)) {
    result = _old_gen->expand_and_allocate(size, is_tlab);
  }
  if (result == nullptr) {
    if (_young_gen->should_allocate(size, is_tlab)) {
      result = _young_gen->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == nullptr || is_in_reserved(result), "result not in heap");
  return result;
}

HeapWord* GenCollectedHeap::mem_allocate_work(size_t size,
                                              bool is_tlab) {

  HeapWord* result = nullptr;

  // Loop until the allocation is satisfied, or unsatisfied after GC.
  for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {

    // First allocation attempt is lock-free.
    Generation *young = _young_gen;
    if (young->should_allocate(size, is_tlab)) {
      result = young->par_allocate(size, is_tlab);
      if (result != nullptr) {
        assert(is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    uint gc_count_before;  // Read inside the Heap_lock locked region.
    {
      MutexLocker ml(Heap_lock);
      log_trace(gc, alloc)("GenCollectedHeap::mem_allocate_work: attempting locked slow path allocation");
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = !should_try_older_generation_allocation(size);

      result = attempt_allocation(size, is_tlab, first_only);
      if (result != nullptr) {
        assert(is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GCLocker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return nullptr;  // Caller will retry allocating individual object.
        }
        if (!is_maximal_no_gc()) {
          // Try and expand heap to satisfy request.
          result = expand_heap_and_allocate(size, is_tlab);
          // Result could be null if we are out of space.
          if (result != nullptr) {
            return result;
          }
        }

        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
          return nullptr; // We didn't get to do a GC and we didn't get any memory.
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return nullptr;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = total_collections();
    }

    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
        assert(result == nullptr, "must be null if gc_locked() is true");
        continue;  // Retry and/or stall as necessary.
      }

      assert(result == nullptr || is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("GenCollectedHeap::mem_allocate_work retries %d times,"
                            " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}

HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res = nullptr;

  if (_young_gen->should_allocate(size, is_tlab)) {
    res = _young_gen->allocate(size, is_tlab);
    if (res != nullptr || first_only) {
      return res;
    }
  }

  if (_old_gen->should_allocate(size, is_tlab)) {
    res = _old_gen->allocate(size, is_tlab);
  }

  return res;
}

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
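  // Note: the serial collector does not use the GC-overhead-limit mechanism,
  // so gc_overhead_limit_was_exceeded is left untouched here.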
  return mem_allocate_work(size,
                           false /* is_tlab */);
}

bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
         _gc_cause == GCCause::_wb_full_gc;
}

void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
                                          bool is_tlab, bool run_verification, bool clear_soft_refs) {
  FormatBuffer<> title("Collect gen: %s", gen->short_name());
  GCTraceTime(Trace, gc, phases) t1(title);
  TraceCollectorStats tcs(gen->counters());
  TraceMemoryManagerStats tmms(gen->gc_manager(), gc_cause(), heap()->is_young_gen(gen) ? "end of minor GC" : "end of major GC");

  gen->stat_record()->invocations++;
  gen->stat_record()->accumulated_time.start();

  // Must be done anew before each collection because
  // a previous collection will do mangling and will
  // change top of some spaces.
  record_gen_tops_before_GC();

  log_trace(gc)("%s invoke=%d size=" SIZE_FORMAT, heap()->is_young_gen(gen) ? "Young" : "Old", gen->stat_record()->invocations, size * HeapWordSize);

  if (run_verification && VerifyBeforeGC) {
    Universe::verify("Before GC");
  }
  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::clear());

  // Do collection work
  {
    save_marks();   // save marks for all gens

    gen->collect(full, clear_soft_refs, size, is_tlab);
  }

  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::update_pointers());

  gen->stat_record()->accumulated_time.stop();

  update_gc_stats(gen, full);

  if (run_verification && VerifyAfterGC) {
    Universe::verify("After GC");
  }
}

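// Perform a collection of the requested scope. Depending on the flags and on
// whether each generation wants to collect, this runs a young collection, a
// full (old) collection, or a young collection followed by a full one.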
void GenCollectedHeap::do_collection(bool           full,
                                     bool           clear_all_soft_refs,
                                     size_t         size,
                                     bool           is_tlab,
                                     GenerationType max_generation) {
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread(), "only VM thread");
  assert(Heap_lock->is_locked(),
         "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");

  if (GCLocker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                                      soft_ref_policy()->should_clear_all_soft_refs();

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, soft_ref_policy());

  AutoModifyRestore<bool> temporarily(_is_gc_active, true);

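  // When a complete collection is requested and ScavengeBeforeFullGC is
  // disabled, the full collection covers the young generation as well, so no
  // separate young collection is run first.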
  bool complete = full && (max_generation == OldGen);
  bool old_collects_young = complete && !ScavengeBeforeFullGC;
  bool do_young_collection = !old_collects_young && _young_gen->should_collect(full, size, is_tlab);

  const PreGenGCValues pre_gc_values = get_pre_gc_values();

  bool run_verification = total_collections() >= VerifyGCStartAt;
  bool prepared_for_verification = false;
  bool do_full_collection = false;

  if (do_young_collection) {
    GCIdMark gc_id_mark;
    GCTraceCPUTime tcpu(((DefNewGeneration*)_young_gen)->gc_tracer());
    GCTraceTime(Info, gc) t("Pause Young", nullptr, gc_cause(), true);

    print_heap_before_gc();

    if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
      prepare_for_verify();
      prepared_for_verification = true;
    }

    gc_prologue(complete);
    increment_total_collections(complete);

    collect_generation(_young_gen,
                       full,
                       size,
                       is_tlab,
                       run_verification && VerifyGCLevel <= 0,
                       do_clear_all_soft_refs);

    if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
        size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {
      // Allocation request was met by young GC.
      size = 0;
    }

    // Ask if the young collection was enough. If so, do the final steps for the
    // young collection, and fall through to the end.
    do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation);
    if (!do_full_collection) {
      // Adjust generation sizes.
      _young_gen->compute_new_size();

      print_heap_change(pre_gc_values);

      // Track memory usage and detect low memory after GC finishes
      MemoryService::track_memory_usage();

      gc_epilogue(complete);
    }

    print_heap_after_gc();

  } else {
    // No young collection, ask if we need to perform Full collection.
    do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation);
  }

  if (do_full_collection) {
    GCIdMark gc_id_mark;
    GCTraceCPUTime tcpu(GenMarkSweep::gc_tracer());
    GCTraceTime(Info, gc) t("Pause Full", nullptr, gc_cause(), true);

    print_heap_before_gc();

    if (!prepared_for_verification && run_verification &&
        VerifyGCLevel <= 1 && VerifyBeforeGC) {
      prepare_for_verify();
    }

    if (!do_young_collection) {
      gc_prologue(complete);
      increment_total_collections(complete);
    }

    // Accounting quirk: total full collections would be incremented when "complete"
    // is set, by calling increment_total_collections above. However, we also need to
    // account Full collections that had "complete" unset.
    if (!complete) {
      increment_total_full_collections();
    }

    CodeCache::on_gc_marking_cycle_start();

    collect_generation(_old_gen,
                       full,
                       size,
                       is_tlab,
                       run_verification && VerifyGCLevel <= 1,
                       do_clear_all_soft_refs);

    CodeCache::on_gc_marking_cycle_finish();
    CodeCache::arm_all_nmethods();

    // Adjust generation sizes.
    _old_gen->compute_new_size();
    _young_gen->compute_new_size();

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge(/*at_safepoint*/true);
    DEBUG_ONLY(MetaspaceUtils::verify();)

    // Need to clear claim bits for the next mark.
    ClassLoaderDataGraph::clear_claimed_marks();

    // Resize the metaspace capacity after full collections
    MetaspaceGC::compute_new_size();
    update_full_collections_completed();

    print_heap_change(pre_gc_values);

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    // Need to tell the epilogue code we are done with Full GC, regardless of
    // what the initial value of the "complete" flag was.
    gc_epilogue(true);

    print_heap_after_gc();
  }
}

bool GenCollectedHeap::should_do_full_collection(size_t size, bool full, bool is_tlab,
                                                 GenCollectedHeap::GenerationType max_gen) const {
  return max_gen == OldGen && _old_gen->should_collect(full, size, is_tlab);
}

void GenCollectedHeap::register_nmethod(nmethod* nm) {
  ScavengableNMethods::register_nmethod(nm);
}

void GenCollectedHeap::unregister_nmethod(nmethod* nm) {
  ScavengableNMethods::unregister_nmethod(nm);
}

void GenCollectedHeap::verify_nmethod(nmethod* nm) {
  ScavengableNMethods::verify_nmethod(nm);
}

void GenCollectedHeap::prune_scavengable_nmethods() {
  ScavengableNMethods::prune_nmethods();
}

HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  GCCauseSetter x(this, GCCause::_allocation_failure);
  HeapWord* result = nullptr;

  assert(size != 0, "Precondition violated");
  if (GCLocker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;   // Could be null if we are out of space.
  } else if (!incremental_collection_will_fail(false /* don't consult_young */)) {
    // Do an incremental collection.
    do_collection(false,                     // full
                  false,                     // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  GenCollectedHeap::OldGen); // max_generation
  } else {
    log_trace(gc)(" :: Trying full because partial may fail :: ");
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    do_collection(true,                      // full
                  false,                     // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  GenCollectedHeap::OldGen); // max_generation
  }

  result = attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != nullptr) {
    assert(is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != nullptr) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    do_collection(true,                      // full
                  true,                      // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  GenCollectedHeap::OldGen); // max_generation
  }

  result = attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != nullptr) {
    assert(is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!soft_ref_policy()->should_clear_all_soft_refs(),
         "Flag should have been handled and cleared prior to this point");

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return nullptr;
}

#ifdef ASSERT
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!GenCollectedHeap::heap()->is_in_partial_collection(*p),
           "Referent should not be scavengable.");
  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif

void GenCollectedHeap::process_roots(ScanningOption so,
                                     OopClosure* strong_roots,
                                     CLDClosure* strong_cld_closure,
                                     CLDClosure* weak_cld_closure,
                                     CodeBlobToOopClosure* code_roots) {
  // General roots.
  assert(code_roots != nullptr, "code root closure should always be set");

  ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);

  // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
  CodeBlobToOopClosure* roots_from_code_p = (so & SO_AllCodeCache) ? nullptr : code_roots;

  Threads::oops_do(strong_roots, roots_from_code_p);

  OopStorageSet::strong_oops_do(strong_roots);

  if (so & SO_ScavengeCodeCache) {
    assert(code_roots != nullptr, "must supply closure for code cache");

    // We only visit parts of the CodeCache when scavenging.
    ScavengableNMethods::nmethods_do(code_roots);
  }
  if (so & SO_AllCodeCache) {
    assert(code_roots != nullptr, "must supply closure for code cache");

    // CMSCollector uses this to do intermediate-strength collections.
    // We scan the entire code cache, since CodeCache::do_unloading is not called.
    CodeCache::blobs_do(code_roots);
  }
  // Verify that the code cache contents are not subject to
  // movement by a scavenging collection.
  DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
  DEBUG_ONLY(ScavengableNMethods::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
}

void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
  WeakProcessor::oops_do(root_closure);
}

bool GenCollectedHeap::no_allocs_since_save_marks() {
  return _young_gen->no_allocs_since_save_marks() &&
         _old_gen->no_allocs_since_save_marks();
}

// public collection interfaces
void GenCollectedHeap::collect(GCCause::Cause cause) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  unsigned int gc_count_before;
  unsigned int full_gc_count_before;

  {
    MutexLocker ml(Heap_lock);
    // Read the GC count while holding the Heap_lock
    gc_count_before      = total_collections();
    full_gc_count_before = total_full_collections();
  }

  if (GCLocker::should_discard(cause, gc_count_before)) {
    return;
  }

  bool should_run_young_gc =  (cause == GCCause::_wb_young_gc)
                           || (cause == GCCause::_gc_locker)
                DEBUG_ONLY(|| (cause == GCCause::_scavenge_alot));

  const GenerationType max_generation = should_run_young_gc
                                      ? YoungGen
                                      : OldGen;

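  // For explicit full-GC causes, retry until a full collection has actually
  // completed; the VM operation can be skipped, e.g. while the GCLocker is
  // held, in which case we stall until it clears and try again.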
  while (true) {
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_generation);
    VMThread::execute(&op);

    if (!GCCause::is_explicit_full_gc(cause)) {
      return;
    }

    {
      MutexLocker ml(Heap_lock);
      // Read the GC count while holding the Heap_lock
      if (full_gc_count_before != total_full_collections()) {
        return;
      }
    }

    if (GCLocker::is_active_and_needs_gc()) {
      // If GCLocker is active, wait until clear before retrying.
      GCLocker::stall_until_clear();
    }
  }
}

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_full_collection(clear_all_soft_refs, OldGen);
}

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          GenerationType last_generation) {
  do_collection(true,                   // full
                clear_all_soft_refs,    // clear_all_soft_refs
                0,                      // size
                false,                  // is_tlab
                last_generation);       // last_generation
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full
  if (gc_cause() == GCCause::_gc_locker && incremental_collection_failed()) {
    log_debug(gc, jni)("GC locker: Trying a full collection because scavenge failed");
    // This time allow the old gen to be collected as well
    do_collection(true,                // full
                  clear_all_soft_refs, // clear_all_soft_refs
                  0,                   // size
                  false,               // is_tlab
                  OldGen);             // last_generation
  }
}

bool GenCollectedHeap::is_in_young(const void* p) const {
  bool result = p < _old_gen->reserved().start();
  assert(result == _young_gen->is_in_reserved(p),
         "incorrect test - result=%d, p=" PTR_FORMAT, result, p2i(p));
  return result;
}

bool GenCollectedHeap::requires_barriers(stackChunkOop obj) const {
  return !is_in_young(obj);
}

// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
  return _young_gen->is_in(p) || _old_gen->is_in(p);
}

#ifdef ASSERT
// Don't implement this by using is_in_young().  This method is used
// in some cases to check that is_in_young() is correct.
bool GenCollectedHeap::is_in_partial_collection(const void* p) {
  assert(is_in_reserved(p) || p == nullptr,
         "Does not work if address is non-null and outside of the heap");
  return p < _young_gen->reserved().end() && p != nullptr;
}
#endif

void GenCollectedHeap::oop_iterate(OopIterateClosure* cl) {
  _young_gen->oop_iterate(cl);
  _old_gen->oop_iterate(cl);
}

void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  _young_gen->object_iterate(cl);
  _old_gen->object_iterate(cl);
}

Space* GenCollectedHeap::space_containing(const void* addr) const {
  Space* res = _young_gen->space_containing(addr);
  if (res != nullptr) {
    return res;
  }
  res = _old_gen->space_containing(addr);
  assert(res != nullptr, "Could not find containing space");
  return res;
}

HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  if (_young_gen->is_in_reserved(addr)) {
    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
    return _young_gen->block_start(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
  return _old_gen->block_start(addr);
}

bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  if (_young_gen->is_in_reserved(addr)) {
    return _young_gen->block_is_obj(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  return _old_gen->block_is_obj(addr);
}

size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->tlab_capacity();
}

size_t GenCollectedHeap::tlab_used(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->tlab_used();
}

size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->unsafe_max_tlab_alloc();
}

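// TLAB refills take the same slow path as ordinary allocations. Note that on
// success the full requested_size is granted; min_size is not used to trim
// the request.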
HeapWord* GenCollectedHeap::allocate_new_tlab(size_t min_size,
                                              size_t requested_size,
                                              size_t* actual_size) {
  HeapWord* result = mem_allocate_work(requested_size /* size */,
                                       true /* is_tlab */);
  if (result != nullptr) {
    *actual_size = requested_size;
  }

  return result;
}

void GenCollectedHeap::prepare_for_verify() {
  ensure_parsability(false);        // no need to retire TLABs
}

void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
  if (old_to_young) {
    cl->do_generation(_old_gen);
    cl->do_generation(_young_gen);
  } else {
    cl->do_generation(_young_gen);
    cl->do_generation(_old_gen);
  }
}

bool GenCollectedHeap::is_maximal_no_gc() const {
  return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
}

void GenCollectedHeap::save_marks() {
  _young_gen->save_marks();
  _old_gen->save_marks();
}

GenCollectedHeap* GenCollectedHeap::heap() {
  // SerialHeap is the only subtype of GenCollectedHeap.
  return named_heap<GenCollectedHeap>(CollectedHeap::Serial);
}

#if INCLUDE_SERIALGC
void GenCollectedHeap::prepare_for_compaction() {
  // Start by compacting into same gen.
  CompactPoint cp(_old_gen);
  _old_gen->prepare_for_compaction(&cp);
  _young_gen->prepare_for_compaction(&cp);
}
#endif // INCLUDE_SERIALGC

void GenCollectedHeap::verify(VerifyOption option /* ignored */) {
  log_debug(gc, verify)("%s", _old_gen->name());
  _old_gen->verify();

  log_debug(gc, verify)("%s", _young_gen->name());
  _young_gen->verify();

  log_debug(gc, verify)("RemSet");
  rem_set()->verify();
}

void GenCollectedHeap::print_on(outputStream* st) const {
  if (_young_gen != nullptr) {
    _young_gen->print_on(st);
  }
  if (_old_gen != nullptr) {
    _old_gen->print_on(st);
  }
  MetaspaceUtils::print_on(st);
}

void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
}

bool GenCollectedHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<GenCollectedHeap>::print_location(st, addr);
}

void GenCollectedHeap::print_tracing_info() const {
  if (log_is_enabled(Debug, gc, heap, exit)) {
    LogStreamHandle(Debug, gc, heap, exit) lsh;
    _young_gen->print_summary_info_on(&lsh);
    _old_gen->print_summary_info_on(&lsh);
  }
}

void GenCollectedHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
  const DefNewGeneration* const def_new_gen = (DefNewGeneration*) young_gen();

  log_info(gc, heap)(HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(def_new_gen->short_name(),
                                             pre_gc_values.young_gen_used(),
                                             pre_gc_values.young_gen_capacity(),
                                             def_new_gen->used(),
                                             def_new_gen->capacity()),
                     HEAP_CHANGE_FORMAT_ARGS("Eden",
                                             pre_gc_values.eden_used(),
                                             pre_gc_values.eden_capacity(),
                                             def_new_gen->eden()->used(),
                                             def_new_gen->eden()->capacity()),
                     HEAP_CHANGE_FORMAT_ARGS("From",
                                             pre_gc_values.from_used(),
                                             pre_gc_values.from_capacity(),
                                             def_new_gen->from()->used(),
                                             def_new_gen->from()->capacity()));
  log_info(gc, heap)(HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(old_gen()->short_name(),
                                             pre_gc_values.old_gen_used(),
                                             pre_gc_values.old_gen_capacity(),
                                             old_gen()->used(),
                                             old_gen()->capacity()));
  MetaspaceUtils::print_metaspace_change(pre_gc_values.metaspace_sizes());
}

class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_prologue(_full);
  }
  GenGCPrologueClosure(bool full) : _full(full) {}
};

void GenCollectedHeap::gc_prologue(bool full) {
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");

  // Fill TLAB's and such
  ensure_parsability(true);   // retire TLABs

  // Walk generations
  GenGCPrologueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.
}

class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_epilogue(_full);
  }
  GenGCEpilogueClosure(bool full) : _full(full) {}
};

void GenCollectedHeap::gc_epilogue(bool full) {
#if COMPILER2_OR_JVMCI
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
#endif // COMPILER2_OR_JVMCI

  resize_all_tlabs();

  GenGCEpilogueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.

  MetaspaceCounters::update_performance_counters();
}

#ifndef PRODUCT
class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->record_spaces_top();
  }
};

void GenCollectedHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    GenGCSaveTopsBeforeGCClosure blk;
    generation_iterate(&blk, false);  // not old-to-young.
  }
}
#endif  // not PRODUCT

class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->ensure_parsability();
  }
};

void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  GenEnsureParsabilityClosure ep_cl;
  generation_iterate(&ep_cl, false);
}