/*
 * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "compiler/oopMap.hpp"
#include "gc/serial/cardTableRS.hpp"
#include "gc/serial/defNewGeneration.hpp"
#include "gc/serial/genMarkSweep.hpp"
#include "gc/serial/markSweep.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcInitLogger.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageParState.inline.hpp"
#include "gc/shared/oopStorageSet.inline.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/workerThread.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/threads.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/autoRestore.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/vmError.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

GenCollectedHeap::GenCollectedHeap(Generation::Name young,
                                   Generation::Name old,
                                   const char* policy_counters_name) :
  CollectedHeap(),
  _young_gen(nullptr),
  _old_gen(nullptr),
  _young_gen_spec(new GenerationSpec(young,
                                     NewSize,
                                     MaxNewSize,
                                     GenAlignment)),
  _old_gen_spec(new GenerationSpec(old,
                                   OldSize,
                                   MaxOldSize,
                                   GenAlignment)),
  _rem_set(nullptr),
  _soft_ref_gen_policy(),
  _size_policy(nullptr),
  _gc_policy_counters(new GCPolicyCounters(policy_counters_name, 2, 2)),
  _incremental_collection_failed(false),
  _full_collections_completed(0),
  _young_manager(nullptr),
  _old_manager(nullptr) {
}

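// Reserve a single contiguous address range and split it between the two
// generations: the young generation occupies the low end of the range and
// the old generation the high end, so a single address comparison can
// classify a pointer (see is_in_young()). The card-table remembered set
// covers the whole reserved region.
//
//   heap_rs.base()                               heap_rs.end()
//     |                                                |
//     +------------------------+-----------------------+
//     | young (max_size)       | old (max_size)        |
//     +------------------------+-----------------------+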
jint GenCollectedHeap::initialize() {
  // Allocate space for the heap.

  ReservedHeapSpace heap_rs = allocate(HeapAlignment);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  initialize_reserved_region(heap_rs);

  ReservedSpace young_rs = heap_rs.first_part(_young_gen_spec->max_size());
  ReservedSpace old_rs = heap_rs.last_part(_young_gen_spec->max_size());

  _rem_set = create_rem_set(heap_rs.region());
  _rem_set->initialize(young_rs.base(), old_rs.base());

  CardTableBarrierSet *bs = new CardTableBarrierSet(_rem_set);
  bs->initialize();
  BarrierSet::set_barrier_set(bs);

  _young_gen = _young_gen_spec->init(young_rs, rem_set());
  _old_gen = _old_gen_spec->init(old_rs, rem_set());

  GCInitLogger::print();

  return JNI_OK;
}

CardTableRS* GenCollectedHeap::create_rem_set(const MemRegion& reserved_region) {
  return new CardTableRS(reserved_region);
}

void GenCollectedHeap::initialize_size_policy(size_t init_eden_size,
                                              size_t init_promo_size,
                                              size_t init_survivor_size) {
  const double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0;
  _size_policy = new AdaptiveSizePolicy(init_eden_size,
                                        init_promo_size,
                                        init_survivor_size,
                                        max_gc_pause_sec,
                                        GCTimeRatio);
}

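// Reserve backing memory for the maximum sizes of both generations combined.
// The sum is checked for overflow, and is asserted to be a multiple of the
// requested alignment, which in turn must be a multiple of the page size in
// use.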
ReservedHeapSpace GenCollectedHeap::allocate(size_t alignment) {
  // Now figure out the total size.
  const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
  assert(alignment % pageSize == 0, "Must be");

  // Check for overflow.
  size_t total_reserved = _young_gen_spec->max_size() + _old_gen_spec->max_size();
  if (total_reserved < _young_gen_spec->max_size()) {
    vm_exit_during_initialization("The size of the object heap + VM data exceeds "
                                  "the maximum representable size");
  }
  assert(total_reserved % alignment == 0,
         "Gen size; total_reserved=" SIZE_FORMAT ", alignment="
         SIZE_FORMAT, total_reserved, alignment);

  ReservedHeapSpace heap_rs = Universe::reserve_heap(total_reserved, alignment);
  size_t used_page_size = heap_rs.page_size();

  os::trace_page_sizes("Heap",
                       MinHeapSize,
                       total_reserved,
                       used_page_size,
                       heap_rs.base(),
                       heap_rs.size());

  return heap_rs;
}

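// An object is "scavengable" if it may be moved by a minor (young)
// collection, i.e. if it lives in the young generation. ScavengableNMethods
// uses this closure to decide which nmethods contain oops that a scavenge
// can move.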
class GenIsScavengable : public BoolObjectClosure {
public:
  bool do_object_b(oop obj) {
    return GenCollectedHeap::heap()->is_in_young(obj);
  }
};

static GenIsScavengable _is_scavengable;

void GenCollectedHeap::post_initialize() {
  CollectedHeap::post_initialize();

  DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;

  def_new_gen->ref_processor_init();

  initialize_size_policy(def_new_gen->eden()->capacity(),
                         _old_gen->capacity(),
                         def_new_gen->from()->capacity());

  MarkSweep::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
}

PreGenGCValues GenCollectedHeap::get_pre_gc_values() const {
  const DefNewGeneration* const def_new_gen = (DefNewGeneration*) young_gen();

  return PreGenGCValues(def_new_gen->used(),
                        def_new_gen->capacity(),
                        def_new_gen->eden()->used(),
                        def_new_gen->eden()->capacity(),
                        def_new_gen->from()->used(),
                        def_new_gen->from()->capacity(),
                        old_gen()->used(),
                        old_gen()->capacity());
}

GenerationSpec* GenCollectedHeap::young_gen_spec() const {
  return _young_gen_spec;
}

GenerationSpec* GenCollectedHeap::old_gen_spec() const {
  return _old_gen_spec;
}

size_t GenCollectedHeap::capacity() const {
  return _young_gen->capacity() + _old_gen->capacity();
}

size_t GenCollectedHeap::used() const {
  return _young_gen->used() + _old_gen->used();
}

void GenCollectedHeap::save_used_regions() {
  _old_gen->save_used_region();
  _young_gen->save_used_region();
}

size_t GenCollectedHeap::max_capacity() const {
  return _young_gen->max_capacity() + _old_gen->max_capacity();
}

// Update the _full_collections_completed counter
// at the end of a stop-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  return _full_collections_completed;
}

// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)
// . heap memory is tight -- the most recent previous collection
//   was a full collection because a partial collection (would
//   have) failed and is likely to fail again
bool GenCollectedHeap::should_try_older_generation_allocation(size_t word_size) const {
  size_t young_capacity = _young_gen->capacity_before_gc();
  return    (word_size > heap_word_size(young_capacity))
         || GCLocker::is_active_and_needs_gc()
         || incremental_collection_failed();
}

HeapWord* GenCollectedHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
  HeapWord* result = nullptr;
  if (_old_gen->should_allocate(size, is_tlab)) {
    result = _old_gen->expand_and_allocate(size, is_tlab);
  }
  if (result == nullptr) {
    if (_young_gen->should_allocate(size, is_tlab)) {
      result = _young_gen->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == nullptr || is_in_reserved(result), "result not in heap");
  return result;
}

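// The allocation slow path. Loop until the request is satisfied or it is
// clear that it cannot be:
//  1. try a lock-free allocation in the young generation;
//  2. under the Heap_lock, retry in the young generation, also trying the
//     old generation for large requests or when memory is tight;
//  3. if the GCLocker is active, either expand the heap or stall until the
//     JNI critical sections have cleared, then retry;
//  4. otherwise submit a VM operation, which performs a collection and
//     retries the allocation on the VM thread.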
HeapWord* GenCollectedHeap::mem_allocate_work(size_t size,
                                              bool is_tlab) {

  HeapWord* result = nullptr;

  // Loop until the allocation is satisfied, or unsatisfied after GC.
  for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {

    // First allocation attempt is lock-free.
    Generation *young = _young_gen;
    if (young->should_allocate(size, is_tlab)) {
      result = young->par_allocate(size, is_tlab);
      if (result != nullptr) {
        assert(is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    uint gc_count_before;  // Read inside the Heap_lock locked region.
    {
      MutexLocker ml(Heap_lock);
      log_trace(gc, alloc)("GenCollectedHeap::mem_allocate_work: attempting locked slow path allocation");
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = !should_try_older_generation_allocation(size);

      result = attempt_allocation(size, is_tlab, first_only);
      if (result != nullptr) {
        assert(is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GCLocker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return nullptr;  // Caller will retry allocating individual object.
        }
        if (!is_maximal_no_gc()) {
          // Try and expand heap to satisfy request.
          result = expand_heap_and_allocate(size, is_tlab);
          // Result could be null if we are out of space.
          if (result != nullptr) {
            return result;
          }
        }

        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
          return nullptr; // We didn't get to do a GC and we didn't get any memory.
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return nullptr;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = total_collections();
    }

    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
        assert(result == nullptr, "must be null if gc_locked() is true");
        continue;  // Retry and/or stall as necessary.
      }

      assert(result == nullptr || is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("GenCollectedHeap::mem_allocate_work retries %d times,"
                            " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}

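// Try the young generation first. Fall back to the old generation only if
// the young-generation attempt fails and the caller permits it: "first_only"
// is false when the request is large or memory is tight (see
// should_try_older_generation_allocation()).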
HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res = nullptr;

  if (_young_gen->should_allocate(size, is_tlab)) {
    res = _young_gen->allocate(size, is_tlab);
    if (res != nullptr || first_only) {
      return res;
    }
  }

  if (_old_gen->should_allocate(size, is_tlab)) {
    res = _old_gen->allocate(size, is_tlab);
  }

  return res;
}

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
  return mem_allocate_work(size,
                           false /* is_tlab */);
}

bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
         _gc_cause == GCCause::_wb_full_gc;
}

void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
                                          bool is_tlab, bool run_verification, bool clear_soft_refs) {
  FormatBuffer<> title("Collect gen: %s", gen->short_name());
  GCTraceTime(Trace, gc, phases) t1(title);
  TraceCollectorStats tcs(gen->counters());
  TraceMemoryManagerStats tmms(gen->gc_manager(), gc_cause(), heap()->is_young_gen(gen) ? "end of minor GC" : "end of major GC");

  gen->stat_record()->invocations++;
  gen->stat_record()->accumulated_time.start();

  // Must be done anew before each collection because
  // a previous collection will do mangling and will
  // change top of some spaces.
  record_gen_tops_before_GC();

  log_trace(gc)("%s invoke=%d size=" SIZE_FORMAT, heap()->is_young_gen(gen) ? "Young" : "Old", gen->stat_record()->invocations, size * HeapWordSize);

  if (run_verification && VerifyBeforeGC) {
    Universe::verify("Before GC");
  }
  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::clear());

  // Do collection work
  {
    save_marks();   // save marks for all gens

    gen->collect(full, clear_soft_refs, size, is_tlab);
  }

  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::update_pointers());

  gen->stat_record()->accumulated_time.stop();

  update_gc_stats(gen, full);

  if (run_verification && VerifyAfterGC) {
    Universe::verify("After GC");
  }
}

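// Perform a collection at a safepoint. A young ("Pause Young") collection is
// attempted first, unless a complete collection that skips the preceding
// scavenge was requested; if the young collection does not suffice, or an
// old-generation collection was requested, a full mark-sweep ("Pause Full")
// of the entire heap follows.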
void GenCollectedHeap::do_collection(bool           full,
                                     bool           clear_all_soft_refs,
                                     size_t         size,
                                     bool           is_tlab,
                                     GenerationType max_generation) {
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread(), "only VM thread");
  assert(Heap_lock->is_locked(),
         "the requesting thread should have the Heap_lock");
  guarantee(!is_stw_gc_active(), "collection is not reentrant");

  if (GCLocker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                          soft_ref_policy()->should_clear_all_soft_refs();

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, soft_ref_policy());

  AutoModifyRestore<bool> temporarily(_is_stw_gc_active, true);

  bool complete = full && (max_generation == OldGen);
  bool old_collects_young = complete && !ScavengeBeforeFullGC;
  bool do_young_collection = !old_collects_young && _young_gen->should_collect(full, size, is_tlab);

  const PreGenGCValues pre_gc_values = get_pre_gc_values();

  bool run_verification = total_collections() >= VerifyGCStartAt;
  bool prepared_for_verification = false;
  bool do_full_collection = false;

  if (do_young_collection) {
    GCIdMark gc_id_mark;
    GCTraceCPUTime tcpu(((DefNewGeneration*)_young_gen)->gc_tracer());
    GCTraceTime(Info, gc) t("Pause Young", nullptr, gc_cause(), true);

    print_heap_before_gc();

    if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
      prepare_for_verify();
      prepared_for_verification = true;
    }

    gc_prologue(complete);
    increment_total_collections(complete);

    collect_generation(_young_gen,
                       full,
                       size,
                       is_tlab,
                       run_verification && VerifyGCLevel <= 0,
                       do_clear_all_soft_refs);

    if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
        size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {
      // Allocation request was met by young GC.
      size = 0;
    }

    // Ask if the young collection is enough. If so, do the final steps for
    // the young collection, and fall through to the end.
    do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation);
    if (!do_full_collection) {
      // Adjust generation sizes.
      _young_gen->compute_new_size();

      print_heap_change(pre_gc_values);

      // Track memory usage and detect low memory after GC finishes
      MemoryService::track_memory_usage();

      gc_epilogue(complete);
    }

    print_heap_after_gc();

  } else {
    // No young collection, ask if we need to perform Full collection.
    do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation);
  }

  if (do_full_collection) {
    GCIdMark gc_id_mark;
    GCTraceCPUTime tcpu(GenMarkSweep::gc_tracer());
    GCTraceTime(Info, gc) t("Pause Full", nullptr, gc_cause(), true);

    print_heap_before_gc();

    if (!prepared_for_verification && run_verification &&
        VerifyGCLevel <= 1 && VerifyBeforeGC) {
      prepare_for_verify();
    }

    if (!do_young_collection) {
      gc_prologue(complete);
      increment_total_collections(complete);
    }

    // Accounting quirk: total full collections would be incremented when "complete"
    // is set, by calling increment_total_collections above. However, we also need to
    // account for Full collections that had "complete" unset.
    if (!complete) {
      increment_total_full_collections();
    }

    CodeCache::on_gc_marking_cycle_start();

    ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
                              false /* unregister_nmethods_during_purge */,
                              false /* lock_codeblob_free_separately */);

    collect_generation(_old_gen,
                       full,
                       size,
                       is_tlab,
                       run_verification && VerifyGCLevel <= 1,
                       do_clear_all_soft_refs);

    CodeCache::on_gc_marking_cycle_finish();
    CodeCache::arm_all_nmethods();

    // Adjust generation sizes.
    _old_gen->compute_new_size();
    _young_gen->compute_new_size();

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge(true /* at_safepoint */);
    DEBUG_ONLY(MetaspaceUtils::verify();)

    // Need to clear claim bits for the next mark.
    ClassLoaderDataGraph::clear_claimed_marks();

    // Resize the metaspace capacity after full collections
    MetaspaceGC::compute_new_size();
    update_full_collections_completed();

    print_heap_change(pre_gc_values);

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    // Need to tell the epilogue code we are done with Full GC, regardless of
    // the initial value of the "complete" flag.
    gc_epilogue(true);

    print_heap_after_gc();
  }
}

bool GenCollectedHeap::should_do_full_collection(size_t size, bool full, bool is_tlab,
                                                 GenCollectedHeap::GenerationType max_gen) const {
  return max_gen == OldGen && _old_gen->should_collect(full, size, is_tlab);
}

void GenCollectedHeap::register_nmethod(nmethod* nm) {
  ScavengableNMethods::register_nmethod(nm);
}

void GenCollectedHeap::unregister_nmethod(nmethod* nm) {
  ScavengableNMethods::unregister_nmethod(nm);
}

void GenCollectedHeap::verify_nmethod(nmethod* nm) {
  ScavengableNMethods::verify_nmethod(nm);
}

void GenCollectedHeap::prune_scavengable_nmethods() {
  ScavengableNMethods::prune_nmethods_not_into_young();
}

void GenCollectedHeap::prune_unlinked_nmethods() {
  ScavengableNMethods::prune_unlinked_nmethods();
}

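// Called when an allocation has failed: escalate through progressively more
// drastic measures until the request is satisfied or we are genuinely out of
// memory. If the GCLocker is active we can only try to expand the heap;
// otherwise we try an incremental collection, then a full collection, then
// heap expansion, and finally a maximally compacting full collection that
// also clears soft references. A null return here typically results in an
// OutOfMemoryError in the caller.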
HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  GCCauseSetter x(this, GCCause::_allocation_failure);
  HeapWord* result = nullptr;

  assert(size != 0, "Precondition violated");
  if (GCLocker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;   // Could be null if we are out of space.
  } else if (!incremental_collection_will_fail(false /* don't consult_young */)) {
    // Do an incremental collection.
    do_collection(false,                     // full
                  false,                     // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  GenCollectedHeap::OldGen); // max_generation
  } else {
    log_trace(gc)(" :: Trying full because partial may fail :: ");
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    do_collection(true,                      // full
                  false,                     // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  GenCollectedHeap::OldGen); // max_generation
  }

  result = attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != nullptr) {
    assert(is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != nullptr) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    do_collection(true,                      // full
                  true,                      // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  GenCollectedHeap::OldGen); // max_generation
  }

  result = attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != nullptr) {
    assert(is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!soft_ref_policy()->should_clear_all_soft_refs(),
    "Flag should have been handled and cleared prior to this point");

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return nullptr;
}

#ifdef ASSERT
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!GenCollectedHeap::heap()->is_in_partial_collection(*p),
           "Referent should not be scavengable.");
  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif

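// Apply the given closures to the strong roots of the VM: class loader data,
// thread stacks, and the strong OopStorages, plus, depending on the scanning
// option, either the scavengable nmethods or the entire code cache.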
void GenCollectedHeap::process_roots(ScanningOption so,
                                     OopClosure* strong_roots,
                                     CLDClosure* strong_cld_closure,
                                     CLDClosure* weak_cld_closure,
                                     CodeBlobToOopClosure* code_roots) {
  // General roots.
  assert(code_roots != nullptr, "code root closure should always be set");

  ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);

  // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
  CodeBlobToOopClosure* roots_from_code_p = (so & SO_AllCodeCache) ? nullptr : code_roots;

  Threads::oops_do(strong_roots, roots_from_code_p);

  OopStorageSet::strong_oops_do(strong_roots);

  if (so & SO_ScavengeCodeCache) {
    assert(code_roots != nullptr, "must supply closure for code cache");

    // We only visit parts of the CodeCache when scavenging.
    ScavengableNMethods::nmethods_do(code_roots);
  }
  if (so & SO_AllCodeCache) {
    assert(code_roots != nullptr, "must supply closure for code cache");

    // We scan the entire code cache, since CodeCache::do_unloading is not called.
    CodeCache::blobs_do(code_roots);
  }
  // Verify that the code cache contents are not subject to
  // movement by a scavenging collection.
  DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
  DEBUG_ONLY(ScavengableNMethods::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
}

void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
  WeakProcessor::oops_do(root_closure);
}

bool GenCollectedHeap::no_allocs_since_save_marks() {
  return _young_gen->no_allocs_since_save_marks() &&
         _old_gen->no_allocs_since_save_marks();
}

// public collection interfaces
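// Entry point for collections requested outside a safepoint (e.g.
// System.gc() or WhiteBox-triggered GCs). The collection counts are sampled
// under the Heap_lock and passed to the VM operation so that a collection
// that already happened in the meantime is not repeated; explicit full GCs
// are retried until one actually completes.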
void GenCollectedHeap::collect(GCCause::Cause cause) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  unsigned int gc_count_before;
  unsigned int full_gc_count_before;

  {
    MutexLocker ml(Heap_lock);
    // Read the GC count while holding the Heap_lock
    gc_count_before      = total_collections();
    full_gc_count_before = total_full_collections();
  }

  if (GCLocker::should_discard(cause, gc_count_before)) {
    return;
  }

  bool should_run_young_gc =  (cause == GCCause::_wb_young_gc)
                           || (cause == GCCause::_gc_locker)
                DEBUG_ONLY(|| (cause == GCCause::_scavenge_alot));

  const GenerationType max_generation = should_run_young_gc
                                      ? YoungGen
                                      : OldGen;

  while (true) {
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_generation);
    VMThread::execute(&op);

    if (!GCCause::is_explicit_full_gc(cause)) {
      return;
    }

    {
      MutexLocker ml(Heap_lock);
      // Read the GC count while holding the Heap_lock
      if (full_gc_count_before != total_full_collections()) {
        return;
      }
    }

    if (GCLocker::is_active_and_needs_gc()) {
      // If GCLocker is active, wait until clear before retrying.
      GCLocker::stall_until_clear();
    }
  }
}

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_full_collection(clear_all_soft_refs, OldGen);
}

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          GenerationType last_generation) {
  do_collection(true,                   // full
                clear_all_soft_refs,    // clear_all_soft_refs
                0,                      // size
                false,                  // is_tlab
                last_generation);       // last_generation
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full
  if (gc_cause() == GCCause::_gc_locker && incremental_collection_failed()) {
    log_debug(gc, jni)("GC locker: Trying a full collection because scavenge failed");
    // This time allow the old gen to be collected as well
    do_collection(true,                // full
                  clear_all_soft_refs, // clear_all_soft_refs
                  0,                   // size
                  false,               // is_tlab
                  OldGen);             // last_generation
  }
}

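// Relies on the heap layout set up in initialize(): the young generation is
// reserved below the old generation, so a single pointer comparison against
// the start of the old generation suffices. The assert cross-checks the
// answer against the young generation's reserved region.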
bool GenCollectedHeap::is_in_young(const void* p) const {
  bool result = p < _old_gen->reserved().start();
  assert(result == _young_gen->is_in_reserved(p),
         "incorrect test - result=%d, p=" PTR_FORMAT, result, p2i(p));
  return result;
}

bool GenCollectedHeap::requires_barriers(stackChunkOop obj) const {
  return !is_in_young(obj);
}

// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
  return _young_gen->is_in(p) || _old_gen->is_in(p);
}

#ifdef ASSERT
// Don't implement this by using is_in_young().  This method is used
// in some cases to check that is_in_young() is correct.
bool GenCollectedHeap::is_in_partial_collection(const void* p) {
  assert(is_in_reserved(p) || p == nullptr,
    "Does not work if address is non-null and outside of the heap");
  return p < _young_gen->reserved().end() && p != nullptr;
}
#endif

void GenCollectedHeap::oop_iterate(OopIterateClosure* cl) {
  _young_gen->oop_iterate(cl);
  _old_gen->oop_iterate(cl);
}

void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  _young_gen->object_iterate(cl);
  _old_gen->object_iterate(cl);
}

Space* GenCollectedHeap::space_containing(const void* addr) const {
  Space* res = _young_gen->space_containing(addr);
  if (res != nullptr) {
    return res;
  }
  res = _old_gen->space_containing(addr);
  assert(res != nullptr, "Could not find containing space");
  return res;
}

HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  if (_young_gen->is_in_reserved(addr)) {
    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
    return _young_gen->block_start(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
  return _old_gen->block_start(addr);
}

bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  if (_young_gen->is_in_reserved(addr)) {
    return _young_gen->block_is_obj(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  return _old_gen->block_is_obj(addr);
}

size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->tlab_capacity();
}

size_t GenCollectedHeap::tlab_used(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->tlab_used();
}

size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->unsafe_max_tlab_alloc();
}

HeapWord* GenCollectedHeap::allocate_new_tlab(size_t min_size,
                                              size_t requested_size,
                                              size_t* actual_size) {
  HeapWord* result = mem_allocate_work(requested_size /* size */,
                                       true /* is_tlab */);
  if (result != nullptr) {
    *actual_size = requested_size;
  }

  return result;
}

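// "Scratch" blocks are chunks of free space that the generations can lend
// out as temporary working space during a collection; DefNewGeneration, for
// example, contributes its to-space. The helpers below keep the contributed
// list sorted so that the largest block is handed out first.
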
// Requires "*prev_ptr" to be non-null. Removes a block of minimal size from
// the list headed by "*prev_ptr" and returns it.
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  bool first = true;
  size_t min_size = 0;   // "first" makes this conceptually infinite.
  ScratchBlock **smallest_ptr = nullptr, *smallest = nullptr;
  ScratchBlock  *cur = *prev_ptr;
  while (cur) {
    assert(*prev_ptr == cur, "just checking");
    if (first || cur->num_words < min_size) {
      smallest_ptr = prev_ptr;
      smallest     = cur;
      min_size     = smallest->num_words;
      first        = false;
    }
    prev_ptr = &cur->next;
    cur     =  cur->next;
  }
  smallest      = *smallest_ptr;
  *smallest_ptr = smallest->next;
  return smallest;
}

// Sort the scratch block list headed by "list" into decreasing size order,
// and set "list" to the result.
static void sort_scratch_list(ScratchBlock*& list) {
  ScratchBlock* sorted = nullptr;
  ScratchBlock* unsorted = list;
  while (unsorted) {
    ScratchBlock *smallest = removeSmallestScratch(&unsorted);
    smallest->next  = sorted;
    sorted          = smallest;
  }
  list = sorted;
}

ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                               size_t max_alloc_words) {
  ScratchBlock* res = nullptr;
  _young_gen->contribute_scratch(res, requestor, max_alloc_words);
  _old_gen->contribute_scratch(res, requestor, max_alloc_words);
  sort_scratch_list(res);
  return res;
}

void GenCollectedHeap::release_scratch() {
  _young_gen->reset_scratch();
  _old_gen->reset_scratch();
}

void GenCollectedHeap::prepare_for_verify() {
  ensure_parsability(false);        // no need to retire TLABs
}

void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
  if (old_to_young) {
    cl->do_generation(_old_gen);
    cl->do_generation(_young_gen);
  } else {
    cl->do_generation(_young_gen);
    cl->do_generation(_old_gen);
  }
}

bool GenCollectedHeap::is_maximal_no_gc() const {
  return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
}

void GenCollectedHeap::save_marks() {
  _young_gen->save_marks();
  _old_gen->save_marks();
}

GenCollectedHeap* GenCollectedHeap::heap() {
  // SerialHeap is the only subtype of GenCollectedHeap.
  return named_heap<GenCollectedHeap>(CollectedHeap::Serial);
}

#if INCLUDE_SERIALGC
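// Compute forwarding addresses for the sliding compaction phase of a full
// GC. A single CompactPoint starts in the old generation, so old-generation
// survivors compact toward the bottom of the heap and surviving young
// objects are then compacted into the space that remains, normally ending
// up in the old generation.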
void GenCollectedHeap::prepare_for_compaction() {
  // Start by compacting into same gen.
  CompactPoint cp(_old_gen);
  _old_gen->prepare_for_compaction(&cp);
  _young_gen->prepare_for_compaction(&cp);
}
#endif // INCLUDE_SERIALGC

void GenCollectedHeap::verify(VerifyOption option /* ignored */) {
  log_debug(gc, verify)("%s", _old_gen->name());
  _old_gen->verify();

  log_debug(gc, verify)("%s", _young_gen->name());
  _young_gen->verify();

  log_debug(gc, verify)("RemSet");
  rem_set()->verify();
}

void GenCollectedHeap::print_on(outputStream* st) const {
  if (_young_gen != nullptr) {
    _young_gen->print_on(st);
  }
  if (_old_gen != nullptr) {
    _old_gen->print_on(st);
  }
  MetaspaceUtils::print_on(st);
}

void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
}

bool GenCollectedHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<GenCollectedHeap>::print_location(st, addr);
}

void GenCollectedHeap::print_tracing_info() const {
  if (log_is_enabled(Debug, gc, heap, exit)) {
    LogStreamHandle(Debug, gc, heap, exit) lsh;
    _young_gen->print_summary_info_on(&lsh);
    _old_gen->print_summary_info_on(&lsh);
  }
}

void GenCollectedHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
  const DefNewGeneration* const def_new_gen = (DefNewGeneration*) young_gen();

  log_info(gc, heap)(HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(def_new_gen->short_name(),
                                             pre_gc_values.young_gen_used(),
                                             pre_gc_values.young_gen_capacity(),
                                             def_new_gen->used(),
                                             def_new_gen->capacity()),
                     HEAP_CHANGE_FORMAT_ARGS("Eden",
                                             pre_gc_values.eden_used(),
                                             pre_gc_values.eden_capacity(),
                                             def_new_gen->eden()->used(),
                                             def_new_gen->eden()->capacity()),
                     HEAP_CHANGE_FORMAT_ARGS("From",
                                             pre_gc_values.from_used(),
                                             pre_gc_values.from_capacity(),
                                             def_new_gen->from()->used(),
                                             def_new_gen->from()->capacity()));
  log_info(gc, heap)(HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(old_gen()->short_name(),
                                             pre_gc_values.old_gen_used(),
                                             pre_gc_values.old_gen_capacity(),
                                             old_gen()->used(),
                                             old_gen()->capacity()));
  MetaspaceUtils::print_metaspace_change(pre_gc_values.metaspace_sizes());
}

class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_prologue(_full);
  }
  GenGCPrologueClosure(bool full) : _full(full) {}
};

void GenCollectedHeap::gc_prologue(bool full) {
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");

  // Fill TLAB's and such
  ensure_parsability(true);   // retire TLABs

  // Walk generations
  GenGCPrologueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.
}

class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_epilogue(_full);
  }
  GenGCEpilogueClosure(bool full) : _full(full) {}
};

void GenCollectedHeap::gc_epilogue(bool full) {
#if COMPILER2_OR_JVMCI
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
#endif // COMPILER2_OR_JVMCI

  resize_all_tlabs();

  GenGCEpilogueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.

  MetaspaceCounters::update_performance_counters();
}

#ifndef PRODUCT
class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->record_spaces_top();
  }
};

void GenCollectedHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    GenGCSaveTopsBeforeGCClosure blk;
    generation_iterate(&blk, false);  // not old-to-young.
  }
}
#endif  // not PRODUCT

class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->ensure_parsability();
  }
};

void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  GenEnsureParsabilityClosure ep_cl;
  generation_iterate(&ep_cl, false);
}