/*
 * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/serial/cardTableRS.hpp"
#include "gc/serial/defNewGeneration.inline.hpp"
#include "gc/serial/serialFullGC.hpp"
#include "gc/serial/serialHeap.inline.hpp"
#include "gc/serial/serialMemoryPools.hpp"
#include "gc/serial/serialVMOperations.hpp"
#include "gc/serial/tenuredGeneration.inline.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcInitLogger.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageParState.inline.hpp"
#include "gc/shared/oopStorageSet.inline.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/slidingForwarding.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/workerThread.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/threads.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryManager.hpp"
#include "services/memoryService.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/vmError.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

SerialHeap* SerialHeap::heap() {
  return named_heap<SerialHeap>(CollectedHeap::Serial);
}

SerialHeap::SerialHeap() :
    CollectedHeap(),
    _young_gen(nullptr),
    _old_gen(nullptr),
    _rem_set(nullptr),
    _gc_policy_counters(new GCPolicyCounters("Copy:MSC", 2, 2)),
    _incremental_collection_failed(false),
    _young_manager(nullptr),
    _old_manager(nullptr),
    _eden_pool(nullptr),
    _survivor_pool(nullptr),
    _old_pool(nullptr) {
  _young_manager = new GCMemoryManager("Copy");
  _old_manager = new GCMemoryManager("MarkSweepCompact");
}

void SerialHeap::initialize_serviceability() {
  DefNewGeneration* young = young_gen();

  // Add a memory pool for each space. The young-gen pools don't support
  // low memory detection, since eden is expected to fill up between collections.
  _eden_pool = new ContiguousSpacePool(young->eden(),
                                       "Eden Space",
                                       young->max_eden_size(),
                                       false /* support_usage_threshold */);
  _survivor_pool = new SurvivorContiguousSpacePool(young,
                                                   "Survivor Space",
                                                   young->max_survivor_size(),
                                                   false /* support_usage_threshold */);
  TenuredGeneration* old = old_gen();
  _old_pool = new TenuredGenerationPool(old, "Tenured Gen", true);

  _young_manager->add_pool(_eden_pool);
  _young_manager->add_pool(_survivor_pool);
  young->set_gc_manager(_young_manager);

  _old_manager->add_pool(_eden_pool);
  _old_manager->add_pool(_survivor_pool);
  _old_manager->add_pool(_old_pool);
  old->set_gc_manager(_old_manager);
}

GrowableArray<GCMemoryManager*> SerialHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(_young_manager);
  memory_managers.append(_old_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> SerialHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(3);
  memory_pools.append(_eden_pool);
  memory_pools.append(_survivor_pool);
  memory_pools.append(_old_pool);
  return memory_pools;
}

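// Bracket a safepoint for the suspendible thread set: the string deduplication
// thread runs as a suspendible thread, so it must yield before the safepoint
// begins and may resume once the safepoint ends.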
void SerialHeap::safepoint_synchronize_begin() {
  if (UseStringDeduplication) {
    SuspendibleThreadSet::synchronize();
  }
}

void SerialHeap::safepoint_synchronize_end() {
  if (UseStringDeduplication) {
    SuspendibleThreadSet::desynchronize();
  }
}

HeapWord* SerialHeap::allocate_loaded_archive_space(size_t word_size) {
  MutexLocker ml(Heap_lock);
  return old_gen()->allocate(word_size, false /* is_tlab */);
}

void SerialHeap::complete_loaded_archive_space(MemRegion archive_space) {
  assert(old_gen()->used_region().contains(archive_space), "Archive space not contained in old gen");
  old_gen()->complete_loaded_archive_space(archive_space);
}

void SerialHeap::pin_object(JavaThread* thread, oop obj) {
  GCLocker::lock_critical(thread);
}

void SerialHeap::unpin_object(JavaThread* thread, oop obj) {
  GCLocker::unlock_critical(thread);
}

jint SerialHeap::initialize() {
  // Allocate space for the heap.

  ReservedHeapSpace heap_rs = allocate(HeapAlignment);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  initialize_reserved_region(heap_rs);

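  // The young generation occupies the low MaxNewSize bytes of the reservation;
  // the old generation takes the remainder.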
  ReservedSpace young_rs = heap_rs.first_part(MaxNewSize);
  ReservedSpace old_rs = heap_rs.last_part(MaxNewSize);

  _rem_set = new CardTableRS(heap_rs.region());
  _rem_set->initialize(young_rs.base(), old_rs.base());

  CardTableBarrierSet *bs = new CardTableBarrierSet(_rem_set);
  bs->initialize();
  BarrierSet::set_barrier_set(bs);

  _young_gen = new DefNewGeneration(young_rs, NewSize, MinNewSize, MaxNewSize);
  _old_gen = new TenuredGeneration(old_rs, OldSize, MinOldSize, MaxOldSize, rem_set());

  GCInitLogger::print();

  SlidingForwarding::initialize(_reserved, SpaceAlignment / HeapWordSize);

  return JNI_OK;
}

ReservedHeapSpace SerialHeap::allocate(size_t alignment) {
  // Now figure out the total size.
  const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
  assert(alignment % pageSize == 0, "Must be");

  // Check for overflow.
  size_t total_reserved = MaxNewSize + MaxOldSize;
  if (total_reserved < MaxNewSize) {
    vm_exit_during_initialization("The size of the object heap + VM data exceeds "
                                  "the maximum representable size");
  }
  assert(total_reserved % alignment == 0,
         "Gen size; total_reserved=" SIZE_FORMAT ", alignment="
         SIZE_FORMAT, total_reserved, alignment);

  ReservedHeapSpace heap_rs = Universe::reserve_heap(total_reserved, alignment);
  size_t used_page_size = heap_rs.page_size();

  os::trace_page_sizes("Heap",
                       MinHeapSize,
                       total_reserved,
                       heap_rs.base(),
                       heap_rs.size(),
                       used_page_size);

  return heap_rs;
}

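// Predicate handed to ScavengableNMethods: an object is "scavengable" if it
// lives in the young generation, so nmethods with oops into the young gen
// must be tracked and visited during scavenges.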
class GenIsScavengable : public BoolObjectClosure {
public:
  bool do_object_b(oop obj) {
    return SerialHeap::heap()->is_in_young(obj);
  }
};

static GenIsScavengable _is_scavengable;

void SerialHeap::post_initialize() {
  CollectedHeap::post_initialize();

  DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;

  def_new_gen->ref_processor_init();

  SerialFullGC::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
}

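// Snapshot per-generation and per-space usage before a GC so that
// print_heap_change() can report before/after figures afterwards.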
PreGenGCValues SerialHeap::get_pre_gc_values() const {
  const DefNewGeneration* const def_new_gen = (DefNewGeneration*) young_gen();

  return PreGenGCValues(def_new_gen->used(),
                        def_new_gen->capacity(),
                        def_new_gen->eden()->used(),
                        def_new_gen->eden()->capacity(),
                        def_new_gen->from()->used(),
                        def_new_gen->from()->capacity(),
                        old_gen()->used(),
                        old_gen()->capacity());
}

size_t SerialHeap::capacity() const {
  return _young_gen->capacity() + _old_gen->capacity();
}

size_t SerialHeap::used() const {
  return _young_gen->used() + _old_gen->used();
}

size_t SerialHeap::max_capacity() const {
  return _young_gen->max_capacity() + _old_gen->max_capacity();
}

// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)
// . heap memory is tight -- the most recent previous collection
//   was a full collection because a partial collection (would
//   have) failed and is likely to fail again
bool SerialHeap::should_try_older_generation_allocation(size_t word_size) const {
  size_t young_capacity = _young_gen->capacity_before_gc();
  return    (word_size > heap_word_size(young_capacity))
         || GCLocker::is_active_and_needs_gc()
         || incremental_collection_failed();
}

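// Try to satisfy the allocation by expanding the heap without collecting:
// old gen first, then young gen. Used when a GC cannot or should not run
// (e.g. the GC locker is active) or after a collection has already failed.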
HeapWord* SerialHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
  HeapWord* result = nullptr;
  if (_old_gen->should_allocate(size, is_tlab)) {
    result = _old_gen->expand_and_allocate(size, is_tlab);
  }
  if (result == nullptr) {
    if (_young_gen->should_allocate(size, is_tlab)) {
      result = _young_gen->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == nullptr || is_in_reserved(result), "result not in heap");
  return result;
}

HeapWord* SerialHeap::mem_allocate_work(size_t size,
                                        bool is_tlab) {

  HeapWord* result = nullptr;

  // Loop until the allocation is satisfied, or unsatisfied after GC.
  for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {

    // First allocation attempt is lock-free.
    Generation *young = _young_gen;
    if (young->should_allocate(size, is_tlab)) {
      result = young->par_allocate(size, is_tlab);
      if (result != nullptr) {
        assert(is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    uint gc_count_before;  // Read inside the Heap_lock locked region.
    {
      MutexLocker ml(Heap_lock);
      log_trace(gc, alloc)("SerialHeap::mem_allocate_work: attempting locked slow path allocation");
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = !should_try_older_generation_allocation(size);

      result = attempt_allocation(size, is_tlab, first_only);
      if (result != nullptr) {
        assert(is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GCLocker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return nullptr;  // Caller will retry allocating individual object.
        }
        if (!is_maximal_no_gc()) {
          // Try and expand heap to satisfy request.
          result = expand_heap_and_allocate(size, is_tlab);
          // Result could be null if we are out of space.
          if (result != nullptr) {
            return result;
          }
        }

        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
          return nullptr; // We didn't get to do a GC and we didn't get any memory.
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return nullptr;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = total_collections();
    }

    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
         assert(result == nullptr, "must be null if gc_locked() is true");
         continue;  // Retry and/or stall as necessary.
      }

      assert(result == nullptr || is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
          log_warning(gc, ergo)("SerialHeap::mem_allocate_work retries %d times,"
                                " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}

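// Attempt allocation in the young generation; if that fails and first_only
// is false, fall back to the old generation.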
HeapWord* SerialHeap::attempt_allocation(size_t size,
                                         bool is_tlab,
                                         bool first_only) {
  HeapWord* res = nullptr;

  if (_young_gen->should_allocate(size, is_tlab)) {
    res = _young_gen->allocate(size, is_tlab);
    if (res != nullptr || first_only) {
      return res;
    }
  }

  if (_old_gen->should_allocate(size, is_tlab)) {
    res = _old_gen->allocate(size, is_tlab);
  }

  return res;
}

HeapWord* SerialHeap::mem_allocate(size_t size,
                                   bool* gc_overhead_limit_was_exceeded) {
  return mem_allocate_work(size,
                           false /* is_tlab */);
}

bool SerialHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
         _gc_cause == GCCause::_wb_full_gc;
}

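// Collect a single generation, bracketing the work with counter/timer updates
// and optional before/after verification.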
void SerialHeap::collect_generation(Generation* gen, bool full, size_t size,
                                    bool is_tlab, bool run_verification, bool clear_soft_refs) {
  FormatBuffer<> title("Collect gen: %s", gen->short_name());
  GCTraceTime(Trace, gc, phases) t1(title);
  TraceCollectorStats tcs(gen->counters());
  TraceMemoryManagerStats tmms(gen->gc_manager(), gc_cause(), heap()->is_young_gen(gen) ? "end of minor GC" : "end of major GC");

  gen->stat_record()->invocations++;
  gen->stat_record()->accumulated_time.start();

  // Must be done anew before each collection because
  // a previous collection will do mangling and will
  // change top of some spaces.
  record_gen_tops_before_GC();

  log_trace(gc)("%s invoke=%d size=" SIZE_FORMAT, heap()->is_young_gen(gen) ? "Young" : "Old", gen->stat_record()->invocations, size * HeapWordSize);

  if (run_verification && VerifyBeforeGC) {
    Universe::verify("Before GC");
  }
  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::clear());

  // Do collection work
  {
    save_marks();   // save marks for all gens

    gen->collect(full, clear_soft_refs, size, is_tlab);
  }

  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::update_pointers());

  gen->stat_record()->accumulated_time.stop();

  update_gc_stats(gen, full);

  if (run_verification && VerifyAfterGC) {
    Universe::verify("After GC");
  }
}

void SerialHeap::do_collection(bool full,
                               bool clear_all_soft_refs,
                               size_t size,
                               bool is_tlab,
                               GenerationType max_generation) {
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread(), "only VM thread");
  assert(Heap_lock->is_locked(),
         "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");

  if (GCLocker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                          soft_ref_policy()->should_clear_all_soft_refs();

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, soft_ref_policy());

  IsGCActiveMark active_gc_mark;

  bool complete = full && (max_generation == OldGen);
  bool old_collects_young = complete && !ScavengeBeforeFullGC;
  bool do_young_collection = !old_collects_young && _young_gen->should_collect(full, size, is_tlab);

  const PreGenGCValues pre_gc_values = get_pre_gc_values();

  bool run_verification = total_collections() >= VerifyGCStartAt;
  bool prepared_for_verification = false;
  bool do_full_collection = false;

  if (do_young_collection) {
    GCIdMark gc_id_mark;
    GCTraceCPUTime tcpu(((DefNewGeneration*)_young_gen)->gc_tracer());
    GCTraceTime(Info, gc) t("Pause Young", nullptr, gc_cause(), true);

    print_heap_before_gc();

    if (run_verification && VerifyBeforeGC) {
      prepare_for_verify();
      prepared_for_verification = true;
    }

    gc_prologue(complete);
    increment_total_collections(complete);

    collect_generation(_young_gen,
                       full,
                       size,
                       is_tlab,
                       run_verification,
                       do_clear_all_soft_refs);

    if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
        size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {
      // Allocation request was met by young GC.
      size = 0;
    }

    // Ask if young collection is enough. If so, do the final steps for young collection,
    // and fallthrough to the end.
    do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation);
    if (!do_full_collection) {
      // Adjust generation sizes.
      _young_gen->compute_new_size();

      print_heap_change(pre_gc_values);

      // Track memory usage and detect low memory after GC finishes
      MemoryService::track_memory_usage();

      gc_epilogue(complete);
    }

    print_heap_after_gc();

  } else {
    // No young collection, ask if we need to perform Full collection.
    do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation);
  }

  if (do_full_collection) {
    GCIdMark gc_id_mark;
    GCTraceCPUTime tcpu(SerialFullGC::gc_tracer());
    GCTraceTime(Info, gc) t("Pause Full", nullptr, gc_cause(), true);

    print_heap_before_gc();

    if (!prepared_for_verification && run_verification && VerifyBeforeGC) {
      prepare_for_verify();
    }

    if (!do_young_collection) {
      gc_prologue(complete);
      increment_total_collections(complete);
    }

    // Accounting quirk: total full collections would be incremented when "complete"
    // is set, by calling increment_total_collections above. However, we also need to
    // account Full collections that had "complete" unset.
    if (!complete) {
      increment_total_full_collections();
    }

    CodeCache::on_gc_marking_cycle_start();

    ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
                              false /* unregister_nmethods_during_purge */,
                              false /* lock_nmethod_free_separately */);

    collect_generation(_old_gen,
                       full,
                       size,
                       is_tlab,
                       run_verification,
                       do_clear_all_soft_refs);

    CodeCache::on_gc_marking_cycle_finish();
    CodeCache::arm_all_nmethods();

    // Adjust generation sizes.
    _old_gen->compute_new_size();
    _young_gen->compute_new_size();

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge(/*at_safepoint*/true);
    DEBUG_ONLY(MetaspaceUtils::verify();)

    // Need to clear claim bits for the next mark.
    ClassLoaderDataGraph::clear_claimed_marks();

    // Resize the metaspace capacity after full collections
    MetaspaceGC::compute_new_size();

    print_heap_change(pre_gc_values);

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    // Need to tell the epilogue code we are done with Full GC, regardless what was
    // the initial value for "complete" flag.
    gc_epilogue(true);

    print_heap_after_gc();
  }
}

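// A full collection is needed only when the request targets the old generation
// and the old generation itself decides it should collect.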
bool SerialHeap::should_do_full_collection(size_t size, bool full, bool is_tlab,
                                           SerialHeap::GenerationType max_gen) const {
  return max_gen == OldGen && _old_gen->should_collect(full, size, is_tlab);
}

void SerialHeap::register_nmethod(nmethod* nm) {
  ScavengableNMethods::register_nmethod(nm);
}

void SerialHeap::unregister_nmethod(nmethod* nm) {
  ScavengableNMethods::unregister_nmethod(nm);
}

void SerialHeap::verify_nmethod(nmethod* nm) {
  ScavengableNMethods::verify_nmethod(nm);
}

void SerialHeap::prune_scavengable_nmethods() {
  ScavengableNMethods::prune_nmethods_not_into_young();
}

void SerialHeap::prune_unlinked_nmethods() {
  ScavengableNMethods::prune_unlinked_nmethods();
}

HeapWord* SerialHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  GCCauseSetter x(this, GCCause::_allocation_failure);
  HeapWord* result = nullptr;

  assert(size != 0, "Precondition violated");
  if (GCLocker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;   // Could be null if we are out of space.
  } else if (!incremental_collection_will_fail(false /* don't consult_young */)) {
    // Do an incremental collection.
    do_collection(false,                     // full
                  false,                     // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  SerialHeap::OldGen); // max_generation
  } else {
    log_trace(gc)(" :: Trying full because partial may fail :: ");
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    do_collection(true,                      // full
                  false,                     // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  SerialHeap::OldGen); // max_generation
  }

  result = attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != nullptr) {
    assert(is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != nullptr) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    do_collection(true,                      // full
                  true,                      // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  SerialHeap::OldGen); // max_generation
  }

  result = attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != nullptr) {
    assert(is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!soft_ref_policy()->should_clear_all_soft_refs(),
    "Flag should have been handled and cleared prior to this point");

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return nullptr;
}

void SerialHeap::process_roots(ScanningOption so,
                               OopClosure* strong_roots,
                               CLDClosure* strong_cld_closure,
                               CLDClosure* weak_cld_closure,
                               NMethodToOopClosure* code_roots) {
  // General roots.
  assert(code_roots != nullptr, "code root closure should always be set");

  ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);

  // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
  NMethodToOopClosure* roots_from_code_p = (so & SO_AllCodeCache) ? nullptr : code_roots;

  Threads::oops_do(strong_roots, roots_from_code_p);

  OopStorageSet::strong_oops_do(strong_roots);

  if (so & SO_ScavengeCodeCache) {
    assert(code_roots != nullptr, "must supply closure for code cache");

    // We only visit parts of the CodeCache when scavenging.
    ScavengableNMethods::nmethods_do(code_roots);
  }
  if (so & SO_AllCodeCache) {
    assert(code_roots != nullptr, "must supply closure for code cache");

    // Scan the entire code cache, since CodeCache::do_unloading is not called.
    CodeCache::nmethods_do(code_roots);
  }
}

bool SerialHeap::no_allocs_since_save_marks() {
  return _young_gen->no_allocs_since_save_marks() &&
         _old_gen->no_allocs_since_save_marks();
}

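// Re-scan objects copied or promoted since the last save_marks, repeating
// until scanning stops evacuating new objects (i.e. until there have been
// no allocations since save_marks).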
void SerialHeap::scan_evacuated_objs(YoungGenScanClosure* young_cl,
                                     OldGenScanClosure* old_cl) {
  do {
    young_gen()->oop_since_save_marks_iterate(young_cl);
    old_gen()->oop_since_save_marks_iterate(old_cl);
  } while (!no_allocs_since_save_marks());
  guarantee(young_gen()->promo_failure_scan_is_complete(), "Failed to finish scan");
}

// public collection interfaces
void SerialHeap::collect(GCCause::Cause cause) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  unsigned int gc_count_before;
  unsigned int full_gc_count_before;

  {
    MutexLocker ml(Heap_lock);
    // Read the GC count while holding the Heap_lock
    gc_count_before      = total_collections();
    full_gc_count_before = total_full_collections();
  }

  if (GCLocker::should_discard(cause, gc_count_before)) {
    return;
  }

  bool should_run_young_gc =  (cause == GCCause::_wb_young_gc)
                           || (cause == GCCause::_gc_locker)
                DEBUG_ONLY(|| (cause == GCCause::_scavenge_alot));

  const GenerationType max_generation = should_run_young_gc
                                      ? YoungGen
                                      : OldGen;

  while (true) {
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_generation);
    VMThread::execute(&op);

    if (!GCCause::is_explicit_full_gc(cause)) {
      return;
    }

    {
      MutexLocker ml(Heap_lock);
      // Read the GC count while holding the Heap_lock
      if (full_gc_count_before != total_full_collections()) {
        return;
      }
    }

    if (GCLocker::is_active_and_needs_gc()) {
      // If GCLocker is active, wait until clear before retrying.
      GCLocker::stall_until_clear();
    }
  }
}

void SerialHeap::do_full_collection(bool clear_all_soft_refs) {
   do_full_collection(clear_all_soft_refs, OldGen);
}

void SerialHeap::do_full_collection(bool clear_all_soft_refs,
                                    GenerationType last_generation) {
  do_collection(true,                   // full
                clear_all_soft_refs,    // clear_all_soft_refs
                0,                      // size
                false,                  // is_tlab
                last_generation);       // last_generation
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full
  if (gc_cause() == GCCause::_gc_locker && incremental_collection_failed()) {
    log_debug(gc, jni)("GC locker: Trying a full collection because scavenge failed");
    // This time allow the old gen to be collected as well
    do_collection(true,                // full
                  clear_all_soft_refs, // clear_all_soft_refs
                  0,                   // size
                  false,               // is_tlab
                  OldGen);             // last_generation
  }
}

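// The young generation is reserved at lower addresses than the old generation,
// so a single pointer compare suffices; the assert cross-checks against the
// young gen's reserved region.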
bool SerialHeap::is_in_young(const void* p) const {
  bool result = p < _old_gen->reserved().start();
  assert(result == _young_gen->is_in_reserved(p),
         "incorrect test - result=%d, p=" PTR_FORMAT, result, p2i(p));
  return result;
}

bool SerialHeap::requires_barriers(stackChunkOop obj) const {
  return !is_in_young(obj);
}

// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool SerialHeap::is_in(const void* p) const {
  return _young_gen->is_in(p) || _old_gen->is_in(p);
}

void SerialHeap::object_iterate(ObjectClosure* cl) {
  _young_gen->object_iterate(cl);
  _old_gen->object_iterate(cl);
}

HeapWord* SerialHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  if (_young_gen->is_in_reserved(addr)) {
    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
    return _young_gen->block_start(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
  return _old_gen->block_start(addr);
}

bool SerialHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");

  if (_young_gen->is_in_reserved(addr)) {
    return _young_gen->eden()->is_in(addr)
        || _young_gen->from()->is_in(addr)
        || _young_gen->to()  ->is_in(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "must be in old-gen");
  return addr < _old_gen->space()->top();
}

size_t SerialHeap::tlab_capacity(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->tlab_capacity();
}

size_t SerialHeap::tlab_used(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->tlab_used();
}

size_t SerialHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->unsafe_max_tlab_alloc();
}

HeapWord* SerialHeap::allocate_new_tlab(size_t min_size,
                                        size_t requested_size,
                                        size_t* actual_size) {
  HeapWord* result = mem_allocate_work(requested_size /* size */,
                                       true /* is_tlab */);
  if (result != nullptr) {
    *actual_size = requested_size;
  }

  return result;
}

void SerialHeap::prepare_for_verify() {
  ensure_parsability(false);        // no need to retire TLABs
}

bool SerialHeap::is_maximal_no_gc() const {
  // We don't expand young-gen except at a GC.
  return _old_gen->is_maximal_no_gc();
}

void SerialHeap::save_marks() {
  _young_gen->save_marks();
  _old_gen->save_marks();
}

void SerialHeap::verify(VerifyOption option /* ignored */) {
  log_debug(gc, verify)("%s", _old_gen->name());
  _old_gen->verify();

  log_debug(gc, verify)("%s", _young_gen->name());
  _young_gen->verify();

  log_debug(gc, verify)("RemSet");
  rem_set()->verify();
}

void SerialHeap::print_on(outputStream* st) const {
  if (_young_gen != nullptr) {
    _young_gen->print_on(st);
  }
  if (_old_gen != nullptr) {
    _old_gen->print_on(st);
  }
  MetaspaceUtils::print_on(st);
}

void SerialHeap::gc_threads_do(ThreadClosure* tc) const {
}

bool SerialHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<SerialHeap>::print_location(st, addr);
}

void SerialHeap::print_tracing_info() const {
  if (log_is_enabled(Debug, gc, heap, exit)) {
    LogStreamHandle(Debug, gc, heap, exit) lsh;
    _young_gen->print_summary_info_on(&lsh);
    _old_gen->print_summary_info_on(&lsh);
  }
}

void SerialHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
  const DefNewGeneration* const def_new_gen = (DefNewGeneration*) young_gen();

  log_info(gc, heap)(HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(def_new_gen->short_name(),
                                             pre_gc_values.young_gen_used(),
                                             pre_gc_values.young_gen_capacity(),
                                             def_new_gen->used(),
                                             def_new_gen->capacity()),
                     HEAP_CHANGE_FORMAT_ARGS("Eden",
                                             pre_gc_values.eden_used(),
                                             pre_gc_values.eden_capacity(),
                                             def_new_gen->eden()->used(),
                                             def_new_gen->eden()->capacity()),
                     HEAP_CHANGE_FORMAT_ARGS("From",
                                             pre_gc_values.from_used(),
                                             pre_gc_values.from_capacity(),
                                             def_new_gen->from()->used(),
                                             def_new_gen->from()->capacity()));
  log_info(gc, heap)(HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(old_gen()->short_name(),
                                             pre_gc_values.old_gen_used(),
                                             pre_gc_values.old_gen_capacity(),
                                             old_gen()->used(),
                                             old_gen()->capacity()));
  MetaspaceUtils::print_metaspace_change(pre_gc_values.metaspace_sizes());
}

void SerialHeap::gc_prologue(bool full) {
  // Fill TLAB's and such
  ensure_parsability(true);   // retire TLABs

  _old_gen->gc_prologue();
};

void SerialHeap::gc_epilogue(bool full) {
#if COMPILER2_OR_JVMCI
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
#endif // COMPILER2_OR_JVMCI

  resize_all_tlabs();

  _young_gen->gc_epilogue(full);
  _old_gen->gc_epilogue();

  MetaspaceCounters::update_performance_counters();
};

#ifndef PRODUCT
void SerialHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    _young_gen->record_spaces_top();
    _old_gen->record_spaces_top();
  }
}
#endif  // not PRODUCT