/*
 * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/serial/cardTableRS.hpp"
#include "gc/serial/defNewGeneration.inline.hpp"
#include "gc/serial/genMarkSweep.hpp"
#include "gc/serial/markSweep.hpp"
#include "gc/serial/serialHeap.hpp"
#include "gc/serial/serialMemoryPools.hpp"
#include "gc/serial/serialVMOperations.hpp"
#include "gc/serial/tenuredGeneration.inline.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcInitLogger.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageParState.inline.hpp"
#include "gc/shared/oopStorageSet.inline.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/slidingForwarding.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/workerThread.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/threads.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryManager.hpp"
#include "services/memoryService.hpp"
#include "utilities/autoRestore.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/vmError.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

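// Convenience accessor for the global SerialHeap instance.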
SerialHeap* SerialHeap::heap() {
  return named_heap<SerialHeap>(CollectedHeap::Serial);
}

SerialHeap::SerialHeap() :
    CollectedHeap(),
    _young_gen(nullptr),
    _old_gen(nullptr),
    _rem_set(nullptr),
    _gc_policy_counters(new GCPolicyCounters("Copy:MSC", 2, 2)),
    _incremental_collection_failed(false),
    _young_manager(nullptr),
    _old_manager(nullptr),
    _eden_pool(nullptr),
    _survivor_pool(nullptr),
    _old_pool(nullptr) {
  _young_manager = new GCMemoryManager("Copy");
  _old_manager = new GCMemoryManager("MarkSweepCompact");
}

void SerialHeap::initialize_serviceability() {
  DefNewGeneration* young = young_gen();

  // Add a memory pool for each space. The young-gen pools do not support
  // low memory detection, as the young gen is expected to get filled up.
  _eden_pool = new ContiguousSpacePool(young->eden(),
                                       "Eden Space",
                                       young->max_eden_size(),
                                       false /* support_usage_threshold */);
  _survivor_pool = new SurvivorContiguousSpacePool(young,
                                                   "Survivor Space",
                                                   young->max_survivor_size(),
                                                   false /* support_usage_threshold */);
  TenuredGeneration* old = old_gen();
  _old_pool = new TenuredGenerationPool(old, "Tenured Gen", true);

  _young_manager->add_pool(_eden_pool);
  _young_manager->add_pool(_survivor_pool);
  young->set_gc_manager(_young_manager);

  _old_manager->add_pool(_eden_pool);
  _old_manager->add_pool(_survivor_pool);
  _old_manager->add_pool(_old_pool);
  old->set_gc_manager(_old_manager);
}

GrowableArray<GCMemoryManager*> SerialHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(_young_manager);
  memory_managers.append(_old_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> SerialHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(3);
  memory_pools.append(_eden_pool);
  memory_pools.append(_survivor_pool);
  memory_pools.append(_old_pool);
  return memory_pools;
}

void SerialHeap::safepoint_synchronize_begin() {
  if (UseStringDeduplication) {
    SuspendibleThreadSet::synchronize();
  }
}

void SerialHeap::safepoint_synchronize_end() {
  if (UseStringDeduplication) {
    SuspendibleThreadSet::desynchronize();
  }
}

HeapWord* SerialHeap::allocate_loaded_archive_space(size_t word_size) {
  MutexLocker ml(Heap_lock);
  return old_gen()->allocate(word_size, false /* is_tlab */);
}

void SerialHeap::complete_loaded_archive_space(MemRegion archive_space) {
  assert(old_gen()->used_region().contains(archive_space), "Archive space not contained in old gen");
  old_gen()->complete_loaded_archive_space(archive_space);
}

void SerialHeap::pin_object(JavaThread* thread, oop obj) {
  GCLocker::lock_critical(thread);
}

void SerialHeap::unpin_object(JavaThread* thread, oop obj) {
  GCLocker::unlock_critical(thread);
}

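// Reserve the heap and carve it up: the young generation at the low end of the
// reservation and the old generation above it, with a card-table remembered set
// and barrier set covering the whole reserved region.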
jint SerialHeap::initialize() {
  // Allocate space for the heap.

  ReservedHeapSpace heap_rs = allocate(HeapAlignment);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  initialize_reserved_region(heap_rs);

  ReservedSpace young_rs = heap_rs.first_part(MaxNewSize);
  ReservedSpace old_rs = heap_rs.last_part(MaxNewSize);

  _rem_set = new CardTableRS(heap_rs.region());
  _rem_set->initialize(young_rs.base(), old_rs.base());

  CardTableBarrierSet *bs = new CardTableBarrierSet(_rem_set);
  bs->initialize();
  BarrierSet::set_barrier_set(bs);

  _young_gen = new DefNewGeneration(young_rs, NewSize, MinNewSize, MaxNewSize);
  _old_gen = new TenuredGeneration(old_rs, OldSize, MinOldSize, MaxOldSize, rem_set());

  GCInitLogger::print();

  SlidingForwarding::initialize(_reserved, SpaceAlignment / HeapWordSize);

  return JNI_OK;
}

ReservedHeapSpace SerialHeap::allocate(size_t alignment) {
  // Now figure out the total size.
  const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
  assert(alignment % pageSize == 0, "Must be");

  // Check for overflow.
  size_t total_reserved = MaxNewSize + MaxOldSize;
  if (total_reserved < MaxNewSize) {
    vm_exit_during_initialization("The size of the object heap + VM data exceeds "
                                  "the maximum representable size");
  }
  assert(total_reserved % alignment == 0,
         "Gen size; total_reserved=" SIZE_FORMAT ", alignment="
         SIZE_FORMAT, total_reserved, alignment);

  ReservedHeapSpace heap_rs = Universe::reserve_heap(total_reserved, alignment);
  size_t used_page_size = heap_rs.page_size();

  os::trace_page_sizes("Heap",
                       MinHeapSize,
                       total_reserved,
                       heap_rs.base(),
                       heap_rs.size(),
                       used_page_size);

  return heap_rs;
}

class GenIsScavengable : public BoolObjectClosure {
public:
  bool do_object_b(oop obj) {
    return SerialHeap::heap()->is_in_young(obj);
  }
};

static GenIsScavengable _is_scavengable;

void SerialHeap::post_initialize() {
  CollectedHeap::post_initialize();

  DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;

  def_new_gen->ref_processor_init();

  MarkSweep::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
}

PreGenGCValues SerialHeap::get_pre_gc_values() const {
  const DefNewGeneration* const def_new_gen = (DefNewGeneration*) young_gen();

  return PreGenGCValues(def_new_gen->used(),
                        def_new_gen->capacity(),
                        def_new_gen->eden()->used(),
                        def_new_gen->eden()->capacity(),
                        def_new_gen->from()->used(),
                        def_new_gen->from()->capacity(),
                        old_gen()->used(),
                        old_gen()->capacity());
}

size_t SerialHeap::capacity() const {
  return _young_gen->capacity() + _old_gen->capacity();
}

size_t SerialHeap::used() const {
  return _young_gen->used() + _old_gen->used();
}

size_t SerialHeap::max_capacity() const {
  return _young_gen->max_capacity() + _old_gen->max_capacity();
}

// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)
// . heap memory is tight -- the most recent previous collection
//   was a full collection because a partial collection (would
//   have) failed and is likely to fail again
bool SerialHeap::should_try_older_generation_allocation(size_t word_size) const {
  size_t young_capacity = _young_gen->capacity_before_gc();
  return    (word_size > heap_word_size(young_capacity))
         || GCLocker::is_active_and_needs_gc()
         || incremental_collection_failed();
}

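// Try to satisfy an allocation by expanding a generation rather than collecting:
// the old gen is tried first, then the young gen.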
HeapWord* SerialHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
  HeapWord* result = nullptr;
  if (_old_gen->should_allocate(size, is_tlab)) {
    result = _old_gen->expand_and_allocate(size, is_tlab);
  }
  if (result == nullptr) {
    if (_young_gen->should_allocate(size, is_tlab)) {
      result = _young_gen->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == nullptr || is_in_reserved(result), "result not in heap");
  return result;
}

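// Allocation slow path: loop over a lock-free attempt in the young gen, a locked
// attempt honoring the older-generation policy, and finally a VM operation that
// collects and retries, until the request is satisfied or clearly cannot be.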
HeapWord* SerialHeap::mem_allocate_work(size_t size,
                                        bool is_tlab) {

  HeapWord* result = nullptr;

  // Loop until the allocation is satisfied, or unsatisfied after GC.
  for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {

    // First allocation attempt is lock-free.
    Generation *young = _young_gen;
    if (young->should_allocate(size, is_tlab)) {
      result = young->par_allocate(size, is_tlab);
      if (result != nullptr) {
        assert(is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    uint gc_count_before;  // Read inside the Heap_lock locked region.
    {
      MutexLocker ml(Heap_lock);
      log_trace(gc, alloc)("SerialHeap::mem_allocate_work: attempting locked slow path allocation");
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = !should_try_older_generation_allocation(size);

      result = attempt_allocation(size, is_tlab, first_only);
      if (result != nullptr) {
        assert(is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GCLocker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return nullptr;  // Caller will retry allocating individual object.
        }
        if (!is_maximal_no_gc()) {
          // Try and expand heap to satisfy request.
          result = expand_heap_and_allocate(size, is_tlab);
          // Result could be null if we are out of space.
          if (result != nullptr) {
            return result;
          }
        }

        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
          return nullptr; // We didn't get to do a GC and we didn't get any memory.
        }

        // If this thread is not in a JNI critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section, so
        // we retry the allocation sequence from the beginning of the loop
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return nullptr;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = total_collections();
    }

    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
         assert(result == nullptr, "must be null if gc_locked() is true");
         continue;  // Retry and/or stall as necessary.
      }

      assert(result == nullptr || is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
          log_warning(gc, ergo)("SerialHeap::mem_allocate_work retries %d times,"
                                " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}

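// Attempt allocation in the young gen and then, unless first_only is set, in the
// old gen. Returns null if neither generation can satisfy the request.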
HeapWord* SerialHeap::attempt_allocation(size_t size,
                                         bool is_tlab,
                                         bool first_only) {
  HeapWord* res = nullptr;

  if (_young_gen->should_allocate(size, is_tlab)) {
    res = _young_gen->allocate(size, is_tlab);
    if (res != nullptr || first_only) {
      return res;
    }
  }

  if (_old_gen->should_allocate(size, is_tlab)) {
    res = _old_gen->allocate(size, is_tlab);
  }

  return res;
}

HeapWord* SerialHeap::mem_allocate(size_t size,
                                   bool* gc_overhead_limit_was_exceeded) {
  return mem_allocate_work(size,
                           false /* is_tlab */);
}

bool SerialHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
         _gc_cause == GCCause::_wb_full_gc;
}

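// Collect a single generation, bracketed by optional verification and by timing
// and invocation-count bookkeeping.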
void SerialHeap::collect_generation(Generation* gen, bool full, size_t size,
                                    bool is_tlab, bool run_verification, bool clear_soft_refs) {
  FormatBuffer<> title("Collect gen: %s", gen->short_name());
  GCTraceTime(Trace, gc, phases) t1(title);
  TraceCollectorStats tcs(gen->counters());
  TraceMemoryManagerStats tmms(gen->gc_manager(), gc_cause(), heap()->is_young_gen(gen) ? "end of minor GC" : "end of major GC");

  gen->stat_record()->invocations++;
  gen->stat_record()->accumulated_time.start();

  // Must be done anew before each collection because
  // a previous collection will do mangling and will
  // change top of some spaces.
  record_gen_tops_before_GC();

  log_trace(gc)("%s invoke=%d size=" SIZE_FORMAT, heap()->is_young_gen(gen) ? "Young" : "Old", gen->stat_record()->invocations, size * HeapWordSize);

  if (run_verification && VerifyBeforeGC) {
    Universe::verify("Before GC");
  }
  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::clear());

  // Do collection work
  {
    save_marks();   // save marks for all gens

    gen->collect(full, clear_soft_refs, size, is_tlab);
  }

  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::update_pointers());

  gen->stat_record()->accumulated_time.stop();

  update_gc_stats(gen, full);

  if (run_verification && VerifyAfterGC) {
    Universe::verify("After GC");
  }
}

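// Top-level collection driver, run by the VM thread at a safepoint. Depending on
// the arguments and generation policy it performs a young collection, a full
// (mark-sweep-compact) collection, or a young collection followed by a full one.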
void SerialHeap::do_collection(bool full,
                               bool clear_all_soft_refs,
                               size_t size,
                               bool is_tlab,
                               GenerationType max_generation) {
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread(), "only VM thread");
  assert(Heap_lock->is_locked(),
         "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");

  if (GCLocker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                          soft_ref_policy()->should_clear_all_soft_refs();

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, soft_ref_policy());

  AutoModifyRestore<bool> temporarily(_is_gc_active, true);

  bool complete = full && (max_generation == OldGen);
  bool old_collects_young = complete && !ScavengeBeforeFullGC;
  bool do_young_collection = !old_collects_young && _young_gen->should_collect(full, size, is_tlab);

  const PreGenGCValues pre_gc_values = get_pre_gc_values();

  bool run_verification = total_collections() >= VerifyGCStartAt;
  bool prepared_for_verification = false;
  bool do_full_collection = false;

  if (do_young_collection) {
    GCIdMark gc_id_mark;
    GCTraceCPUTime tcpu(((DefNewGeneration*)_young_gen)->gc_tracer());
    GCTraceTime(Info, gc) t("Pause Young", nullptr, gc_cause(), true);

    print_heap_before_gc();

    if (run_verification && VerifyBeforeGC) {
      prepare_for_verify();
      prepared_for_verification = true;
    }

    gc_prologue(complete);
    increment_total_collections(complete);

    collect_generation(_young_gen,
                       full,
                       size,
                       is_tlab,
                       run_verification,
                       do_clear_all_soft_refs);

    if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
        size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {
      // Allocation request was met by young GC.
      size = 0;
    }

    // Ask if young collection is enough. If so, do the final steps for young collection,
    // and fallthrough to the end.
    do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation);
    if (!do_full_collection) {
      // Adjust generation sizes.
      _young_gen->compute_new_size();

      print_heap_change(pre_gc_values);

      // Track memory usage and detect low memory after GC finishes
      MemoryService::track_memory_usage();

      gc_epilogue(complete);
    }

    print_heap_after_gc();

  } else {
    // No young collection, ask if we need to perform Full collection.
    do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation);
  }

  if (do_full_collection) {
    GCIdMark gc_id_mark;
    GCTraceCPUTime tcpu(GenMarkSweep::gc_tracer());
    GCTraceTime(Info, gc) t("Pause Full", nullptr, gc_cause(), true);

    print_heap_before_gc();

    if (!prepared_for_verification && run_verification && VerifyBeforeGC) {
      prepare_for_verify();
    }

    if (!do_young_collection) {
      gc_prologue(complete);
      increment_total_collections(complete);
    }

    // Accounting quirk: the total full-collection count is incremented when "complete"
    // is set, by the call to increment_total_collections above. However, we also need
    // to account for Full collections that had "complete" unset.
    if (!complete) {
      increment_total_full_collections();
    }

    CodeCache::on_gc_marking_cycle_start();

    ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
                              false /* unregister_nmethods_during_purge */,
                              false /* lock_codeblob_free_separately */);

    collect_generation(_old_gen,
                       full,
                       size,
                       is_tlab,
                       run_verification,
                       do_clear_all_soft_refs);

    CodeCache::on_gc_marking_cycle_finish();
    CodeCache::arm_all_nmethods();

    // Adjust generation sizes.
    _old_gen->compute_new_size();
    _young_gen->compute_new_size();

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge(/*at_safepoint*/true);
    DEBUG_ONLY(MetaspaceUtils::verify();)

    // Need to clear claim bits for the next mark.
    ClassLoaderDataGraph::clear_claimed_marks();

    // Resize the metaspace capacity after full collections
    MetaspaceGC::compute_new_size();

    print_heap_change(pre_gc_values);

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    // Need to tell the epilogue code we are done with a Full GC, regardless of the
    // initial value of the "complete" flag.
    gc_epilogue(true);

    print_heap_after_gc();
  }
}

bool SerialHeap::should_do_full_collection(size_t size, bool full, bool is_tlab,
                                           SerialHeap::GenerationType max_gen) const {
  return max_gen == OldGen && _old_gen->should_collect(full, size, is_tlab);
}

void SerialHeap::register_nmethod(nmethod* nm) {
  ScavengableNMethods::register_nmethod(nm);
}

void SerialHeap::unregister_nmethod(nmethod* nm) {
  ScavengableNMethods::unregister_nmethod(nm);
}

void SerialHeap::verify_nmethod(nmethod* nm) {
  ScavengableNMethods::verify_nmethod(nm);
}

void SerialHeap::prune_scavengable_nmethods() {
  ScavengableNMethods::prune_nmethods_not_into_young();
}

void SerialHeap::prune_unlinked_nmethods() {
  ScavengableNMethods::prune_unlinked_nmethods();
}

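// Called after an allocation failure. Escalates gradually: incremental collection,
// full collection, then a maximally compacting full collection that clears soft
// references, retrying allocation (and heap expansion) between the steps.
// Returns null only if memory is truly exhausted.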
HeapWord* SerialHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  GCCauseSetter x(this, GCCause::_allocation_failure);
  HeapWord* result = nullptr;

  assert(size != 0, "Precondition violated");
  if (GCLocker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;   // Could be null if we are out of space.
  } else if (!incremental_collection_will_fail(false /* don't consult_young */)) {
    // Do an incremental collection.
    do_collection(false,                     // full
                  false,                     // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  SerialHeap::OldGen); // max_generation
  } else {
    log_trace(gc)(" :: Trying full because partial may fail :: ");
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    do_collection(true,                      // full
                  false,                     // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  SerialHeap::OldGen); // max_generation
  }

  result = attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != nullptr) {
    assert(is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != nullptr) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    do_collection(true,                      // full
                  true,                      // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  SerialHeap::OldGen); // max_generation
  }

  result = attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != nullptr) {
    assert(is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!soft_ref_policy()->should_clear_all_soft_refs(),
    "Flag should have been handled and cleared prior to this point");

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return nullptr;
}

#ifdef ASSERT
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!SerialHeap::heap()->is_in_partial_collection(*p),
      "Referent should not be scavengable.");  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif

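// Apply the given closures to the strong roots: class-loader data, thread stacks,
// the strong OopStorage set and, depending on the scanning option, part or all of
// the code cache.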
void SerialHeap::process_roots(ScanningOption so,
                               OopClosure* strong_roots,
                               CLDClosure* strong_cld_closure,
                               CLDClosure* weak_cld_closure,
                               CodeBlobToOopClosure* code_roots) {
  // General roots.
  assert(code_roots != nullptr, "code root closure should always be set");

  ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);

  // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
  CodeBlobToOopClosure* roots_from_code_p = (so & SO_AllCodeCache) ? nullptr : code_roots;

  Threads::oops_do(strong_roots, roots_from_code_p);

  OopStorageSet::strong_oops_do(strong_roots);

  if (so & SO_ScavengeCodeCache) {
    assert(code_roots != nullptr, "must supply closure for code cache");

    // We only visit parts of the CodeCache when scavenging.
    ScavengableNMethods::nmethods_do(code_roots);
  }
  if (so & SO_AllCodeCache) {
    assert(code_roots != nullptr, "must supply closure for code cache");

    // CMSCollector uses this to do intermediate-strength collections.
    // We scan the entire code cache, since CodeCache::do_unloading is not called.
    CodeCache::blobs_do(code_roots);
  }
  // Verify that the code cache contents are not subject to
  // movement by a scavenging collection.
  DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
  DEBUG_ONLY(ScavengableNMethods::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
}

bool SerialHeap::no_allocs_since_save_marks() {
  return _young_gen->no_allocs_since_save_marks() &&
         _old_gen->no_allocs_since_save_marks();
}

// public collection interfaces
void SerialHeap::collect(GCCause::Cause cause) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  unsigned int gc_count_before;
  unsigned int full_gc_count_before;

  {
    MutexLocker ml(Heap_lock);
    // Read the GC count while holding the Heap_lock
    gc_count_before      = total_collections();
    full_gc_count_before = total_full_collections();
  }

  if (GCLocker::should_discard(cause, gc_count_before)) {
    return;
  }

  bool should_run_young_gc =  (cause == GCCause::_wb_young_gc)
                           || (cause == GCCause::_gc_locker)
                DEBUG_ONLY(|| (cause == GCCause::_scavenge_alot));

  const GenerationType max_generation = should_run_young_gc
                                      ? YoungGen
                                      : OldGen;

  while (true) {
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_generation);
    VMThread::execute(&op);

    if (!GCCause::is_explicit_full_gc(cause)) {
      return;
    }

    {
      MutexLocker ml(Heap_lock);
      // Read the GC count while holding the Heap_lock
      if (full_gc_count_before != total_full_collections()) {
        return;
      }
    }

    if (GCLocker::is_active_and_needs_gc()) {
      // If GCLocker is active, wait until clear before retrying.
      GCLocker::stall_until_clear();
    }
  }
}

void SerialHeap::do_full_collection(bool clear_all_soft_refs) {
   do_full_collection(clear_all_soft_refs, OldGen);
}

void SerialHeap::do_full_collection(bool clear_all_soft_refs,
                                    GenerationType last_generation) {
  do_collection(true,                   // full
                clear_all_soft_refs,    // clear_all_soft_refs
                0,                      // size
                false,                  // is_tlab
                last_generation);       // last_generation
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full
  if (gc_cause() == GCCause::_gc_locker && incremental_collection_failed()) {
    log_debug(gc, jni)("GC locker: Trying a full collection because scavenge failed");
    // This time allow the old gen to be collected as well
    do_collection(true,                // full
                  clear_all_soft_refs, // clear_all_soft_refs
                  0,                   // size
                  false,               // is_tlab
                  OldGen);             // last_generation
  }
}

bool SerialHeap::is_in_young(const void* p) const {
  bool result = p < _old_gen->reserved().start();
  assert(result == _young_gen->is_in_reserved(p),
         "incorrect test - result=%d, p=" PTR_FORMAT, result, p2i(p));
  return result;
}

bool SerialHeap::requires_barriers(stackChunkOop obj) const {
  return !is_in_young(obj);
}

// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool SerialHeap::is_in(const void* p) const {
  return _young_gen->is_in(p) || _old_gen->is_in(p);
}

#ifdef ASSERT
// Don't implement this by using is_in_young().  This method is used
// in some cases to check that is_in_young() is correct.
bool SerialHeap::is_in_partial_collection(const void* p) {
  assert(is_in_reserved(p) || p == nullptr,
    "Does not work if address is non-null and outside of the heap");
  return p < _young_gen->reserved().end() && p != nullptr;
}
#endif

void SerialHeap::object_iterate(ObjectClosure* cl) {
  _young_gen->object_iterate(cl);
  _old_gen->object_iterate(cl);
}

HeapWord* SerialHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  if (_young_gen->is_in_reserved(addr)) {
    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
    return _young_gen->block_start(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
  return _old_gen->block_start(addr);
}

bool SerialHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");

  if (_young_gen->is_in_reserved(addr)) {
    return _young_gen->eden()->is_in(addr)
        || _young_gen->from()->is_in(addr)
        || _young_gen->to()  ->is_in(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "must be in old-gen");
  return addr < _old_gen->space()->top();
}

size_t SerialHeap::tlab_capacity(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->tlab_capacity();
}

size_t SerialHeap::tlab_used(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->tlab_used();
}

size_t SerialHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->unsafe_max_tlab_alloc();
}

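// TLAB allocation goes through the common slow path; the requested size is
// granted in full whenever the allocation succeeds.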
HeapWord* SerialHeap::allocate_new_tlab(size_t min_size,
                                        size_t requested_size,
                                        size_t* actual_size) {
  HeapWord* result = mem_allocate_work(requested_size /* size */,
                                       true /* is_tlab */);
  if (result != nullptr) {
    *actual_size = requested_size;
  }

  return result;
}

void SerialHeap::prepare_for_verify() {
  ensure_parsability(false);        // no need to retire TLABs
}

bool SerialHeap::is_maximal_no_gc() const {
  // We don't expand young-gen except at a GC.
  return _old_gen->is_maximal_no_gc();
}

void SerialHeap::save_marks() {
  _young_gen->save_marks();
  _old_gen->save_marks();
}

void SerialHeap::verify(VerifyOption option /* ignored */) {
  log_debug(gc, verify)("%s", _old_gen->name());
  _old_gen->verify();

  log_debug(gc, verify)("%s", _young_gen->name());
  _young_gen->verify();

  log_debug(gc, verify)("RemSet");
  rem_set()->verify();
}

void SerialHeap::print_on(outputStream* st) const {
  if (_young_gen != nullptr) {
    _young_gen->print_on(st);
  }
  if (_old_gen != nullptr) {
    _old_gen->print_on(st);
  }
  MetaspaceUtils::print_on(st);
}

void SerialHeap::gc_threads_do(ThreadClosure* tc) const {
}

bool SerialHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<SerialHeap>::print_location(st, addr);
}

void SerialHeap::print_tracing_info() const {
  if (log_is_enabled(Debug, gc, heap, exit)) {
    LogStreamHandle(Debug, gc, heap, exit) lsh;
    _young_gen->print_summary_info_on(&lsh);
    _old_gen->print_summary_info_on(&lsh);
  }
}

void SerialHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
  const DefNewGeneration* const def_new_gen = (DefNewGeneration*) young_gen();

  log_info(gc, heap)(HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(def_new_gen->short_name(),
                                             pre_gc_values.young_gen_used(),
                                             pre_gc_values.young_gen_capacity(),
                                             def_new_gen->used(),
                                             def_new_gen->capacity()),
                     HEAP_CHANGE_FORMAT_ARGS("Eden",
                                             pre_gc_values.eden_used(),
                                             pre_gc_values.eden_capacity(),
                                             def_new_gen->eden()->used(),
                                             def_new_gen->eden()->capacity()),
                     HEAP_CHANGE_FORMAT_ARGS("From",
                                             pre_gc_values.from_used(),
                                             pre_gc_values.from_capacity(),
                                             def_new_gen->from()->used(),
                                             def_new_gen->from()->capacity()));
  log_info(gc, heap)(HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(old_gen()->short_name(),
                                             pre_gc_values.old_gen_used(),
                                             pre_gc_values.old_gen_capacity(),
                                             old_gen()->used(),
                                             old_gen()->capacity()));
  MetaspaceUtils::print_metaspace_change(pre_gc_values.metaspace_sizes());
}

void SerialHeap::gc_prologue(bool full) {
  // Fill TLAB's and such
  ensure_parsability(true);   // retire TLABs

  _old_gen->gc_prologue();
};

void SerialHeap::gc_epilogue(bool full) {
#if COMPILER2_OR_JVMCI
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
#endif // COMPILER2_OR_JVMCI

  resize_all_tlabs();

  _young_gen->gc_epilogue(full);
  _old_gen->gc_epilogue();

  MetaspaceCounters::update_performance_counters();
};

#ifndef PRODUCT
void SerialHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    _young_gen->record_spaces_top();
    _old_gen->record_spaces_top();
  }
}
#endif  // not PRODUCT