/*
 * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/serial/cardTableRS.hpp"
#include "gc/serial/defNewGeneration.inline.hpp"
#include "gc/serial/genMarkSweep.hpp"
#include "gc/serial/markSweep.hpp"
#include "gc/serial/serialHeap.hpp"
#include "gc/serial/serialMemoryPools.hpp"
#include "gc/serial/serialVMOperations.hpp"
#include "gc/serial/tenuredGeneration.inline.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcInitLogger.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageParState.inline.hpp"
#include "gc/shared/oopStorageSet.inline.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/workerThread.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/threads.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryManager.hpp"
#include "services/memoryService.hpp"
#include "utilities/autoRestore.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/vmError.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

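// Convenient access to the global heap, downcast to SerialHeap
// (e.g. SerialHeap* sh = SerialHeap::heap();).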
SerialHeap* SerialHeap::heap() {
  return named_heap<SerialHeap>(CollectedHeap::Serial);
}

SerialHeap::SerialHeap() :
    CollectedHeap(),
    _young_gen(nullptr),
    _old_gen(nullptr),
    _rem_set(nullptr),
    _gc_policy_counters(new GCPolicyCounters("Copy:MSC", 2, 2)),
    _incremental_collection_failed(false),
    _young_manager(nullptr),
    _old_manager(nullptr),
    _eden_pool(nullptr),
    _survivor_pool(nullptr),
    _old_pool(nullptr) {
  _young_manager = new GCMemoryManager("Copy");
  _old_manager = new GCMemoryManager("MarkSweepCompact");
}

void SerialHeap::initialize_serviceability() {
  DefNewGeneration* young = young_gen();

  // Add a memory pool for each space. The young-gen pools do not support
  // low-memory detection, because eden and the survivor spaces are expected
  // to fill up.
  _eden_pool = new ContiguousSpacePool(young->eden(),
                                       "Eden Space",
                                       young->max_eden_size(),
                                       false /* support_usage_threshold */);
  _survivor_pool = new SurvivorContiguousSpacePool(young,
                                                   "Survivor Space",
                                                   young->max_survivor_size(),
                                                   false /* support_usage_threshold */);
  TenuredGeneration* old = old_gen();
  _old_pool = new TenuredGenerationPool(old, "Tenured Gen", true);

  _young_manager->add_pool(_eden_pool);
  _young_manager->add_pool(_survivor_pool);
  young->set_gc_manager(_young_manager);

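  // The full-GC ("MarkSweepCompact") manager covers all three pools, since a
  // full collection works on the entire heap.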
  _old_manager->add_pool(_eden_pool);
  _old_manager->add_pool(_survivor_pool);
  _old_manager->add_pool(_old_pool);
  old->set_gc_manager(_old_manager);
}

GrowableArray<GCMemoryManager*> SerialHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(_young_manager);
  memory_managers.append(_old_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> SerialHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(3);
  memory_pools.append(_eden_pool);
  memory_pools.append(_survivor_pool);
  memory_pools.append(_old_pool);
  return memory_pools;
}

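// With Serial GC the only suspendible threads are the string-deduplication
// threads, so safepoint synchronization is needed only when string
// deduplication is enabled.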
void SerialHeap::safepoint_synchronize_begin() {
  if (UseStringDeduplication) {
    SuspendibleThreadSet::synchronize();
  }
}

void SerialHeap::safepoint_synchronize_end() {
  if (UseStringDeduplication) {
    SuspendibleThreadSet::desynchronize();
  }
}

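// Loading the archived heap: archived objects are allocated directly in the
// old generation, under the Heap_lock.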
HeapWord* SerialHeap::allocate_loaded_archive_space(size_t word_size) {
  MutexLocker ml(Heap_lock);
  return old_gen()->allocate(word_size, false /* is_tlab */);
}

void SerialHeap::complete_loaded_archive_space(MemRegion archive_space) {
  assert(old_gen()->used_region().contains(archive_space), "Archive space not contained in old gen");
  old_gen()->complete_loaded_archive_space(archive_space);
}

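// Object pinning is implemented with the GCLocker: while a thread has a
// pinned object it is treated as being in a critical section, so GCs are
// deferred until the object is unpinned.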
void SerialHeap::pin_object(JavaThread* thread, oop obj) {
  GCLocker::lock_critical(thread);
}

void SerialHeap::unpin_object(JavaThread* thread, oop obj) {
  GCLocker::unlock_critical(thread);
}

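// Reserve the heap as a single contiguous range and split it: the young
// generation occupies the low MaxNewSize bytes, the old generation the
// remainder. The card table and its barrier set cover the whole reservation.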
jint SerialHeap::initialize() {
  // Allocate space for the heap.

  ReservedHeapSpace heap_rs = allocate(HeapAlignment);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  initialize_reserved_region(heap_rs);

  ReservedSpace young_rs = heap_rs.first_part(MaxNewSize);
  ReservedSpace old_rs = heap_rs.last_part(MaxNewSize);

  _rem_set = new CardTableRS(heap_rs.region());
  _rem_set->initialize(young_rs.base(), old_rs.base());

  CardTableBarrierSet *bs = new CardTableBarrierSet(_rem_set);
  bs->initialize();
  BarrierSet::set_barrier_set(bs);

  _young_gen = new DefNewGeneration(young_rs, NewSize, MinNewSize, MaxNewSize);
  _old_gen = new TenuredGeneration(old_rs, OldSize, MinOldSize, MaxOldSize, rem_set());

  GCInitLogger::print();

  return JNI_OK;
}

ReservedHeapSpace SerialHeap::allocate(size_t alignment) {
  // Now figure out the total size.
  const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
  assert(alignment % pageSize == 0, "Must be");

  // Check for overflow.
  size_t total_reserved = MaxNewSize + MaxOldSize;
  if (total_reserved < MaxNewSize) {
    vm_exit_during_initialization("The size of the object heap + VM data exceeds "
                                  "the maximum representable size");
  }
  assert(total_reserved % alignment == 0,
         "Gen size; total_reserved=" SIZE_FORMAT ", alignment="
         SIZE_FORMAT, total_reserved, alignment);

  ReservedHeapSpace heap_rs = Universe::reserve_heap(total_reserved, alignment);
  size_t used_page_size = heap_rs.page_size();

  os::trace_page_sizes("Heap",
                       MinHeapSize,
                       total_reserved,
                       heap_rs.base(),
                       heap_rs.size(),
                       used_page_size);

  return heap_rs;
}

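// Closure used by ScavengableNMethods: an nmethod is "scavengable" if it has
// an oop pointing into the young generation.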
class GenIsScavengable : public BoolObjectClosure {
public:
  bool do_object_b(oop obj) {
    return SerialHeap::heap()->is_in_young(obj);
  }
};

static GenIsScavengable _is_scavengable;

void SerialHeap::post_initialize() {
  CollectedHeap::post_initialize();

  DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;

  def_new_gen->ref_processor_init();

  MarkSweep::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
}

PreGenGCValues SerialHeap::get_pre_gc_values() const {
  const DefNewGeneration* const def_new_gen = (DefNewGeneration*) young_gen();

  return PreGenGCValues(def_new_gen->used(),
                        def_new_gen->capacity(),
                        def_new_gen->eden()->used(),
                        def_new_gen->eden()->capacity(),
                        def_new_gen->from()->used(),
                        def_new_gen->from()->capacity(),
                        old_gen()->used(),
                        old_gen()->capacity());
}

size_t SerialHeap::capacity() const {
  return _young_gen->capacity() + _old_gen->capacity();
}

size_t SerialHeap::used() const {
  return _young_gen->used() + _old_gen->used();
}

size_t SerialHeap::max_capacity() const {
  return _young_gen->max_capacity() + _old_gen->max_capacity();
}

// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)
// . heap memory is tight -- the most recent previous collection
//   was a full collection because a partial collection (would
//   have) failed and is likely to fail again
bool SerialHeap::should_try_older_generation_allocation(size_t word_size) const {
  size_t young_capacity = _young_gen->capacity_before_gc();
  return    (word_size > heap_word_size(young_capacity))
         || GCLocker::is_active_and_needs_gc()
         || incremental_collection_failed();
}

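// Last-resort allocation without triggering a GC: try to expand the old
// generation and then the young generation to satisfy the request.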
HeapWord* SerialHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
  HeapWord* result = nullptr;
  if (_old_gen->should_allocate(size, is_tlab)) {
    result = _old_gen->expand_and_allocate(size, is_tlab);
  }
  if (result == nullptr) {
    if (_young_gen->should_allocate(size, is_tlab)) {
      result = _young_gen->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == nullptr || is_in_reserved(result), "result not in heap");
  return result;
}

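// Common slow-path allocation for ordinary objects and TLABs: loop over a
// lock-free young-gen attempt, a locked attempt (optionally also in the old
// generation), and a GC performed as a VM operation, stalling on the
// GCLocker when a JNI critical section currently blocks collection.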
HeapWord* SerialHeap::mem_allocate_work(size_t size,
                                        bool is_tlab) {

  HeapWord* result = nullptr;

  // Loop until the allocation is satisfied, or unsatisfied after GC.
  for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {

    // First allocation attempt is lock-free.
    Generation *young = _young_gen;
    if (young->should_allocate(size, is_tlab)) {
      result = young->par_allocate(size, is_tlab);
      if (result != nullptr) {
        assert(is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    uint gc_count_before;  // Read inside the Heap_lock locked region.
    {
      MutexLocker ml(Heap_lock);
      log_trace(gc, alloc)("SerialHeap::mem_allocate_work: attempting locked slow path allocation");
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = !should_try_older_generation_allocation(size);

      result = attempt_allocation(size, is_tlab, first_only);
      if (result != nullptr) {
        assert(is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GCLocker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return nullptr;  // Caller will retry allocating individual object.
        }
        if (!is_maximal_no_gc()) {
          // Try and expand heap to satisfy request.
          result = expand_heap_and_allocate(size, is_tlab);
          // Result could be null if we are out of space.
          if (result != nullptr) {
            return result;
          }
        }

        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
          return nullptr; // We didn't get to do a GC and we didn't get any memory.
        }

        // If this thread is not in a JNI critical section, stall the
        // requesting thread until the critical section has cleared and a GC
        // is allowed. When the critical section clears, a GC is initiated by
        // the last thread exiting it, so we retry the allocation sequence
        // from the beginning of the loop rather than causing more, now
        // probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return nullptr;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = total_collections();
    }

    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
         assert(result == nullptr, "must be null if gc_locked() is true");
         continue;  // Retry and/or stall as necessary.
      }

      assert(result == nullptr || is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
          log_warning(gc, ergo)("SerialHeap::mem_allocate_work retries %d times,"
                                " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}

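// Try the young generation first; unless first_only is set, fall back to the
// old generation.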
HeapWord* SerialHeap::attempt_allocation(size_t size,
                                         bool is_tlab,
                                         bool first_only) {
  HeapWord* res = nullptr;

  if (_young_gen->should_allocate(size, is_tlab)) {
    res = _young_gen->allocate(size, is_tlab);
    if (res != nullptr || first_only) {
      return res;
    }
  }

  if (_old_gen->should_allocate(size, is_tlab)) {
    res = _old_gen->allocate(size, is_tlab);
  }

  return res;
}

HeapWord* SerialHeap::mem_allocate(size_t size,
                                   bool* gc_overhead_limit_was_exceeded) {
  return mem_allocate_work(size,
                           false /* is_tlab */);
}

bool SerialHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
         _gc_cause == GCCause::_wb_full_gc;
}

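// Collect a single generation, bracketed by the usual bookkeeping: collector
// and memory-manager statistics, timing, optional verification before and
// after, and derived-pointer-table maintenance for C2/JVMCI.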
void SerialHeap::collect_generation(Generation* gen, bool full, size_t size,
                                    bool is_tlab, bool run_verification, bool clear_soft_refs) {
  FormatBuffer<> title("Collect gen: %s", gen->short_name());
  GCTraceTime(Trace, gc, phases) t1(title);
  TraceCollectorStats tcs(gen->counters());
  TraceMemoryManagerStats tmms(gen->gc_manager(), gc_cause(), heap()->is_young_gen(gen) ? "end of minor GC" : "end of major GC");

  gen->stat_record()->invocations++;
  gen->stat_record()->accumulated_time.start();

  // Must be redone before each collection, because a previous collection
  // will have mangled unused space and changed the top of some spaces.
  record_gen_tops_before_GC();

  log_trace(gc)("%s invoke=%d size=" SIZE_FORMAT, heap()->is_young_gen(gen) ? "Young" : "Old", gen->stat_record()->invocations, size * HeapWordSize);

  if (run_verification && VerifyBeforeGC) {
    Universe::verify("Before GC");
  }
  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::clear());

  // Do collection work
  {
    save_marks();   // save marks for all gens

    gen->collect(full, clear_soft_refs, size, is_tlab);
  }

  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::update_pointers());

  gen->stat_record()->accumulated_time.stop();

  update_gc_stats(gen, full);

  if (run_verification && VerifyAfterGC) {
    Universe::verify("After GC");
  }
}

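// Perform a collection at a safepoint: run a young collection if one is
// wanted, and, if that is not sufficient (or a full collection was requested
// up front), a full mark-sweep-compact of the whole heap, including class
// unloading and metaspace resizing.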
void SerialHeap::do_collection(bool full,
                               bool clear_all_soft_refs,
                               size_t size,
                               bool is_tlab,
                               GenerationType max_generation) {
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread(), "only VM thread");
  assert(Heap_lock->is_locked(),
         "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");

  if (GCLocker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                          soft_ref_policy()->should_clear_all_soft_refs();

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, soft_ref_policy());

  AutoModifyRestore<bool> temporarily(_is_gc_active, true);

  bool complete = full && (max_generation == OldGen);
  bool old_collects_young = complete && !ScavengeBeforeFullGC;
  bool do_young_collection = !old_collects_young && _young_gen->should_collect(full, size, is_tlab);

  const PreGenGCValues pre_gc_values = get_pre_gc_values();

  bool run_verification = total_collections() >= VerifyGCStartAt;
  bool prepared_for_verification = false;
  bool do_full_collection = false;

  if (do_young_collection) {
    GCIdMark gc_id_mark;
    GCTraceCPUTime tcpu(((DefNewGeneration*)_young_gen)->gc_tracer());
    GCTraceTime(Info, gc) t("Pause Young", nullptr, gc_cause(), true);

    print_heap_before_gc();

    if (run_verification && VerifyBeforeGC) {
      prepare_for_verify();
      prepared_for_verification = true;
    }

    gc_prologue(complete);
    increment_total_collections(complete);

    collect_generation(_young_gen,
                       full,
                       size,
                       is_tlab,
                       run_verification,
                       do_clear_all_soft_refs);

    if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
        size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {
      // Allocation request was met by young GC.
      size = 0;
    }

    // Ask whether the young collection is enough. If so, do the final steps
    // for the young collection and fall through to the end.
    do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation);
    if (!do_full_collection) {
      // Adjust generation sizes.
      _young_gen->compute_new_size();

      print_heap_change(pre_gc_values);

      // Track memory usage and detect low memory after GC finishes
      MemoryService::track_memory_usage();

      gc_epilogue(complete);
    }

    print_heap_after_gc();

  } else {
    // No young collection, ask if we need to perform Full collection.
    do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation);
  }

  if (do_full_collection) {
    GCIdMark gc_id_mark;
    GCTraceCPUTime tcpu(GenMarkSweep::gc_tracer());
    GCTraceTime(Info, gc) t("Pause Full", nullptr, gc_cause(), true);

    print_heap_before_gc();

    if (!prepared_for_verification && run_verification && VerifyBeforeGC) {
      prepare_for_verify();
    }

    if (!do_young_collection) {
      gc_prologue(complete);
      increment_total_collections(complete);
    }

    // Accounting quirk: the total-full-collections counter is incremented by
    // the increment_total_collections call above only when "complete" is set.
    // We also need to account for Full collections that had "complete" unset.
    if (!complete) {
      increment_total_full_collections();
    }

    CodeCache::on_gc_marking_cycle_start();

    ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
                              false /* unregister_nmethods_during_purge */,
                              false /* lock_codeblob_free_separately */);

    collect_generation(_old_gen,
                       full,
                       size,
                       is_tlab,
                       run_verification,
                       do_clear_all_soft_refs);

    CodeCache::on_gc_marking_cycle_finish();
    CodeCache::arm_all_nmethods();

    // Adjust generation sizes.
    _old_gen->compute_new_size();
    _young_gen->compute_new_size();

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge(/*at_safepoint*/true);
    DEBUG_ONLY(MetaspaceUtils::verify();)

    // Need to clear claim bits for the next mark.
    ClassLoaderDataGraph::clear_claimed_marks();

    // Resize the metaspace capacity after full collections
    MetaspaceGC::compute_new_size();

    print_heap_change(pre_gc_values);

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    // Need to tell the epilogue code we are done with Full GC, regardless what was
    // the initial value for "complete" flag.
    gc_epilogue(true);

    print_heap_after_gc();
  }
}

bool SerialHeap::should_do_full_collection(size_t size, bool full, bool is_tlab,
                                           SerialHeap::GenerationType max_gen) const {
  return max_gen == OldGen && _old_gen->should_collect(full, size, is_tlab);
}

void SerialHeap::register_nmethod(nmethod* nm) {
  ScavengableNMethods::register_nmethod(nm);
}

void SerialHeap::unregister_nmethod(nmethod* nm) {
  ScavengableNMethods::unregister_nmethod(nm);
}

void SerialHeap::verify_nmethod(nmethod* nm) {
  ScavengableNMethods::verify_nmethod(nm);
}

void SerialHeap::prune_scavengable_nmethods() {
  ScavengableNMethods::prune_nmethods_not_into_young();
}

void SerialHeap::prune_unlinked_nmethods() {
  ScavengableNMethods::prune_unlinked_nmethods();
}

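// Called at a safepoint after an allocation has failed. Escalates through:
// heap expansion only (if the GCLocker is active), an incremental (young)
// collection, a full collection, and finally a maximally-compacting full
// collection that clears soft references, retrying the allocation after
// each step.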
HeapWord* SerialHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  GCCauseSetter x(this, GCCause::_allocation_failure);
  HeapWord* result = nullptr;

  assert(size != 0, "Precondition violated");
  if (GCLocker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;   // Could be null if we are out of space.
  } else if (!incremental_collection_will_fail(false /* don't consult_young */)) {
    // Do an incremental collection.
    do_collection(false,                     // full
                  false,                     // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  SerialHeap::OldGen); // max_generation
  } else {
    log_trace(gc)(" :: Trying full because partial may fail :: ");
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    do_collection(true,                      // full
                  false,                     // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  SerialHeap::OldGen); // max_generation
  }

  result = attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != nullptr) {
    assert(is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != nullptr) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    do_collection(true,                      // full
                  true,                      // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  SerialHeap::OldGen); // max_generation
  }

  result = attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != nullptr) {
    assert(is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!soft_ref_policy()->should_clear_all_soft_refs(),
    "Flag should have been handled and cleared prior to this point");

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return nullptr;
}

#ifdef ASSERT
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!SerialHeap::heap()->is_in_partial_collection(*p),
           "Referent should not be scavengable.");
  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif

void SerialHeap::process_roots(ScanningOption so,
                               OopClosure* strong_roots,
                               CLDClosure* strong_cld_closure,
                               CLDClosure* weak_cld_closure,
                               CodeBlobToOopClosure* code_roots) {
  // General roots.
  assert(code_roots != nullptr, "code root closure should always be set");

  ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);

  // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
  CodeBlobToOopClosure* roots_from_code_p = (so & SO_AllCodeCache) ? nullptr : code_roots;

  Threads::oops_do(strong_roots, roots_from_code_p);

  OopStorageSet::strong_oops_do(strong_roots);

  if (so & SO_ScavengeCodeCache) {
    assert(code_roots != nullptr, "must supply closure for code cache");

    // We only visit parts of the CodeCache when scavenging.
    ScavengableNMethods::nmethods_do(code_roots);
  }
  if (so & SO_AllCodeCache) {
    assert(code_roots != nullptr, "must supply closure for code cache");

    // CMSCollector uses this to do intermediate-strength collections.
    // We scan the entire code cache, since CodeCache::do_unloading is not called.
    CodeCache::blobs_do(code_roots);
  }
  // Verify that the code cache contents are not subject to
  // movement by a scavenging collection.
  DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
  DEBUG_ONLY(ScavengableNMethods::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
}

bool SerialHeap::no_allocs_since_save_marks() {
  return _young_gen->no_allocs_since_save_marks() &&
         _old_gen->no_allocs_since_save_marks();
}

// public collection interfaces
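// Explicit collection request (e.g. System.gc(), WhiteBox, GCLocker-induced).
// The collection counters are read under the Heap_lock so that a collection
// that already satisfied the request can be detected; for explicit full GCs
// the VM operation is retried until a full collection has actually run.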
void SerialHeap::collect(GCCause::Cause cause) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  unsigned int gc_count_before;
  unsigned int full_gc_count_before;

  {
    MutexLocker ml(Heap_lock);
    // Read the GC count while holding the Heap_lock
    gc_count_before      = total_collections();
    full_gc_count_before = total_full_collections();
  }

  if (GCLocker::should_discard(cause, gc_count_before)) {
    return;
  }

  bool should_run_young_gc =  (cause == GCCause::_wb_young_gc)
                           || (cause == GCCause::_gc_locker)
                DEBUG_ONLY(|| (cause == GCCause::_scavenge_alot));

  const GenerationType max_generation = should_run_young_gc
                                      ? YoungGen
                                      : OldGen;

  while (true) {
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_generation);
    VMThread::execute(&op);

    if (!GCCause::is_explicit_full_gc(cause)) {
      return;
    }

    {
      MutexLocker ml(Heap_lock);
      // Read the GC count while holding the Heap_lock
      if (full_gc_count_before != total_full_collections()) {
        return;
      }
    }

    if (GCLocker::is_active_and_needs_gc()) {
      // If GCLocker is active, wait until clear before retrying.
      GCLocker::stall_until_clear();
    }
  }
}

void SerialHeap::do_full_collection(bool clear_all_soft_refs) {
  do_full_collection(clear_all_soft_refs, OldGen);
}

void SerialHeap::do_full_collection(bool clear_all_soft_refs,
                                    GenerationType last_generation) {
  do_collection(true,                   // full
                clear_all_soft_refs,    // clear_all_soft_refs
                0,                      // size
                false,                  // is_tlab
                last_generation);       // last_generation
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full
  if (gc_cause() == GCCause::_gc_locker && incremental_collection_failed()) {
    log_debug(gc, jni)("GC locker: Trying a full collection because scavenge failed");
    // This time allow the old gen to be collected as well
    do_collection(true,                // full
                  clear_all_soft_refs, // clear_all_soft_refs
                  0,                   // size
                  false,               // is_tlab
                  OldGen);             // last_generation
  }
}

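// The young generation lies at the lower end of the reserved heap, so a
// single address comparison against the start of the old generation is
// enough; the assert cross-checks against the young generation's reserved
// region.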
bool SerialHeap::is_in_young(const void* p) const {
  bool result = p < _old_gen->reserved().start();
  assert(result == _young_gen->is_in_reserved(p),
         "incorrect test - result=%d, p=" PTR_FORMAT, result, p2i(p));
  return result;
}

bool SerialHeap::requires_barriers(stackChunkOop obj) const {
  return !is_in_young(obj);
}

// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool SerialHeap::is_in(const void* p) const {
  return _young_gen->is_in(p) || _old_gen->is_in(p);
}

#ifdef ASSERT
// Don't implement this by using is_in_young().  This method is used
// in some cases to check that is_in_young() is correct.
bool SerialHeap::is_in_partial_collection(const void* p) {
  assert(is_in_reserved(p) || p == nullptr,
    "Does not work if address is non-null and outside of the heap");
  return p < _young_gen->reserved().end() && p != nullptr;
}
#endif

void SerialHeap::object_iterate(ObjectClosure* cl) {
  _young_gen->object_iterate(cl);
  _old_gen->object_iterate(cl);
}

HeapWord* SerialHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  if (_young_gen->is_in_reserved(addr)) {
    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
    return _young_gen->block_start(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
  return _old_gen->block_start(addr);
}

bool SerialHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");

  if (_young_gen->is_in_reserved(addr)) {
    return _young_gen->eden()->is_in(addr)
        || _young_gen->from()->is_in(addr)
        || _young_gen->to()  ->is_in(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "must be in old-gen");
  return addr < _old_gen->space()->top();
}

size_t SerialHeap::tlab_capacity(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->tlab_capacity();
}

size_t SerialHeap::tlab_used(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->tlab_used();
}

size_t SerialHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->unsafe_max_tlab_alloc();
}

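// New TLABs go through the common slow path; the requested size is either
// satisfied in full or not at all.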
HeapWord* SerialHeap::allocate_new_tlab(size_t min_size,
                                        size_t requested_size,
                                        size_t* actual_size) {
  HeapWord* result = mem_allocate_work(requested_size /* size */,
                                       true /* is_tlab */);
  if (result != nullptr) {
    *actual_size = requested_size;
  }

  return result;
}

void SerialHeap::prepare_for_verify() {
  ensure_parsability(false);        // no need to retire TLABs
}

bool SerialHeap::is_maximal_no_gc() const {
  // We don't expand young-gen except at a GC.
  return _old_gen->is_maximal_no_gc();
}

void SerialHeap::save_marks() {
  _young_gen->save_marks();
  _old_gen->save_marks();
}

void SerialHeap::verify(VerifyOption option /* ignored */) {
  log_debug(gc, verify)("%s", _old_gen->name());
  _old_gen->verify();

  log_debug(gc, verify)("%s", _young_gen->name());
  _young_gen->verify();

  log_debug(gc, verify)("RemSet");
  rem_set()->verify();
}

void SerialHeap::print_on(outputStream* st) const {
  if (_young_gen != nullptr) {
    _young_gen->print_on(st);
  }
  if (_old_gen != nullptr) {
    _old_gen->print_on(st);
  }
  MetaspaceUtils::print_on(st);
}

void SerialHeap::gc_threads_do(ThreadClosure* tc) const {
}

bool SerialHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<SerialHeap>::print_location(st, addr);
}

void SerialHeap::print_tracing_info() const {
  if (log_is_enabled(Debug, gc, heap, exit)) {
    LogStreamHandle(Debug, gc, heap, exit) lsh;
    _young_gen->print_summary_info_on(&lsh);
    _old_gen->print_summary_info_on(&lsh);
  }
}

void SerialHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
  const DefNewGeneration* const def_new_gen = (DefNewGeneration*) young_gen();

  log_info(gc, heap)(HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(def_new_gen->short_name(),
                                             pre_gc_values.young_gen_used(),
                                             pre_gc_values.young_gen_capacity(),
                                             def_new_gen->used(),
                                             def_new_gen->capacity()),
                     HEAP_CHANGE_FORMAT_ARGS("Eden",
                                             pre_gc_values.eden_used(),
                                             pre_gc_values.eden_capacity(),
                                             def_new_gen->eden()->used(),
                                             def_new_gen->eden()->capacity()),
                     HEAP_CHANGE_FORMAT_ARGS("From",
                                             pre_gc_values.from_used(),
                                             pre_gc_values.from_capacity(),
                                             def_new_gen->from()->used(),
                                             def_new_gen->from()->capacity()));
  log_info(gc, heap)(HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(old_gen()->short_name(),
                                             pre_gc_values.old_gen_used(),
                                             pre_gc_values.old_gen_capacity(),
                                             old_gen()->used(),
                                             old_gen()->capacity()));
  MetaspaceUtils::print_metaspace_change(pre_gc_values.metaspace_sizes());
}

void SerialHeap::gc_prologue(bool full) {
  // Fill TLAB's and such
  ensure_parsability(true);   // retire TLABs

  _old_gen->gc_prologue();
}

void SerialHeap::gc_epilogue(bool full) {
#if COMPILER2_OR_JVMCI
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
#endif // COMPILER2_OR_JVMCI

  resize_all_tlabs();

  _young_gen->gc_epilogue(full);
  _old_gen->gc_epilogue();

  MetaspaceCounters::update_performance_counters();
}

#ifndef PRODUCT
void SerialHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    _young_gen->record_spaces_top();
    _old_gen->record_spaces_top();
  }
}
#endif  // not PRODUCT