/*
 * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "compiler/oopMap.hpp"
#include "gc/serial/cardTableRS.hpp"
#include "gc/serial/defNewGeneration.hpp"
#include "gc/serial/genMarkSweep.hpp"
#include "gc/serial/markSweep.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcInitLogger.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageParState.inline.hpp"
#include "gc/shared/oopStorageSet.inline.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/slidingForwarding.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/workerThread.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/threads.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/autoRestore.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/vmError.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

GenCollectedHeap::GenCollectedHeap(Generation::Name young,
                                   Generation::Name old,
                                   const char* policy_counters_name) :
  CollectedHeap(),
  _young_gen(nullptr),
  _old_gen(nullptr),
  _young_gen_spec(new GenerationSpec(young,
                                     NewSize,
                                     MaxNewSize,
                                     GenAlignment)),
  _old_gen_spec(new GenerationSpec(old,
                                   OldSize,
                                   MaxOldSize,
                                   GenAlignment)),
  _rem_set(nullptr),
  _soft_ref_gen_policy(),
  _size_policy(nullptr),
  _gc_policy_counters(new GCPolicyCounters(policy_counters_name, 2, 2)),
  _incremental_collection_failed(false),
  _full_collections_completed(0),
  _young_manager(nullptr),
  _old_manager(nullptr) {
}

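// Reserve the heap and carve it into the two generations: the young
// generation is placed at the low end of the reserved space and the old
// generation directly above it.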
jint GenCollectedHeap::initialize() {
  // Allocate space for the heap.

  ReservedHeapSpace heap_rs = allocate(HeapAlignment);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  initialize_reserved_region(heap_rs);

  ReservedSpace young_rs = heap_rs.first_part(_young_gen_spec->max_size());
  ReservedSpace old_rs = heap_rs.last_part(_young_gen_spec->max_size());

  _rem_set = create_rem_set(heap_rs.region());
  _rem_set->initialize(young_rs.base(), old_rs.base());

  CardTableBarrierSet *bs = new CardTableBarrierSet(_rem_set);
  bs->initialize();
  BarrierSet::set_barrier_set(bs);

  _young_gen = _young_gen_spec->init(young_rs, rem_set());
  _old_gen = _old_gen_spec->init(old_rs, rem_set());

  GCInitLogger::print();

  SlidingForwarding::initialize(_reserved, SpaceAlignment / HeapWordSize);

  return JNI_OK;
}

CardTableRS* GenCollectedHeap::create_rem_set(const MemRegion& reserved_region) {
  return new CardTableRS(reserved_region);
}

void GenCollectedHeap::initialize_size_policy(size_t init_eden_size,
                                              size_t init_promo_size,
                                              size_t init_survivor_size) {
  const double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0;
  _size_policy = new AdaptiveSizePolicy(init_eden_size,
                                        init_promo_size,
                                        init_survivor_size,
                                        max_gc_pause_sec,
                                        GCTimeRatio);
}

ReservedHeapSpace GenCollectedHeap::allocate(size_t alignment) {
  // Now figure out the total size.
  const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
  assert(alignment % pageSize == 0, "Must be");

  // Check for overflow.
  size_t total_reserved = _young_gen_spec->max_size() + _old_gen_spec->max_size();
  if (total_reserved < _young_gen_spec->max_size()) {
    vm_exit_during_initialization("The size of the object heap + VM data exceeds "
                                  "the maximum representable size");
  }
  assert(total_reserved % alignment == 0,
         "Gen size; total_reserved=" SIZE_FORMAT ", alignment="
         SIZE_FORMAT, total_reserved, alignment);

  ReservedHeapSpace heap_rs = Universe::reserve_heap(total_reserved, alignment);
  size_t used_page_size = heap_rs.page_size();

  os::trace_page_sizes("Heap",
                       MinHeapSize,
                       total_reserved,
                       used_page_size,
                       heap_rs.base(),
                       heap_rs.size());

  return heap_rs;
}

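// Closure used by ScavengableNMethods: an object is considered scavengable
// iff it resides in the young generation.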
class GenIsScavengable : public BoolObjectClosure {
public:
  bool do_object_b(oop obj) {
    return GenCollectedHeap::heap()->is_in_young(obj);
  }
};

static GenIsScavengable _is_scavengable;

void GenCollectedHeap::post_initialize() {
  CollectedHeap::post_initialize();

  DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;

  def_new_gen->ref_processor_init();

  initialize_size_policy(def_new_gen->eden()->capacity(),
                         _old_gen->capacity(),
                         def_new_gen->from()->capacity());

  MarkSweep::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
}

PreGenGCValues GenCollectedHeap::get_pre_gc_values() const {
  const DefNewGeneration* const def_new_gen = (DefNewGeneration*) young_gen();

  return PreGenGCValues(def_new_gen->used(),
                        def_new_gen->capacity(),
                        def_new_gen->eden()->used(),
                        def_new_gen->eden()->capacity(),
                        def_new_gen->from()->used(),
                        def_new_gen->from()->capacity(),
                        old_gen()->used(),
                        old_gen()->capacity());
}

GenerationSpec* GenCollectedHeap::young_gen_spec() const {
  return _young_gen_spec;
}

GenerationSpec* GenCollectedHeap::old_gen_spec() const {
  return _old_gen_spec;
}

size_t GenCollectedHeap::capacity() const {
  return _young_gen->capacity() + _old_gen->capacity();
}

size_t GenCollectedHeap::used() const {
  return _young_gen->used() + _old_gen->used();
}

void GenCollectedHeap::save_used_regions() {
  _old_gen->save_used_region();
  _young_gen->save_used_region();
}

size_t GenCollectedHeap::max_capacity() const {
  return _young_gen->max_capacity() + _old_gen->max_capacity();
}

// Update the _full_collections_completed counter
// at the end of a stop-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  return _full_collections_completed;
}

// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)
// . heap memory is tight -- the most recent previous collection
//   was a full collection because a partial collection (would
//   have) failed and is likely to fail again
bool GenCollectedHeap::should_try_older_generation_allocation(size_t word_size) const {
  size_t young_capacity = _young_gen->capacity_before_gc();
  return    (word_size > heap_word_size(young_capacity))
         || GCLocker::is_active_and_needs_gc()
         || incremental_collection_failed();
}

HeapWord* GenCollectedHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
  HeapWord* result = nullptr;
  if (_old_gen->should_allocate(size, is_tlab)) {
    result = _old_gen->expand_and_allocate(size, is_tlab);
  }
  if (result == nullptr) {
    if (_young_gen->should_allocate(size, is_tlab)) {
      result = _young_gen->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == nullptr || is_in_reserved(result), "result not in heap");
  return result;
}

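// Allocation slow path. Tries a lock-free young-gen allocation first, then a
// locked attempt (possibly consulting the old generation), and finally a
// collection via a VM operation; loops until the request is satisfied or
// provably cannot be.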
HeapWord* GenCollectedHeap::mem_allocate_work(size_t size,
                                              bool is_tlab) {

  HeapWord* result = nullptr;

  // Loop until the allocation is satisfied, or unsatisfied after GC.
  for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {

    // First allocation attempt is lock-free.
    Generation *young = _young_gen;
    if (young->should_allocate(size, is_tlab)) {
      result = young->par_allocate(size, is_tlab);
      if (result != nullptr) {
        assert(is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    uint gc_count_before;  // Read inside the Heap_lock locked region.
    {
      MutexLocker ml(Heap_lock);
      log_trace(gc, alloc)("GenCollectedHeap::mem_allocate_work: attempting locked slow path allocation");
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = !should_try_older_generation_allocation(size);

      result = attempt_allocation(size, is_tlab, first_only);
      if (result != nullptr) {
        assert(is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GCLocker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return nullptr;  // Caller will retry allocating individual object.
        }
        if (!is_maximal_no_gc()) {
          // Try and expand heap to satisfy request.
          result = expand_heap_and_allocate(size, is_tlab);
          // Result could be null if we are out of space.
          if (result != nullptr) {
            return result;
          }
        }

        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
          return nullptr; // We didn't get to do a GC and we didn't get any memory.
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return nullptr;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = total_collections();
    }

    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
        assert(result == nullptr, "must be null if gc_locked() is true");
        continue;  // Retry and/or stall as necessary.
      }

      assert(result == nullptr || is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("GenCollectedHeap::mem_allocate_work retries %d times,"
                            " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}

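// Try the young generation first; unless first_only is set, fall back to
// the old generation.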
HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res = nullptr;

  if (_young_gen->should_allocate(size, is_tlab)) {
    res = _young_gen->allocate(size, is_tlab);
    if (res != nullptr || first_only) {
      return res;
    }
  }

  if (_old_gen->should_allocate(size, is_tlab)) {
    res = _old_gen->allocate(size, is_tlab);
  }

  return res;
}

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
  return mem_allocate_work(size,
                           false /* is_tlab */);
}

bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
         _gc_cause == GCCause::_wb_full_gc;
}

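// Collect a single generation, bracketed by stat/trace bookkeeping and
// optional before/after verification.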
void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
                                          bool is_tlab, bool run_verification, bool clear_soft_refs) {
  FormatBuffer<> title("Collect gen: %s", gen->short_name());
  GCTraceTime(Trace, gc, phases) t1(title);
  TraceCollectorStats tcs(gen->counters());
  TraceMemoryManagerStats tmms(gen->gc_manager(), gc_cause(), heap()->is_young_gen(gen) ? "end of minor GC" : "end of major GC");

  gen->stat_record()->invocations++;
  gen->stat_record()->accumulated_time.start();

  // Must be done anew before each collection because
  // a previous collection will do mangling and will
  // change top of some spaces.
  record_gen_tops_before_GC();

  log_trace(gc)("%s invoke=%d size=" SIZE_FORMAT, heap()->is_young_gen(gen) ? "Young" : "Old", gen->stat_record()->invocations, size * HeapWordSize);

  if (run_verification && VerifyBeforeGC) {
    Universe::verify("Before GC");
  }
  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::clear());

  // Do collection work
  {
    save_marks();   // save marks for all gens

    gen->collect(full, clear_soft_refs, size, is_tlab);
  }

  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::update_pointers());

  gen->stat_record()->accumulated_time.stop();

  update_gc_stats(gen, full);

  if (run_verification && VerifyAfterGC) {
    Universe::verify("After GC");
  }
}

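// Perform a young and/or a full collection at a safepoint, as determined by
// the generations' should_collect() answers and the requested max_generation.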
void GenCollectedHeap::do_collection(bool           full,
                                     bool           clear_all_soft_refs,
                                     size_t         size,
                                     bool           is_tlab,
                                     GenerationType max_generation) {
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread(), "only VM thread");
  assert(Heap_lock->is_locked(),
         "the requesting thread should have the Heap_lock");
  guarantee(!is_stw_gc_active(), "collection is not reentrant");

  if (GCLocker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                          soft_ref_policy()->should_clear_all_soft_refs();

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, soft_ref_policy());

  AutoModifyRestore<bool> temporarily(_is_stw_gc_active, true);

  bool complete = full && (max_generation == OldGen);
  bool old_collects_young = complete && !ScavengeBeforeFullGC;
  bool do_young_collection = !old_collects_young && _young_gen->should_collect(full, size, is_tlab);

  const PreGenGCValues pre_gc_values = get_pre_gc_values();

  bool run_verification = total_collections() >= VerifyGCStartAt;
  bool prepared_for_verification = false;
  bool do_full_collection = false;

  if (do_young_collection) {
    GCIdMark gc_id_mark;
    GCTraceCPUTime tcpu(((DefNewGeneration*)_young_gen)->gc_tracer());
    GCTraceTime(Info, gc) t("Pause Young", nullptr, gc_cause(), true);

    print_heap_before_gc();

    if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
      prepare_for_verify();
      prepared_for_verification = true;
    }

    gc_prologue(complete);
    increment_total_collections(complete);

    collect_generation(_young_gen,
                       full,
                       size,
                       is_tlab,
                       run_verification && VerifyGCLevel <= 0,
                       do_clear_all_soft_refs);

    if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
        size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {
      // Allocation request was met by young GC.
      size = 0;
    }

    // Ask if young collection is enough. If so, do the final steps for young collection,
    // and fallthrough to the end.
    do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation);
    if (!do_full_collection) {
      // Adjust generation sizes.
      _young_gen->compute_new_size();

      print_heap_change(pre_gc_values);

      // Track memory usage and detect low memory after GC finishes
      MemoryService::track_memory_usage();

      gc_epilogue(complete);
    }

    print_heap_after_gc();

  } else {
    // No young collection, ask if we need to perform Full collection.
    do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation);
  }

  if (do_full_collection) {
    GCIdMark gc_id_mark;
    GCTraceCPUTime tcpu(GenMarkSweep::gc_tracer());
    GCTraceTime(Info, gc) t("Pause Full", nullptr, gc_cause(), true);

    print_heap_before_gc();

    if (!prepared_for_verification && run_verification &&
        VerifyGCLevel <= 1 && VerifyBeforeGC) {
      prepare_for_verify();
    }

    if (!do_young_collection) {
      gc_prologue(complete);
      increment_total_collections(complete);
    }

    // Accounting quirk: total full collections would be incremented when "complete"
    // is set, by calling increment_total_collections above. However, we also need to
    // account Full collections that had "complete" unset.
    if (!complete) {
      increment_total_full_collections();
    }

    CodeCache::on_gc_marking_cycle_start();

    ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
                              false /* unregister_nmethods_during_purge */,
                              false /* lock_codeblob_free_separately */);

    collect_generation(_old_gen,
                       full,
                       size,
                       is_tlab,
                       run_verification && VerifyGCLevel <= 1,
                       do_clear_all_soft_refs);

    CodeCache::on_gc_marking_cycle_finish();
    CodeCache::arm_all_nmethods();

    // Adjust generation sizes.
    _old_gen->compute_new_size();
    _young_gen->compute_new_size();

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge(true /* at_safepoint */);
    DEBUG_ONLY(MetaspaceUtils::verify();)

    // Need to clear claim bits for the next mark.
    ClassLoaderDataGraph::clear_claimed_marks();

    // Resize the metaspace capacity after full collections
    MetaspaceGC::compute_new_size();
    update_full_collections_completed();

    print_heap_change(pre_gc_values);

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    // Need to tell the epilogue code we are done with Full GC, regardless what was
    // the initial value for "complete" flag.
    gc_epilogue(true);

    print_heap_after_gc();
  }
}

bool GenCollectedHeap::should_do_full_collection(size_t size, bool full, bool is_tlab,
                                                 GenCollectedHeap::GenerationType max_gen) const {
  return max_gen == OldGen && _old_gen->should_collect(full, size, is_tlab);
}

void GenCollectedHeap::register_nmethod(nmethod* nm) {
  ScavengableNMethods::register_nmethod(nm);
}

void GenCollectedHeap::unregister_nmethod(nmethod* nm) {
  ScavengableNMethods::unregister_nmethod(nm);
}

void GenCollectedHeap::verify_nmethod(nmethod* nm) {
  ScavengableNMethods::verify_nmethod(nm);
}

void GenCollectedHeap::prune_scavengable_nmethods() {
  ScavengableNMethods::prune_nmethods_not_into_young();
}

void GenCollectedHeap::prune_unlinked_nmethods() {
  ScavengableNMethods::prune_unlinked_nmethods();
}

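// Last-resort allocation path after an allocation failure: escalates from
// heap expansion through incremental and full collections to a fully
// compacting collection that clears all soft references.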
HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  GCCauseSetter x(this, GCCause::_allocation_failure);
  HeapWord* result = nullptr;

  assert(size != 0, "Precondition violated");
  if (GCLocker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;   // Could be null if we are out of space.
  } else if (!incremental_collection_will_fail(false /* don't consult_young */)) {
    // Do an incremental collection.
    do_collection(false,                     // full
                  false,                     // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  GenCollectedHeap::OldGen); // max_generation
  } else {
    log_trace(gc)(" :: Trying full because partial may fail :: ");
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    do_collection(true,                      // full
                  false,                     // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  GenCollectedHeap::OldGen); // max_generation
  }

  result = attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != nullptr) {
    assert(is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != nullptr) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    do_collection(true,                      // full
                  true,                      // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  GenCollectedHeap::OldGen); // max_generation
  }

  result = attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != nullptr) {
    assert(is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!soft_ref_policy()->should_clear_all_soft_refs(),
    "Flag should have been handled and cleared prior to this point");

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return nullptr;
}

#ifdef ASSERT
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!GenCollectedHeap::heap()->is_in_partial_collection(*p),
      "Referent should not be scavengable.");  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif

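// Apply the given closures to the strong roots: class loader data, thread
// stacks, the strong OopStorage set, and (depending on the scanning option)
// parts or all of the code cache.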
void GenCollectedHeap::process_roots(ScanningOption so,
                                     OopClosure* strong_roots,
                                     CLDClosure* strong_cld_closure,
                                     CLDClosure* weak_cld_closure,
                                     CodeBlobToOopClosure* code_roots) {
  // General roots.
  assert(code_roots != nullptr, "code root closure should always be set");

  ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);

  // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
  CodeBlobToOopClosure* roots_from_code_p = (so & SO_AllCodeCache) ? nullptr : code_roots;

  Threads::oops_do(strong_roots, roots_from_code_p);

  OopStorageSet::strong_oops_do(strong_roots);

  if (so & SO_ScavengeCodeCache) {
    assert(code_roots != nullptr, "must supply closure for code cache");

    // We only visit parts of the CodeCache when scavenging.
    ScavengableNMethods::nmethods_do(code_roots);
  }
  if (so & SO_AllCodeCache) {
    assert(code_roots != nullptr, "must supply closure for code cache");

    // CMSCollector uses this to do intermediate-strength collections.
    // We scan the entire code cache, since CodeCache::do_unloading is not called.
    CodeCache::blobs_do(code_roots);
  }
  // Verify that the code cache contents are not subject to
  // movement by a scavenging collection.
  DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
  DEBUG_ONLY(ScavengableNMethods::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
}

void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
  WeakProcessor::oops_do(root_closure);
}

bool GenCollectedHeap::no_allocs_since_save_marks() {
  return _young_gen->no_allocs_since_save_marks() &&
         _old_gen->no_allocs_since_save_marks();
}

// public collection interfaces
void GenCollectedHeap::collect(GCCause::Cause cause) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  unsigned int gc_count_before;
  unsigned int full_gc_count_before;

  {
    MutexLocker ml(Heap_lock);
    // Read the GC count while holding the Heap_lock
    gc_count_before      = total_collections();
    full_gc_count_before = total_full_collections();
  }

  if (GCLocker::should_discard(cause, gc_count_before)) {
    return;
  }

  bool should_run_young_gc =  (cause == GCCause::_wb_young_gc)
                           || (cause == GCCause::_gc_locker)
                DEBUG_ONLY(|| (cause == GCCause::_scavenge_alot));

  const GenerationType max_generation = should_run_young_gc
                                      ? YoungGen
                                      : OldGen;

  while (true) {
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_generation);
    VMThread::execute(&op);

    if (!GCCause::is_explicit_full_gc(cause)) {
      return;
    }

    {
      MutexLocker ml(Heap_lock);
      // Read the GC count while holding the Heap_lock
      if (full_gc_count_before != total_full_collections()) {
        return;
      }
    }

    if (GCLocker::is_active_and_needs_gc()) {
      // If GCLocker is active, wait until clear before retrying.
      GCLocker::stall_until_clear();
    }
  }
}

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_full_collection(clear_all_soft_refs, OldGen);
}

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          GenerationType last_generation) {
  do_collection(true,                   // full
                clear_all_soft_refs,    // clear_all_soft_refs
                0,                      // size
                false,                  // is_tlab
                last_generation);       // last_generation
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full
  if (gc_cause() == GCCause::_gc_locker && incremental_collection_failed()) {
    log_debug(gc, jni)("GC locker: Trying a full collection because scavenge failed");
    // This time allow the old gen to be collected as well
    do_collection(true,                // full
                  clear_all_soft_refs, // clear_all_soft_refs
                  0,                   // size
                  false,               // is_tlab
                  OldGen);             // last_generation
  }
}

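// The young generation occupies the low end of the reserved heap, so any
// address below the old generation's reserved space is in the young gen.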
bool GenCollectedHeap::is_in_young(const void* p) const {
  bool result = p < _old_gen->reserved().start();
  assert(result == _young_gen->is_in_reserved(p),
         "incorrect test - result=%d, p=" PTR_FORMAT, result, p2i(p));
  return result;
}

bool GenCollectedHeap::requires_barriers(stackChunkOop obj) const {
  return !is_in_young(obj);
}

// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
  return _young_gen->is_in(p) || _old_gen->is_in(p);
}

#ifdef ASSERT
// Don't implement this by using is_in_young().  This method is used
// in some cases to check that is_in_young() is correct.
bool GenCollectedHeap::is_in_partial_collection(const void* p) {
  assert(is_in_reserved(p) || p == nullptr,
    "Does not work if address is non-null and outside of the heap");
  return p < _young_gen->reserved().end() && p != nullptr;
}
#endif

void GenCollectedHeap::oop_iterate(OopIterateClosure* cl) {
  _young_gen->oop_iterate(cl);
  _old_gen->oop_iterate(cl);
}

void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  _young_gen->object_iterate(cl);
  _old_gen->object_iterate(cl);
}

Space* GenCollectedHeap::space_containing(const void* addr) const {
  Space* res = _young_gen->space_containing(addr);
  if (res != nullptr) {
    return res;
  }
  res = _old_gen->space_containing(addr);
  assert(res != nullptr, "Could not find containing space");
  return res;
}

HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  if (_young_gen->is_in_reserved(addr)) {
    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
    return _young_gen->block_start(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
  return _old_gen->block_start(addr);
}

bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  if (_young_gen->is_in_reserved(addr)) {
    return _young_gen->block_is_obj(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  return _old_gen->block_is_obj(addr);
}

size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->tlab_capacity();
}

size_t GenCollectedHeap::tlab_used(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->tlab_used();
}

size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->unsafe_max_tlab_alloc();
}

HeapWord* GenCollectedHeap::allocate_new_tlab(size_t min_size,
                                              size_t requested_size,
                                              size_t* actual_size) {
  HeapWord* result = mem_allocate_work(requested_size /* size */,
                                       true /* is_tlab */);
  if (result != nullptr) {
    *actual_size = requested_size;
  }

  return result;
}

// Requires "*prev_ptr" to be non-null.  Removes and returns a block of
// minimal size from the list headed by "*prev_ptr".
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  bool first = true;
  size_t min_size = 0;   // "first" makes this conceptually infinite.
  ScratchBlock **smallest_ptr, *smallest;
  ScratchBlock  *cur = *prev_ptr;
  while (cur) {
    assert(*prev_ptr == cur, "just checking");
    if (first || cur->num_words < min_size) {
      smallest_ptr = prev_ptr;
      smallest     = cur;
      min_size     = smallest->num_words;
      first        = false;
    }
    prev_ptr = &cur->next;
    cur     =  cur->next;
  }
  smallest      = *smallest_ptr;
  *smallest_ptr = smallest->next;
  return smallest;
}

// Sort the scratch block list headed by "list" into decreasing size order,
// and set "list" to the result.
static void sort_scratch_list(ScratchBlock*& list) {
  ScratchBlock* sorted = nullptr;
  ScratchBlock* unsorted = list;
  while (unsorted) {
    ScratchBlock *smallest = removeSmallestScratch(&unsorted);
    smallest->next  = sorted;
    sorted          = smallest;
  }
  list = sorted;
}

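// Gather scratch space contributions from both generations, sorted into
// decreasing size order.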
ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                               size_t max_alloc_words) {
  ScratchBlock* res = nullptr;
  _young_gen->contribute_scratch(res, requestor, max_alloc_words);
  _old_gen->contribute_scratch(res, requestor, max_alloc_words);
  sort_scratch_list(res);
  return res;
}

void GenCollectedHeap::release_scratch() {
  _young_gen->reset_scratch();
  _old_gen->reset_scratch();
}

void GenCollectedHeap::prepare_for_verify() {
  ensure_parsability(false);        // no need to retire TLABs
}

void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
  if (old_to_young) {
    cl->do_generation(_old_gen);
    cl->do_generation(_young_gen);
  } else {
    cl->do_generation(_young_gen);
    cl->do_generation(_old_gen);
  }
}

bool GenCollectedHeap::is_maximal_no_gc() const {
  return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
}

void GenCollectedHeap::save_marks() {
  _young_gen->save_marks();
  _old_gen->save_marks();
}

GenCollectedHeap* GenCollectedHeap::heap() {
  // SerialHeap is the only subtype of GenCollectedHeap.
  return named_heap<GenCollectedHeap>(CollectedHeap::Serial);
}

#if INCLUDE_SERIALGC
void GenCollectedHeap::prepare_for_compaction() {
  // Start by compacting into same gen.
  CompactPoint cp(_old_gen);
  _old_gen->prepare_for_compaction(&cp);
  _young_gen->prepare_for_compaction(&cp);
}
#endif // INCLUDE_SERIALGC

void GenCollectedHeap::verify(VerifyOption option /* ignored */) {
  log_debug(gc, verify)("%s", _old_gen->name());
  _old_gen->verify();

  log_debug(gc, verify)("%s", _young_gen->name());
  _young_gen->verify();

  log_debug(gc, verify)("RemSet");
  rem_set()->verify();
}

void GenCollectedHeap::print_on(outputStream* st) const {
  if (_young_gen != nullptr) {
    _young_gen->print_on(st);
  }
  if (_old_gen != nullptr) {
    _old_gen->print_on(st);
  }
  MetaspaceUtils::print_on(st);
}

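// The serial collector does all GC work in the VM thread, so there are no
// dedicated GC worker threads to iterate over.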
void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
}

bool GenCollectedHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<GenCollectedHeap>::print_location(st, addr);
}

void GenCollectedHeap::print_tracing_info() const {
  if (log_is_enabled(Debug, gc, heap, exit)) {
    LogStreamHandle(Debug, gc, heap, exit) lsh;
    _young_gen->print_summary_info_on(&lsh);
    _old_gen->print_summary_info_on(&lsh);
  }
}

void GenCollectedHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
  const DefNewGeneration* const def_new_gen = (DefNewGeneration*) young_gen();

  log_info(gc, heap)(HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(def_new_gen->short_name(),
                                             pre_gc_values.young_gen_used(),
                                             pre_gc_values.young_gen_capacity(),
                                             def_new_gen->used(),
                                             def_new_gen->capacity()),
                     HEAP_CHANGE_FORMAT_ARGS("Eden",
                                             pre_gc_values.eden_used(),
                                             pre_gc_values.eden_capacity(),
                                             def_new_gen->eden()->used(),
                                             def_new_gen->eden()->capacity()),
                     HEAP_CHANGE_FORMAT_ARGS("From",
                                             pre_gc_values.from_used(),
                                             pre_gc_values.from_capacity(),
                                             def_new_gen->from()->used(),
                                             def_new_gen->from()->capacity()));
  log_info(gc, heap)(HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(old_gen()->short_name(),
                                             pre_gc_values.old_gen_used(),
                                             pre_gc_values.old_gen_capacity(),
                                             old_gen()->used(),
                                             old_gen()->capacity()));
  MetaspaceUtils::print_metaspace_change(pre_gc_values.metaspace_sizes());
}

class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_prologue(_full);
  }
  GenGCPrologueClosure(bool full) : _full(full) {};
};

void GenCollectedHeap::gc_prologue(bool full) {
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");

  // Fill TLAB's and such
  ensure_parsability(true);   // retire TLABs

  // Walk generations
  GenGCPrologueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.
};

class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_epilogue(_full);
  }
  GenGCEpilogueClosure(bool full) : _full(full) {};
};

void GenCollectedHeap::gc_epilogue(bool full) {
#if COMPILER2_OR_JVMCI
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
#endif // COMPILER2_OR_JVMCI

  resize_all_tlabs();

  GenGCEpilogueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.

  MetaspaceCounters::update_performance_counters();
};

#ifndef PRODUCT
class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
 private:
 public:
  void do_generation(Generation* gen) {
    gen->record_spaces_top();
  }
};

void GenCollectedHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    GenGCSaveTopsBeforeGCClosure blk;
    generation_iterate(&blk, false);  // not old-to-young.
  }
}
#endif  // not PRODUCT

class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->ensure_parsability();
  }
};

void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  GenEnsureParsabilityClosure ep_cl;
  generation_iterate(&ep_cl, false);
}