/*
 * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "compiler/oopMap.hpp"
#include "gc/serial/cardTableRS.hpp"
#include "gc/serial/defNewGeneration.hpp"
#include "gc/serial/genMarkSweep.hpp"
#include "gc/serial/markSweep.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/gcForwarding.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcInitLogger.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageParState.inline.hpp"
#include "gc/shared/oopStorageSet.inline.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/workerThread.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/threads.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/autoRestore.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/vmError.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

GenCollectedHeap::GenCollectedHeap(Generation::Name young,
                                   Generation::Name old,
                                   const char* policy_counters_name) :
  CollectedHeap(),
  _young_gen(nullptr),
  _old_gen(nullptr),
  _young_gen_spec(new GenerationSpec(young,
                                     NewSize,
                                     MaxNewSize,
                                     GenAlignment)),
  _old_gen_spec(new GenerationSpec(old,
                                   OldSize,
                                   MaxOldSize,
                                   GenAlignment)),
  _rem_set(nullptr),
  _soft_ref_policy(),
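  // GCPolicyCounters(name, collectors, generations): this heap has two
  // collectors (minor and full) and two generations (young and old).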
  _gc_policy_counters(new GCPolicyCounters(policy_counters_name, 2, 2)),
  _incremental_collection_failed(false),
  _full_collections_completed(0),
  _young_manager(nullptr),
  _old_manager(nullptr) {
}

jint GenCollectedHeap::initialize() {
  // Allocate space for the heap.

  ReservedHeapSpace heap_rs = allocate(HeapAlignment);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  initialize_reserved_region(heap_rs);

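  // The single contiguous reservation is split so that the young generation
  // occupies the low end of the address range and the old generation the
  // rest; is_in_young() relies on this ordering for its pointer comparison.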
  ReservedSpace young_rs = heap_rs.first_part(_young_gen_spec->max_size());
  ReservedSpace old_rs = heap_rs.last_part(_young_gen_spec->max_size());

  _rem_set = create_rem_set(heap_rs.region());
  _rem_set->initialize(young_rs.base(), old_rs.base());

  CardTableBarrierSet* bs = new CardTableBarrierSet(_rem_set);
  bs->initialize();
  BarrierSet::set_barrier_set(bs);

  _young_gen = _young_gen_spec->init(young_rs, rem_set());
  _old_gen = _old_gen_spec->init(old_rs, rem_set());

  GCInitLogger::print();

  GCForwarding::initialize(_reserved, SpaceAlignment);

  return JNI_OK;
}

CardTableRS* GenCollectedHeap::create_rem_set(const MemRegion& reserved_region) {
  return new CardTableRS(reserved_region);
}

ReservedHeapSpace GenCollectedHeap::allocate(size_t alignment) {
  // Now figure out the total size.
  const size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
  assert(alignment % page_size == 0, "Must be");

  // Check for overflow.
  size_t total_reserved = _young_gen_spec->max_size() + _old_gen_spec->max_size();
  if (total_reserved < _young_gen_spec->max_size()) {
    vm_exit_during_initialization("The size of the object heap + VM data exceeds "
                                  "the maximum representable size");
  }
  assert(total_reserved % alignment == 0,
         "Gen size; total_reserved=" SIZE_FORMAT ", alignment="
         SIZE_FORMAT, total_reserved, alignment);

  ReservedHeapSpace heap_rs = Universe::reserve_heap(total_reserved, alignment);
  size_t used_page_size = heap_rs.page_size();

  os::trace_page_sizes("Heap",
                       MinHeapSize,
                       total_reserved,
                       heap_rs.base(),
                       heap_rs.size(),
                       used_page_size);

  return heap_rs;
}

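// An nmethod is treated as scavengable when it may contain pointers into
// the young generation; here that is simply any oop residing in young
// space. Such nmethods are tracked by ScavengableNMethods.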
class GenIsScavengable : public BoolObjectClosure {
public:
  bool do_object_b(oop obj) {
    return GenCollectedHeap::heap()->is_in_young(obj);
  }
};

static GenIsScavengable _is_scavengable;

void GenCollectedHeap::post_initialize() {
  CollectedHeap::post_initialize();

  DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;

  def_new_gen->ref_processor_init();

  MarkSweep::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
}

PreGenGCValues GenCollectedHeap::get_pre_gc_values() const {
  const DefNewGeneration* const def_new_gen = (DefNewGeneration*) young_gen();

  return PreGenGCValues(def_new_gen->used(),
                        def_new_gen->capacity(),
                        def_new_gen->eden()->used(),
                        def_new_gen->eden()->capacity(),
                        def_new_gen->from()->used(),
                        def_new_gen->from()->capacity(),
                        old_gen()->used(),
                        old_gen()->capacity());
}

GenerationSpec* GenCollectedHeap::young_gen_spec() const {
  return _young_gen_spec;
}

GenerationSpec* GenCollectedHeap::old_gen_spec() const {
  return _old_gen_spec;
}

size_t GenCollectedHeap::capacity() const {
  return _young_gen->capacity() + _old_gen->capacity();
}

size_t GenCollectedHeap::used() const {
  return _young_gen->used() + _old_gen->used();
}

void GenCollectedHeap::save_used_regions() {
  _old_gen->save_used_region();
  _young_gen->save_used_region();
}

size_t GenCollectedHeap::max_capacity() const {
  return _young_gen->max_capacity() + _old_gen->max_capacity();
}

// Update the _full_collections_completed counter
// at the end of a stop-the-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  return _full_collections_completed;
}

// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)
// . heap memory is tight -- the most recent previous collection
//   was a full collection because a partial collection (would
//   have) failed and is likely to fail again
bool GenCollectedHeap::should_try_older_generation_allocation(size_t word_size) const {
  size_t young_capacity = _young_gen->capacity_before_gc();
  return    (word_size > heap_word_size(young_capacity))
         || GCLocker::is_active_and_needs_gc()
         || incremental_collection_failed();
}

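// Expand whichever generation will accept an allocation of this kind,
// trying the old generation before the young one. Used when a collection
// cannot run (GC locker held) or has already failed to free enough space.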
HeapWord* GenCollectedHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
  HeapWord* result = nullptr;
  if (_old_gen->should_allocate(size, is_tlab)) {
    result = _old_gen->expand_and_allocate(size, is_tlab);
  }
  if (result == nullptr) {
    if (_young_gen->should_allocate(size, is_tlab)) {
      result = _young_gen->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == nullptr || is_in_reserved(result), "result not in heap");
  return result;
}

HeapWord* GenCollectedHeap::mem_allocate_work(size_t size,
                                              bool is_tlab) {

  HeapWord* result = nullptr;

  // Loop until the allocation is satisfied, or unsatisfied after GC.
  for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {

    // First allocation attempt is lock-free.
    Generation* young = _young_gen;
    if (young->should_allocate(size, is_tlab)) {
      result = young->par_allocate(size, is_tlab);
      if (result != nullptr) {
        assert(is_in_reserved(result), "result not in heap");
        return result;
      }
    }
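    // The lock-free fast path failed (or the young gen refused the request);
    // take the Heap_lock and try the slower paths.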
    uint gc_count_before;  // Read inside the Heap_lock locked region.
    {
      MutexLocker ml(Heap_lock);
      log_trace(gc, alloc)("GenCollectedHeap::mem_allocate_work: attempting locked slow path allocation");
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = !should_try_older_generation_allocation(size);

      result = attempt_allocation(size, is_tlab, first_only);
      if (result != nullptr) {
        assert(is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GCLocker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return nullptr;  // Caller will retry allocating individual object.
        }
        if (!is_maximal_no_gc()) {
          // Try and expand heap to satisfy request.
          result = expand_heap_and_allocate(size, is_tlab);
          // Result could be null if we are out of space.
          if (result != nullptr) {
            return result;
          }
        }

        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
          return nullptr; // We didn't get to do a GC and we didn't get any memory.
        }

        // If this thread is not in a JNI critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return nullptr;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = total_collections();
    }

    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
        assert(result == nullptr, "must be null if gc_locked() is true");
        continue;  // Retry and/or stall as necessary.
      }

      assert(result == nullptr || is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("GenCollectedHeap::mem_allocate_work retries %d times,"
                            " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}

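// Try the young generation first; when first_only is set (the common case
// for ordinary small allocations) the old generation is not consulted.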
HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res = nullptr;

  if (_young_gen->should_allocate(size, is_tlab)) {
    res = _young_gen->allocate(size, is_tlab);
    if (res != nullptr || first_only) {
      return res;
    }
  }

  if (_old_gen->should_allocate(size, is_tlab)) {
    res = _old_gen->allocate(size, is_tlab);
  }

  return res;
}

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
  return mem_allocate_work(size,
                           false /* is_tlab */);
}

bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
         _gc_cause == GCCause::_wb_full_gc;
}

void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
                                          bool is_tlab, bool run_verification, bool clear_soft_refs) {
  FormatBuffer<> title("Collect gen: %s", gen->short_name());
  GCTraceTime(Trace, gc, phases) t1(title);
  TraceCollectorStats tcs(gen->counters());
  TraceMemoryManagerStats tmms(gen->gc_manager(), gc_cause(), heap()->is_young_gen(gen) ? "end of minor GC" : "end of major GC");

  gen->stat_record()->invocations++;
  gen->stat_record()->accumulated_time.start();

  // Must be done anew before each collection because
  // a previous collection will do mangling and will
  // change top of some spaces.
  record_gen_tops_before_GC();

  log_trace(gc)("%s invoke=%d size=" SIZE_FORMAT, heap()->is_young_gen(gen) ? "Young" : "Old", gen->stat_record()->invocations, size * HeapWordSize);

  if (run_verification && VerifyBeforeGC) {
    Universe::verify("Before GC");
  }
  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::clear());

  // Do collection work
  {
    save_marks();   // save marks for all gens

    gen->collect(full, clear_soft_refs, size, is_tlab);
  }

  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::update_pointers());

  gen->stat_record()->accumulated_time.stop();

  update_gc_stats(gen, full);

  if (run_verification && VerifyAfterGC) {
    Universe::verify("After GC");
  }
}

void GenCollectedHeap::do_collection(bool           full,
                                     bool           clear_all_soft_refs,
                                     size_t         size,
                                     bool           is_tlab,
                                     GenerationType max_generation) {
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread(), "only VM thread");
  assert(Heap_lock->is_locked(),
         "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");

  if (GCLocker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                          soft_ref_policy()->should_clear_all_soft_refs();

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, soft_ref_policy());

  AutoModifyRestore<bool> temporarily(_is_gc_active, true);

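  // A "complete" collection is a full collection that includes the old
  // generation. Unless ScavengeBeforeFullGC asks for a preparatory young
  // collection, the old-gen pass collects the young generation as well,
  // so the separate young pass is skipped.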
  bool complete = full && (max_generation == OldGen);
  bool old_collects_young = complete && !ScavengeBeforeFullGC;
  bool do_young_collection = !old_collects_young && _young_gen->should_collect(full, size, is_tlab);

  const PreGenGCValues pre_gc_values = get_pre_gc_values();

  bool run_verification = total_collections() >= VerifyGCStartAt;
  bool prepared_for_verification = false;
  bool do_full_collection = false;

  if (do_young_collection) {
    GCIdMark gc_id_mark;
    GCTraceCPUTime tcpu(((DefNewGeneration*)_young_gen)->gc_tracer());
    GCTraceTime(Info, gc) t("Pause Young", nullptr, gc_cause(), true);

    print_heap_before_gc();

    if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
      prepare_for_verify();
      prepared_for_verification = true;
    }

    gc_prologue(complete);
    increment_total_collections(complete);

    collect_generation(_young_gen,
                       full,
                       size,
                       is_tlab,
                       run_verification && VerifyGCLevel <= 0,
                       do_clear_all_soft_refs);

    if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
        size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {
      // Allocation request was met by young GC.
      size = 0;
    }

    // Check whether the young collection was enough. If so, do the final steps
    // of the young collection, and fall through to the end.
    do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation);
    if (!do_full_collection) {
      // Adjust generation sizes.
      _young_gen->compute_new_size();

      print_heap_change(pre_gc_values);

      // Track memory usage and detect low memory after GC finishes
      MemoryService::track_memory_usage();

      gc_epilogue(complete);
    }

    print_heap_after_gc();

  } else {
    // No young collection, ask if we need to perform Full collection.
    do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation);
  }

  if (do_full_collection) {
    GCIdMark gc_id_mark;
    GCTraceCPUTime tcpu(GenMarkSweep::gc_tracer());
    GCTraceTime(Info, gc) t("Pause Full", nullptr, gc_cause(), true);

    print_heap_before_gc();

    if (!prepared_for_verification && run_verification &&
        VerifyGCLevel <= 1 && VerifyBeforeGC) {
      prepare_for_verify();
    }

    if (!do_young_collection) {
      gc_prologue(complete);
      increment_total_collections(complete);
    }

    // Accounting quirk: total full collections would be incremented when "complete"
    // is set, by calling increment_total_collections above. However, we also need to
    // account for Full collections that had "complete" unset.
    if (!complete) {
      increment_total_full_collections();
    }

    CodeCache::on_gc_marking_cycle_start();

    collect_generation(_old_gen,
                       full,
                       size,
                       is_tlab,
                       run_verification && VerifyGCLevel <= 1,
                       do_clear_all_soft_refs);

    CodeCache::on_gc_marking_cycle_finish();
    CodeCache::arm_all_nmethods();

    // Adjust generation sizes.
    _old_gen->compute_new_size();
    _young_gen->compute_new_size();

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge(/*at_safepoint*/true);
    DEBUG_ONLY(MetaspaceUtils::verify();)

    // Need to clear claim bits for the next mark.
    ClassLoaderDataGraph::clear_claimed_marks();

    // Resize the metaspace capacity after full collections
    MetaspaceGC::compute_new_size();
    update_full_collections_completed();

    print_heap_change(pre_gc_values);

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    // Need to tell the epilogue code we are done with Full GC, regardless of
    // what the initial value of the "complete" flag was.
    gc_epilogue(true);

    print_heap_after_gc();
  }
}

bool GenCollectedHeap::should_do_full_collection(size_t size, bool full, bool is_tlab,
                                                 GenCollectedHeap::GenerationType max_gen) const {
  return max_gen == OldGen && _old_gen->should_collect(full, size, is_tlab);
}

void GenCollectedHeap::register_nmethod(nmethod* nm) {
  ScavengableNMethods::register_nmethod(nm);
}

void GenCollectedHeap::unregister_nmethod(nmethod* nm) {
  ScavengableNMethods::unregister_nmethod(nm);
}

void GenCollectedHeap::verify_nmethod(nmethod* nm) {
  ScavengableNMethods::verify_nmethod(nm);
}

void GenCollectedHeap::prune_scavengable_nmethods() {
  ScavengableNMethods::prune_nmethods();
}

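// Allocation has failed at a safepoint. Escalate gradually: expand the heap
// if the GC locker blocks collection; otherwise try an incremental
// collection, then a full collection, and finally a maximally compacting
// collection that clears all soft references, before returning null and
// letting the caller raise OutOfMemoryError.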
HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  GCCauseSetter x(this, GCCause::_allocation_failure);
  HeapWord* result = nullptr;

  assert(size != 0, "Precondition violated");
  if (GCLocker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;   // Could be null if we are out of space.
  } else if (!incremental_collection_will_fail(false /* don't consult_young */)) {
    // Do an incremental collection.
    do_collection(false,                     // full
                  false,                     // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  GenCollectedHeap::OldGen); // max_generation
  } else {
    log_trace(gc)(" :: Trying full because partial may fail :: ");
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    do_collection(true,                      // full
                  false,                     // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  GenCollectedHeap::OldGen); // max_generation
  }

  result = attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != nullptr) {
    assert(is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != nullptr) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    do_collection(true,                      // full
                  true,                      // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  GenCollectedHeap::OldGen); // max_generation
  }

  result = attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != nullptr) {
    assert(is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!soft_ref_policy()->should_clear_all_soft_refs(),
         "Flag should have been handled and cleared prior to this point");

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return nullptr;
}

#ifdef ASSERT
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!GenCollectedHeap::heap()->is_in_partial_collection(*p),
           "Referent should not be scavengable.");
  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif

void GenCollectedHeap::process_roots(ScanningOption so,
                                     OopClosure* strong_roots,
                                     CLDClosure* strong_cld_closure,
                                     CLDClosure* weak_cld_closure,
                                     CodeBlobToOopClosure* code_roots) {
  // General roots.
  assert(code_roots != nullptr, "code root closure should always be set");

  ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);

  // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
  CodeBlobToOopClosure* roots_from_code_p = (so & SO_AllCodeCache) ? nullptr : code_roots;

  Threads::oops_do(strong_roots, roots_from_code_p);

  OopStorageSet::strong_oops_do(strong_roots);

  if (so & SO_ScavengeCodeCache) {
    assert(code_roots != nullptr, "must supply closure for code cache");

    // We only visit parts of the CodeCache when scavenging.
    ScavengableNMethods::nmethods_do(code_roots);
  }
  if (so & SO_AllCodeCache) {
    assert(code_roots != nullptr, "must supply closure for code cache");

    // Scan the entire code cache, since CodeCache::do_unloading is not called
    // on this path.
    CodeCache::blobs_do(code_roots);
  }
  // Verify that the code cache contents are not subject to
  // movement by a scavenging collection.
  DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
  DEBUG_ONLY(ScavengableNMethods::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
}

void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
  WeakProcessor::oops_do(root_closure);
}

bool GenCollectedHeap::no_allocs_since_save_marks() {
  return _young_gen->no_allocs_since_save_marks() &&
         _old_gen->no_allocs_since_save_marks();
}

// public collection interfaces
void GenCollectedHeap::collect(GCCause::Cause cause) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  unsigned int gc_count_before;
  unsigned int full_gc_count_before;

  {
    MutexLocker ml(Heap_lock);
    // Read the GC count while holding the Heap_lock
    gc_count_before      = total_collections();
    full_gc_count_before = total_full_collections();
  }

  if (GCLocker::should_discard(cause, gc_count_before)) {
    return;
  }

  bool should_run_young_gc =  (cause == GCCause::_wb_young_gc)
                           || (cause == GCCause::_gc_locker)
                DEBUG_ONLY(|| (cause == GCCause::_scavenge_alot));

  const GenerationType max_generation = should_run_young_gc
                                      ? YoungGen
                                      : OldGen;

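  // For explicit full GCs, retry until a full collection has actually
  // completed: the VM operation may be skipped (for example when the GC
  // locker is held), in which case total_full_collections() is unchanged.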
  while (true) {
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_generation);
    VMThread::execute(&op);

    if (!GCCause::is_explicit_full_gc(cause)) {
      return;
    }

    {
      MutexLocker ml(Heap_lock);
      // Read the GC count while holding the Heap_lock
      if (full_gc_count_before != total_full_collections()) {
        return;
      }
    }

    if (GCLocker::is_active_and_needs_gc()) {
      // If GCLocker is active, wait until clear before retrying.
      GCLocker::stall_until_clear();
    }
  }
}

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_full_collection(clear_all_soft_refs, OldGen);
}

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          GenerationType last_generation) {
  do_collection(true,                   // full
                clear_all_soft_refs,    // clear_all_soft_refs
                0,                      // size
                false,                  // is_tlab
                last_generation);       // last_generation
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full
  if (gc_cause() == GCCause::_gc_locker && incremental_collection_failed()) {
    log_debug(gc, jni)("GC locker: Trying a full collection because scavenge failed");
    // This time allow the old gen to be collected as well
    do_collection(true,                // full
                  clear_all_soft_refs, // clear_all_soft_refs
                  0,                   // size
                  false,               // is_tlab
                  OldGen);             // last_generation
  }
}

bool GenCollectedHeap::is_in_young(const void* p) const {
  bool result = p < _old_gen->reserved().start();
  assert(result == _young_gen->is_in_reserved(p),
         "incorrect test - result=%d, p=" PTR_FORMAT, result, p2i(p));
  return result;
}

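// Stack chunks outside the young generation need GC barriers when mutated;
// objects in the young generation are processed wholesale by the next
// scavenge, so no barriers are required for them.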
bool GenCollectedHeap::requires_barriers(stackChunkOop obj) const {
  return !is_in_young(obj);
}

// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
  return _young_gen->is_in(p) || _old_gen->is_in(p);
}

#ifdef ASSERT
// Don't implement this by using is_in_young().  This method is used
// in some cases to check that is_in_young() is correct.
bool GenCollectedHeap::is_in_partial_collection(const void* p) {
  assert(is_in_reserved(p) || p == nullptr,
         "Does not work if address is non-null and outside of the heap");
  return p < _young_gen->reserved().end() && p != nullptr;
}
#endif

void GenCollectedHeap::oop_iterate(OopIterateClosure* cl) {
  _young_gen->oop_iterate(cl);
  _old_gen->oop_iterate(cl);
}

void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  _young_gen->object_iterate(cl);
  _old_gen->object_iterate(cl);
}

Space* GenCollectedHeap::space_containing(const void* addr) const {
  Space* res = _young_gen->space_containing(addr);
  if (res != nullptr) {
    return res;
  }
  res = _old_gen->space_containing(addr);
  assert(res != nullptr, "Could not find containing space");
  return res;
}

HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  if (_young_gen->is_in_reserved(addr)) {
    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
    return _young_gen->block_start(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
  return _old_gen->block_start(addr);
}

bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  if (_young_gen->is_in_reserved(addr)) {
    return _young_gen->block_is_obj(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  return _old_gen->block_is_obj(addr);
}

size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->tlab_capacity();
}

size_t GenCollectedHeap::tlab_used(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->tlab_used();
}

size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->unsafe_max_tlab_alloc();
}

HeapWord* GenCollectedHeap::allocate_new_tlab(size_t min_size,
                                              size_t requested_size,
                                              size_t* actual_size) {
  HeapWord* result = mem_allocate_work(requested_size /* size */,
                                       true /* is_tlab */);
  if (result != nullptr) {
    *actual_size = requested_size;
  }

  return result;
}

void GenCollectedHeap::prepare_for_verify() {
  ensure_parsability(false);        // no need to retire TLABs
}

void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
  if (old_to_young) {
    cl->do_generation(_old_gen);
    cl->do_generation(_young_gen);
  } else {
    cl->do_generation(_young_gen);
    cl->do_generation(_old_gen);
  }
}

bool GenCollectedHeap::is_maximal_no_gc() const {
  return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
}

void GenCollectedHeap::save_marks() {
  _young_gen->save_marks();
  _old_gen->save_marks();
}

GenCollectedHeap* GenCollectedHeap::heap() {
  // SerialHeap is the only subtype of GenCollectedHeap.
  return named_heap<GenCollectedHeap>(CollectedHeap::Serial);
}

#if INCLUDE_SERIALGC
void GenCollectedHeap::prepare_for_compaction() {
  // Start by compacting into same gen.
  CompactPoint cp(_old_gen);
  _old_gen->prepare_for_compaction(&cp);
  _young_gen->prepare_for_compaction(&cp);
}
#endif // INCLUDE_SERIALGC

void GenCollectedHeap::verify(VerifyOption option /* ignored */) {
  log_debug(gc, verify)("%s", _old_gen->name());
  _old_gen->verify();

  log_debug(gc, verify)("%s", _young_gen->name());
  _young_gen->verify();

  log_debug(gc, verify)("RemSet");
  rem_set()->verify();
}

void GenCollectedHeap::print_on(outputStream* st) const {
  if (_young_gen != nullptr) {
    _young_gen->print_on(st);
  }
  if (_old_gen != nullptr) {
    _old_gen->print_on(st);
  }
  MetaspaceUtils::print_on(st);
}

void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
}

bool GenCollectedHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<GenCollectedHeap>::print_location(st, addr);
}

void GenCollectedHeap::print_tracing_info() const {
  if (log_is_enabled(Debug, gc, heap, exit)) {
    LogStreamHandle(Debug, gc, heap, exit) lsh;
    _young_gen->print_summary_info_on(&lsh);
    _old_gen->print_summary_info_on(&lsh);
  }
}

void GenCollectedHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
  const DefNewGeneration* const def_new_gen = (DefNewGeneration*) young_gen();

  log_info(gc, heap)(HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(def_new_gen->short_name(),
                                             pre_gc_values.young_gen_used(),
                                             pre_gc_values.young_gen_capacity(),
                                             def_new_gen->used(),
                                             def_new_gen->capacity()),
                     HEAP_CHANGE_FORMAT_ARGS("Eden",
                                             pre_gc_values.eden_used(),
                                             pre_gc_values.eden_capacity(),
                                             def_new_gen->eden()->used(),
                                             def_new_gen->eden()->capacity()),
                     HEAP_CHANGE_FORMAT_ARGS("From",
                                             pre_gc_values.from_used(),
                                             pre_gc_values.from_capacity(),
                                             def_new_gen->from()->used(),
                                             def_new_gen->from()->capacity()));
  log_info(gc, heap)(HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(old_gen()->short_name(),
                                             pre_gc_values.old_gen_used(),
                                             pre_gc_values.old_gen_capacity(),
                                             old_gen()->used(),
                                             old_gen()->capacity()));
  MetaspaceUtils::print_metaspace_change(pre_gc_values.metaspace_sizes());
}

class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_prologue(_full);
  }
  GenGCPrologueClosure(bool full) : _full(full) {}
};

void GenCollectedHeap::gc_prologue(bool full) {
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");

  // Fill TLAB's and such
  ensure_parsability(true);   // retire TLABs

  // Walk generations
  GenGCPrologueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.
}

class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_epilogue(_full);
  }
  GenGCEpilogueClosure(bool full) : _full(full) {}
};

void GenCollectedHeap::gc_epilogue(bool full) {
#if COMPILER2_OR_JVMCI
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
#endif // COMPILER2_OR_JVMCI

  resize_all_tlabs();

  GenGCEpilogueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.

  MetaspaceCounters::update_performance_counters();
}

#ifndef PRODUCT
class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->record_spaces_top();
  }
};

void GenCollectedHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    GenGCSaveTopsBeforeGCClosure blk;
    generation_iterate(&blk, false);  // not old-to-young.
  }
}
#endif  // not PRODUCT

class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->ensure_parsability();
  }
};

void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  GenEnsureParsabilityClosure ep_cl;
  generation_iterate(&ep_cl, false);
}