/*
 * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "compiler/oopMap.hpp"
#include "gc/serial/cardTableRS.hpp"
#include "gc/serial/defNewGeneration.hpp"
#include "gc/serial/genMarkSweep.hpp"
#include "gc/serial/markSweep.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/continuationGCSupport.inline.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcInitLogger.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageParState.inline.hpp"
#include "gc/shared/oopStorageSet.inline.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/slidingForwarding.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/workerThread.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/threads.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/autoRestore.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/vmError.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

GenCollectedHeap::GenCollectedHeap(Generation::Name young,
                                   Generation::Name old,
                                   const char* policy_counters_name) :
  CollectedHeap(),
  _young_gen(nullptr),
  _old_gen(nullptr),
  _young_gen_spec(new GenerationSpec(young,
                                     NewSize,
                                     MaxNewSize,
                                     GenAlignment)),
  _old_gen_spec(new GenerationSpec(old,
                                   OldSize,
                                   MaxOldSize,
                                   GenAlignment)),
  _rem_set(nullptr),
  _soft_ref_gen_policy(),
  _size_policy(nullptr),
  _gc_policy_counters(new GCPolicyCounters(policy_counters_name, 2, 2)),
  _incremental_collection_failed(false),
  _full_collections_completed(0),
  _young_manager(nullptr),
  _old_manager(nullptr) {
}

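// Set up the heap: reserve one contiguous space large enough for both
// generations, initialize the card-table remembered set and barrier set,
// then carve the young generation from the low end of the reservation
// and the old generation from the remainder.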
jint GenCollectedHeap::initialize() {
  // Allocate space for the heap.

  ReservedHeapSpace heap_rs = allocate(HeapAlignment);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  initialize_reserved_region(heap_rs);
  _forwarding = new SlidingForwarding(_reserved);

  _rem_set = create_rem_set(heap_rs.region());
  _rem_set->initialize();
  CardTableBarrierSet *bs = new CardTableBarrierSet(_rem_set);
  bs->initialize();
  BarrierSet::set_barrier_set(bs);

  ReservedSpace young_rs = heap_rs.first_part(_young_gen_spec->max_size());
  _young_gen = _young_gen_spec->init(young_rs, rem_set());
  ReservedSpace old_rs = heap_rs.last_part(_young_gen_spec->max_size());

  old_rs = old_rs.first_part(_old_gen_spec->max_size());
  _old_gen = _old_gen_spec->init(old_rs, rem_set());

  GCInitLogger::print();

  return JNI_OK;
}

CardTableRS* GenCollectedHeap::create_rem_set(const MemRegion& reserved_region) {
  return new CardTableRS(reserved_region);
}

void GenCollectedHeap::initialize_size_policy(size_t init_eden_size,
                                              size_t init_promo_size,
                                              size_t init_survivor_size) {
  const double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0;
  _size_policy = new AdaptiveSizePolicy(init_eden_size,
                                        init_promo_size,
                                        init_survivor_size,
                                        max_gc_pause_sec,
                                        GCTimeRatio);
}

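// Reserve a single contiguous space sized to hold the maximum of both
// generations, checking for size_t overflow and alignment along the way.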
ReservedHeapSpace GenCollectedHeap::allocate(size_t alignment) {
  // Now figure out the total size.
  const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
  assert(alignment % pageSize == 0, "Must be");

  // Check for overflow.
  size_t total_reserved = _young_gen_spec->max_size() + _old_gen_spec->max_size();
  if (total_reserved < _young_gen_spec->max_size()) {
    vm_exit_during_initialization("The size of the object heap + VM data exceeds "
                                  "the maximum representable size");
  }
  assert(total_reserved % alignment == 0,
         "Gen size; total_reserved=" SIZE_FORMAT ", alignment="
         SIZE_FORMAT, total_reserved, alignment);

  ReservedHeapSpace heap_rs = Universe::reserve_heap(total_reserved, alignment);
  size_t used_page_size = heap_rs.page_size();

  os::trace_page_sizes("Heap",
                       MinHeapSize,
                       total_reserved,
                       used_page_size,
                       heap_rs.base(),
                       heap_rs.size());

  return heap_rs;
}

class GenIsScavengable : public BoolObjectClosure {
public:
  bool do_object_b(oop obj) {
    return GenCollectedHeap::heap()->is_in_young(obj);
  }
};

static GenIsScavengable _is_scavengable;

void GenCollectedHeap::post_initialize() {
  CollectedHeap::post_initialize();
  ref_processing_init();

  DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;

  initialize_size_policy(def_new_gen->eden()->capacity(),
                         _old_gen->capacity(),
                         def_new_gen->from()->capacity());

  MarkSweep::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
}

void GenCollectedHeap::ref_processing_init() {
  _young_gen->ref_processor_init();
  _old_gen->ref_processor_init();
}

PreGenGCValues GenCollectedHeap::get_pre_gc_values() const {
  const DefNewGeneration* const def_new_gen = (DefNewGeneration*) young_gen();

  return PreGenGCValues(def_new_gen->used(),
                        def_new_gen->capacity(),
                        def_new_gen->eden()->used(),
                        def_new_gen->eden()->capacity(),
                        def_new_gen->from()->used(),
                        def_new_gen->from()->capacity(),
                        old_gen()->used(),
                        old_gen()->capacity());
}

GenerationSpec* GenCollectedHeap::young_gen_spec() const {
  return _young_gen_spec;
}

GenerationSpec* GenCollectedHeap::old_gen_spec() const {
  return _old_gen_spec;
}

size_t GenCollectedHeap::capacity() const {
  return _young_gen->capacity() + _old_gen->capacity();
}

size_t GenCollectedHeap::used() const {
  return _young_gen->used() + _old_gen->used();
}

void GenCollectedHeap::save_used_regions() {
  _old_gen->save_used_region();
  _young_gen->save_used_region();
}

size_t GenCollectedHeap::max_capacity() const {
  return _young_gen->max_capacity() + _old_gen->max_capacity();
}

// Update the _full_collections_completed counter
// at the end of a stop-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  return _full_collections_completed;
}

// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)
// . heap memory is tight -- the most recent previous collection
//   was a full collection because a partial collection (would
//   have) failed and is likely to fail again
bool GenCollectedHeap::should_try_older_generation_allocation(size_t word_size) const {
  size_t young_capacity = _young_gen->capacity_before_gc();
  return    (word_size > heap_word_size(young_capacity))
         || GCLocker::is_active_and_needs_gc()
         || incremental_collection_failed();
}

HeapWord* GenCollectedHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
  HeapWord* result = nullptr;
  if (_old_gen->should_allocate(size, is_tlab)) {
    result = _old_gen->expand_and_allocate(size, is_tlab);
  }
  if (result == nullptr) {
    if (_young_gen->should_allocate(size, is_tlab)) {
      result = _young_gen->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == nullptr || is_in_reserved(result), "result not in heap");
  return result;
}

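// Central allocation slow path: first try a lock-free allocation in the
// young generation, then a locked allocation (which may consult the old
// generation for large requests), and finally schedule a collection in the
// VM thread, retrying until the request is satisfied or provably cannot be.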
HeapWord* GenCollectedHeap::mem_allocate_work(size_t size,
                                              bool is_tlab,
                                              bool* gc_overhead_limit_was_exceeded) {
  // In general gc_overhead_limit_was_exceeded should be false, so
  // set it to false here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = nullptr;

  // Loop until the allocation is satisfied, or unsatisfied after GC.
  for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {

    // First allocation attempt is lock-free.
    Generation *young = _young_gen;
    if (young->should_allocate(size, is_tlab)) {
      result = young->par_allocate(size, is_tlab);
      if (result != nullptr) {
        assert(is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    uint gc_count_before;  // Read inside the Heap_lock locked region.
    {
      MutexLocker ml(Heap_lock);
      log_trace(gc, alloc)("GenCollectedHeap::mem_allocate_work: attempting locked slow path allocation");
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = !should_try_older_generation_allocation(size);

      result = attempt_allocation(size, is_tlab, first_only);
      if (result != nullptr) {
        assert(is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GCLocker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return nullptr;  // Caller will retry allocating individual object.
        }
        if (!is_maximal_no_gc()) {
          // Try and expand heap to satisfy request.
          result = expand_heap_and_allocate(size, is_tlab);
          // Result could be null if we are out of space.
          if (result != nullptr) {
            return result;
          }
        }

        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
          return nullptr; // We didn't get to do a GC and we didn't get any memory.
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return nullptr;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = total_collections();
    }

    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
         assert(result == nullptr, "must be null if gc_locked() is true");
         continue;  // Retry and/or stall as necessary.
      }

      // Allocation has failed and a collection
      // has been done.  If the gc time limit was exceeded
      // this time, return null so that an out-of-memory error
      // will be thrown.  Clear gc_overhead_limit_exceeded
      // so that the overhead-exceeded state does not persist.

      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      const bool softrefs_clear = soft_ref_policy()->all_soft_refs_clear();

      if (limit_exceeded && softrefs_clear) {
        *gc_overhead_limit_was_exceeded = true;
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (op.result() != nullptr) {
          CollectedHeap::fill_with_object(op.result(), size);
        }
        return nullptr;
      }
      assert(result == nullptr || is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
          log_warning(gc, ergo)("GenCollectedHeap::mem_allocate_work retries %d times,"
                                " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}

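// Try allocating in the young generation first; fall through to the old
// generation only when "first_only" is false, i.e. for requests that are
// allowed to be satisfied by a later generation.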
HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res = nullptr;

  if (_young_gen->should_allocate(size, is_tlab)) {
    res = _young_gen->allocate(size, is_tlab);
    if (res != nullptr || first_only) {
      return res;
    }
  }

  if (_old_gen->should_allocate(size, is_tlab)) {
    res = _old_gen->allocate(size, is_tlab);
  }

  return res;
}

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
  return mem_allocate_work(size,
                           false /* is_tlab */,
                           gc_overhead_limit_was_exceeded);
}

bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
         _gc_cause == GCCause::_wb_full_gc;
}

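// Collect a single generation, bracketing the work with timing, counter
// and memory-manager bookkeeping, reference discovery, and optional
// before/after heap verification.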
void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
                                          bool is_tlab, bool run_verification, bool clear_soft_refs) {
  FormatBuffer<> title("Collect gen: %s", gen->short_name());
  GCTraceTime(Trace, gc, phases) t1(title);
  TraceCollectorStats tcs(gen->counters());
  TraceMemoryManagerStats tmms(gen->gc_manager(), gc_cause());

  gen->stat_record()->invocations++;
  gen->stat_record()->accumulated_time.start();

  // Must be done anew before each collection because
  // a previous collection will do mangling and will
  // change top of some spaces.
  record_gen_tops_before_GC();

  log_trace(gc)("%s invoke=%d size=" SIZE_FORMAT, heap()->is_young_gen(gen) ? "Young" : "Old", gen->stat_record()->invocations, size * HeapWordSize);

  if (run_verification && VerifyBeforeGC) {
    Universe::verify("Before GC");
  }
  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::clear());

  // Do collection work
  {
    // Note on ref discovery: For what appear to be historical reasons,
    // GCH enables and disables (by enqueuing) refs discovery.
    // In the future this should be moved into the generation's
    // collect method so that ref discovery and enqueueing concerns
    // are local to a generation. The collect method could return
    // an appropriate indication in the case that notification on
    // the ref lock was needed. This will make the treatment of
    // weak refs more uniform (and indeed remove such concerns
    // from GCH). XXX

    save_marks();   // save marks for all gens
    // We want to discover references, but not process them yet.
    // This mode is disabled in process_discovered_references if the
    // generation does some collection work, or in
    // enqueue_discovered_references if the generation returns
    // without doing any work.
    ReferenceProcessor* rp = gen->ref_processor();
    rp->start_discovery(clear_soft_refs);

    gen->collect(full, clear_soft_refs, size, is_tlab);

    rp->disable_discovery();
    rp->verify_no_references_recorded();
  }

  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::update_pointers());

  gen->stat_record()->accumulated_time.stop();

  update_gc_stats(gen, full);

  if (run_verification && VerifyAfterGC) {
    Universe::verify("After GC");
  }
}

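// Top-level collection driver, run at a safepoint by the VM thread:
// optionally perform a young collection first, then decide whether a full
// (old generation) collection is still required and perform it.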
void GenCollectedHeap::do_collection(bool           full,
                                     bool           clear_all_soft_refs,
                                     size_t         size,
                                     bool           is_tlab,
                                     GenerationType max_generation) {
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread(), "only VM thread");
  assert(Heap_lock->is_locked(),
         "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");

  if (GCLocker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                          soft_ref_policy()->should_clear_all_soft_refs();

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, soft_ref_policy());

  AutoModifyRestore<bool> temporarily(_is_gc_active, true);

  bool complete = full && (max_generation == OldGen);
  bool old_collects_young = complete && !ScavengeBeforeFullGC;
  bool do_young_collection = !old_collects_young && _young_gen->should_collect(full, size, is_tlab);

  const PreGenGCValues pre_gc_values = get_pre_gc_values();

  bool run_verification = total_collections() >= VerifyGCStartAt;
  bool prepared_for_verification = false;
  bool do_full_collection = false;

  if (do_young_collection) {
    GCIdMark gc_id_mark;
    GCTraceCPUTime tcpu(((DefNewGeneration*)_young_gen)->gc_tracer());
    GCTraceTime(Info, gc) t("Pause Young", nullptr, gc_cause(), true);

    print_heap_before_gc();

    if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
      prepare_for_verify();
      prepared_for_verification = true;
    }

    gc_prologue(complete);
    increment_total_collections(complete);

    collect_generation(_young_gen,
                       full,
                       size,
                       is_tlab,
                       run_verification && VerifyGCLevel <= 0,
                       do_clear_all_soft_refs);

    if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
        size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {
      // Allocation request was met by young GC.
      size = 0;
    }

    // Ask if young collection is enough. If so, do the final steps for young collection,
    // and fallthrough to the end.
    do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation);
    if (!do_full_collection) {
      // Adjust generation sizes.
      _young_gen->compute_new_size();

      print_heap_change(pre_gc_values);

      // Track memory usage and detect low memory after GC finishes
      MemoryService::track_memory_usage();

      gc_epilogue(complete);
    }

    print_heap_after_gc();

  } else {
    // No young collection, ask if we need to perform Full collection.
    do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation);
  }

  if (do_full_collection) {
    GCIdMark gc_id_mark;
    GCTraceCPUTime tcpu(GenMarkSweep::gc_tracer());
    GCTraceTime(Info, gc) t("Pause Full", nullptr, gc_cause(), true);

    print_heap_before_gc();

    if (!prepared_for_verification && run_verification &&
        VerifyGCLevel <= 1 && VerifyBeforeGC) {
      prepare_for_verify();
    }

    if (!do_young_collection) {
      gc_prologue(complete);
      increment_total_collections(complete);
    }

    // Accounting quirk: total full collections would be incremented when "complete"
    // is set, by calling increment_total_collections above. However, we also need to
    // account for Full collections that had "complete" unset.
    if (!complete) {
      increment_total_full_collections();
    }

    CodeCache::on_gc_marking_cycle_start();

    collect_generation(_old_gen,
                       full,
                       size,
                       is_tlab,
                       run_verification && VerifyGCLevel <= 1,
                       do_clear_all_soft_refs);

    CodeCache::on_gc_marking_cycle_finish();
    CodeCache::arm_all_nmethods();

    // Adjust generation sizes.
    _old_gen->compute_new_size();
    _young_gen->compute_new_size();

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge(/*at_safepoint*/true);
    DEBUG_ONLY(MetaspaceUtils::verify();)

    // Need to clear claim bits for the next mark.
    ClassLoaderDataGraph::clear_claimed_marks();

    // Resize the metaspace capacity after full collections
    MetaspaceGC::compute_new_size();
    update_full_collections_completed();

    print_heap_change(pre_gc_values);

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    // Need to tell the epilogue code we are done with Full GC, regardless of the
    // initial value of the "complete" flag.
    gc_epilogue(true);

    print_heap_after_gc();
  }
}

bool GenCollectedHeap::should_do_full_collection(size_t size, bool full, bool is_tlab,
                                                 GenCollectedHeap::GenerationType max_gen) const {
  return max_gen == OldGen && _old_gen->should_collect(full, size, is_tlab);
}

void GenCollectedHeap::register_nmethod(nmethod* nm) {
  ScavengableNMethods::register_nmethod(nm);
}

void GenCollectedHeap::unregister_nmethod(nmethod* nm) {
  ScavengableNMethods::unregister_nmethod(nm);
}

void GenCollectedHeap::verify_nmethod(nmethod* nm) {
  ScavengableNMethods::verify_nmethod(nm);
}

void GenCollectedHeap::prune_scavengable_nmethods() {
  ScavengableNMethods::prune_nmethods();
}

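// Last-ditch allocation path, entered after a normal allocation attempt has
// failed: escalate from heap expansion, to an incremental collection, to a
// full collection, and finally to a fully compacting collection that also
// clears all soft references, retrying the allocation after each step.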
HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  GCCauseSetter x(this, GCCause::_allocation_failure);
  HeapWord* result = nullptr;

  assert(size != 0, "Precondition violated");
  if (GCLocker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;   // Could be null if we are out of space.
  } else if (!incremental_collection_will_fail(false /* don't consult_young */)) {
    // Do an incremental collection.
    do_collection(false,                     // full
                  false,                     // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  GenCollectedHeap::OldGen); // max_generation
  } else {
    log_trace(gc)(" :: Trying full because partial may fail :: ");
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    do_collection(true,                      // full
                  false,                     // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  GenCollectedHeap::OldGen); // max_generation
  }

  result = attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != nullptr) {
    assert(is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != nullptr) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    do_collection(true,                      // full
                  true,                      // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  GenCollectedHeap::OldGen); // max_generation
  }

  result = attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != nullptr) {
    assert(is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!soft_ref_policy()->should_clear_all_soft_refs(),
    "Flag should have been handled and cleared prior to this point");

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return nullptr;
}

#ifdef ASSERT
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!GenCollectedHeap::heap()->is_in_partial_collection(*p),
      "Referent should not be scavengable.");  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif

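// Apply the given closures to the strong roots: class loader data, thread
// stacks (and the code roots they reference), the strong OopStorage sets,
// and the requested portion of the code cache.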
void GenCollectedHeap::process_roots(ScanningOption so,
                                     OopClosure* strong_roots,
                                     CLDClosure* strong_cld_closure,
                                     CLDClosure* weak_cld_closure,
                                     CodeBlobToOopClosure* code_roots) {
  // General roots.
  assert(code_roots != nullptr, "code root closure should always be set");

  ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);

  // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
  CodeBlobToOopClosure* roots_from_code_p = (so & SO_AllCodeCache) ? nullptr : code_roots;

  Threads::oops_do(strong_roots, roots_from_code_p);

  OopStorageSet::strong_oops_do(strong_roots);

  if (so & SO_ScavengeCodeCache) {
    assert(code_roots != nullptr, "must supply closure for code cache");

    // We only visit parts of the CodeCache when scavenging.
    ScavengableNMethods::nmethods_do(code_roots);
  }
  if (so & SO_AllCodeCache) {
    assert(code_roots != nullptr, "must supply closure for code cache");

    // CMSCollector uses this to do intermediate-strength collections.
    // We scan the entire code cache, since CodeCache::do_unloading is not called.
    CodeCache::blobs_do(code_roots);
  }
  // Verify that the code cache contents are not subject to
  // movement by a scavenging collection.
  DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
  DEBUG_ONLY(ScavengableNMethods::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
}

void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
  WeakProcessor::oops_do(root_closure);
}

bool GenCollectedHeap::no_allocs_since_save_marks() {
  return _young_gen->no_allocs_since_save_marks() &&
         _old_gen->no_allocs_since_save_marks();
}

// public collection interfaces
void GenCollectedHeap::collect(GCCause::Cause cause) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  unsigned int gc_count_before;
  unsigned int full_gc_count_before;

  {
    MutexLocker ml(Heap_lock);
    // Read the GC count while holding the Heap_lock
    gc_count_before      = total_collections();
    full_gc_count_before = total_full_collections();
  }

  if (GCLocker::should_discard(cause, gc_count_before)) {
    return;
  }

  bool should_run_young_gc =  (cause == GCCause::_wb_young_gc)
                           || (cause == GCCause::_gc_locker)
                DEBUG_ONLY(|| (cause == GCCause::_scavenge_alot));

  const GenerationType max_generation = should_run_young_gc
                                      ? YoungGen
                                      : OldGen;

  VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                       cause, max_generation);
  VMThread::execute(&op);
}

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_full_collection(clear_all_soft_refs, OldGen);
}

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          GenerationType last_generation) {
  do_collection(true,                   // full
                clear_all_soft_refs,    // clear_all_soft_refs
                0,                      // size
                false,                  // is_tlab
                last_generation);       // last_generation
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full
  if (gc_cause() == GCCause::_gc_locker && incremental_collection_failed()) {
    log_debug(gc, jni)("GC locker: Trying a full collection because scavenge failed");
    // This time allow the old gen to be collected as well
    do_collection(true,                // full
                  clear_all_soft_refs, // clear_all_soft_refs
                  0,                   // size
                  false,               // is_tlab
                  OldGen);             // last_generation
  }
}

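// This test relies on the heap layout established in initialize(): the young
// generation occupies the low end of the reservation, directly below the old
// generation, so a simple address compare suffices (asserted below).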
bool GenCollectedHeap::is_in_young(const void* p) const {
  bool result = p < _old_gen->reserved().start();
  assert(result == _young_gen->is_in_reserved(p),
         "incorrect test - result=%d, p=" PTR_FORMAT, result, p2i(p));
  return result;
}

bool GenCollectedHeap::requires_barriers(stackChunkOop obj) const {
  return !is_in_young(obj);
}

// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
  return _young_gen->is_in(p) || _old_gen->is_in(p);
}

#ifdef ASSERT
// Don't implement this by using is_in_young().  This method is used
// in some cases to check that is_in_young() is correct.
bool GenCollectedHeap::is_in_partial_collection(const void* p) {
  assert(is_in_reserved(p) || p == nullptr,
    "Does not work if address is non-null and outside of the heap");
  return p < _young_gen->reserved().end() && p != nullptr;
}
#endif

void GenCollectedHeap::oop_iterate(OopIterateClosure* cl) {
  _young_gen->oop_iterate(cl);
  _old_gen->oop_iterate(cl);
}

void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  _young_gen->object_iterate(cl);
  _old_gen->object_iterate(cl);
}

Space* GenCollectedHeap::space_containing(const void* addr) const {
  Space* res = _young_gen->space_containing(addr);
  if (res != nullptr) {
    return res;
  }
  res = _old_gen->space_containing(addr);
  assert(res != nullptr, "Could not find containing space");
  return res;
}

HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  if (_young_gen->is_in_reserved(addr)) {
    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
    return _young_gen->block_start(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
  return _old_gen->block_start(addr);
}

bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  if (_young_gen->is_in_reserved(addr)) {
    return _young_gen->block_is_obj(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  return _old_gen->block_is_obj(addr);
}

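// Only the young generation supports TLAB allocation, so all TLAB queries
// are delegated to it; the asserts below check this invariant.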
size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->tlab_capacity();
}

size_t GenCollectedHeap::tlab_used(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->tlab_used();
}

size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->unsafe_max_tlab_alloc();
}

HeapWord* GenCollectedHeap::allocate_new_tlab(size_t min_size,
                                              size_t requested_size,
                                              size_t* actual_size) {
  bool gc_overhead_limit_was_exceeded;
  HeapWord* result = mem_allocate_work(requested_size /* size */,
                                       true /* is_tlab */,
                                       &gc_overhead_limit_was_exceeded);
  if (result != nullptr) {
    *actual_size = requested_size;
  }

  return result;
}

// Requires "*prev_ptr" to be non-null.  Removes and returns a block of
// minimal size from the list headed by "*prev_ptr".
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  bool first = true;
  size_t min_size = 0;   // "first" makes this conceptually infinite.
  ScratchBlock **smallest_ptr, *smallest;
  ScratchBlock  *cur = *prev_ptr;
  while (cur) {
    assert(*prev_ptr == cur, "just checking");
    if (first || cur->num_words < min_size) {
      smallest_ptr = prev_ptr;
      smallest     = cur;
      min_size     = smallest->num_words;
      first        = false;
    }
    prev_ptr = &cur->next;
    cur     =  cur->next;
  }
  smallest      = *smallest_ptr;
  *smallest_ptr = smallest->next;
  return smallest;
}

// Sort the scratch block list headed by "list" into decreasing size order,
// and set "list" to the result.
static void sort_scratch_list(ScratchBlock*& list) {
  ScratchBlock* sorted = nullptr;
  ScratchBlock* unsorted = list;
  while (unsorted) {
    ScratchBlock *smallest = removeSmallestScratch(&unsorted);
    smallest->next  = sorted;
    sorted          = smallest;
  }
  list = sorted;
}

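// Gather scratch space contributed by both generations on behalf of the
// requesting generation and hand it back sorted largest-first.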
ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                               size_t max_alloc_words) {
  ScratchBlock* res = nullptr;
  _young_gen->contribute_scratch(res, requestor, max_alloc_words);
  _old_gen->contribute_scratch(res, requestor, max_alloc_words);
  sort_scratch_list(res);
  return res;
}

void GenCollectedHeap::release_scratch() {
  _young_gen->reset_scratch();
  _old_gen->reset_scratch();
}

void GenCollectedHeap::prepare_for_verify() {
  ensure_parsability(false);        // no need to retire TLABs
}

void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
  if (old_to_young) {
    cl->do_generation(_old_gen);
    cl->do_generation(_young_gen);
  } else {
    cl->do_generation(_young_gen);
    cl->do_generation(_old_gen);
  }
}

bool GenCollectedHeap::is_maximal_no_gc() const {
  return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
}

void GenCollectedHeap::save_marks() {
  _young_gen->save_marks();
  _old_gen->save_marks();
}

GenCollectedHeap* GenCollectedHeap::heap() {
  // SerialHeap is the only subtype of GenCollectedHeap.
  return named_heap<GenCollectedHeap>(CollectedHeap::Serial);
}

#if INCLUDE_SERIALGC
void GenCollectedHeap::prepare_for_compaction() {
  // Start by compacting into same gen.
  CompactPoint cp(_old_gen);
  _forwarding->clear();
  _old_gen->prepare_for_compaction(&cp);
  _young_gen->prepare_for_compaction(&cp);
}
#endif // INCLUDE_SERIALGC

void GenCollectedHeap::verify(VerifyOption option /* ignored */) {
  log_debug(gc, verify)("%s", _old_gen->name());
  _old_gen->verify();

  log_debug(gc, verify)("%s", _young_gen->name());
  _young_gen->verify();

  log_debug(gc, verify)("RemSet");
  rem_set()->verify();
}

void GenCollectedHeap::print_on(outputStream* st) const {
  if (_young_gen != nullptr) {
    _young_gen->print_on(st);
  }
  if (_old_gen != nullptr) {
    _old_gen->print_on(st);
  }
  MetaspaceUtils::print_on(st);
}

void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
}

bool GenCollectedHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<GenCollectedHeap>::print_location(st, addr);
}

void GenCollectedHeap::print_tracing_info() const {
  if (log_is_enabled(Debug, gc, heap, exit)) {
    LogStreamHandle(Debug, gc, heap, exit) lsh;
    _young_gen->print_summary_info_on(&lsh);
    _old_gen->print_summary_info_on(&lsh);
  }
}

void GenCollectedHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
  const DefNewGeneration* const def_new_gen = (DefNewGeneration*) young_gen();

  log_info(gc, heap)(HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(def_new_gen->short_name(),
                                             pre_gc_values.young_gen_used(),
                                             pre_gc_values.young_gen_capacity(),
                                             def_new_gen->used(),
                                             def_new_gen->capacity()),
                     HEAP_CHANGE_FORMAT_ARGS("Eden",
                                             pre_gc_values.eden_used(),
                                             pre_gc_values.eden_capacity(),
                                             def_new_gen->eden()->used(),
                                             def_new_gen->eden()->capacity()),
                     HEAP_CHANGE_FORMAT_ARGS("From",
                                             pre_gc_values.from_used(),
                                             pre_gc_values.from_capacity(),
                                             def_new_gen->from()->used(),
                                             def_new_gen->from()->capacity()));
  log_info(gc, heap)(HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(old_gen()->short_name(),
                                             pre_gc_values.old_gen_used(),
                                             pre_gc_values.old_gen_capacity(),
                                             old_gen()->used(),
                                             old_gen()->capacity()));
  MetaspaceUtils::print_metaspace_change(pre_gc_values.metaspace_sizes());
}

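// GC prologue/epilogue work common to all generations is distributed via
// the following closures through generation_iterate().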
class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_prologue(_full);
  }
  GenGCPrologueClosure(bool full) : _full(full) {};
};

void GenCollectedHeap::gc_prologue(bool full) {
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");

  // Fill TLAB's and such
  ensure_parsability(true);   // retire TLABs

  // Walk generations
  GenGCPrologueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.
};

class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_epilogue(_full);
  }
  GenGCEpilogueClosure(bool full) : _full(full) {};
};

void GenCollectedHeap::gc_epilogue(bool full) {
#if COMPILER2_OR_JVMCI
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
#endif // COMPILER2_OR_JVMCI

  resize_all_tlabs();

  GenGCEpilogueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.

  MetaspaceCounters::update_performance_counters();
};

#ifndef PRODUCT
class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
 private:
 public:
  void do_generation(Generation* gen) {
    gen->record_spaces_top();
  }
};

void GenCollectedHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    GenGCSaveTopsBeforeGCClosure blk;
    generation_iterate(&blk, false);  // not old-to-young.
  }
}
#endif  // not PRODUCT

class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->ensure_parsability();
  }
};

void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  GenEnsureParsabilityClosure ep_cl;
  generation_iterate(&ep_cl, false);
}