/*
 * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "compiler/oopMap.hpp"
#include "gc/serial/defNewGeneration.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcInitLogger.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageParState.inline.hpp"
#include "gc/shared/oopStorageSet.inline.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/slidingForwarding.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/workgroup.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/autoRestore.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/vmError.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

GenCollectedHeap::GenCollectedHeap(Generation::Name young,
                                   Generation::Name old,
                                   const char* policy_counters_name) :
  CollectedHeap(),
  _young_gen(NULL),
  _old_gen(NULL),
  _young_gen_spec(new GenerationSpec(young,
                                     NewSize,
                                     MaxNewSize,
                                     GenAlignment)),
  _old_gen_spec(new GenerationSpec(old,
                                   OldSize,
                                   MaxOldSize,
                                   GenAlignment)),
  _rem_set(NULL),
  _soft_ref_gen_policy(),
  _size_policy(NULL),
  _gc_policy_counters(new GCPolicyCounters(policy_counters_name, 2, 2)),
  _incremental_collection_failed(false),
  _full_collections_completed(0),
  _young_manager(NULL),
  _old_manager(NULL) {
}

jint GenCollectedHeap::initialize() {
  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  // Allocate space for the heap.

  ReservedHeapSpace heap_rs = allocate(HeapAlignment);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  initialize_reserved_region(heap_rs);

  _rem_set = create_rem_set(heap_rs.region());
  _rem_set->initialize();
  CardTableBarrierSet *bs = new CardTableBarrierSet(_rem_set);
  bs->initialize();
  BarrierSet::set_barrier_set(bs);

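  // Split the single contiguous reservation between the two generations:
  // the young generation gets the low end of the address range, the old
  // generation the part immediately above it. is_in_young() relies on
  // this address ordering.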
  ReservedSpace young_rs = heap_rs.first_part(_young_gen_spec->max_size());
  _young_gen = _young_gen_spec->init(young_rs, rem_set());
  ReservedSpace old_rs = heap_rs.last_part(_young_gen_spec->max_size());

  old_rs = old_rs.first_part(_old_gen_spec->max_size());
  _old_gen = _old_gen_spec->init(old_rs, rem_set());

  GCInitLogger::print();

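  // Set up the sliding-forwarding table used by the compacting full GC to
  // encode forwarding pointers compactly; the second argument is the
  // covered region size in heap words (see slidingForwarding.hpp).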
  SlidingForwarding::initialize(_reserved, SpaceAlignment / HeapWordSize);

  return JNI_OK;
}

CardTableRS* GenCollectedHeap::create_rem_set(const MemRegion& reserved_region) {
  return new CardTableRS(reserved_region);
}

void GenCollectedHeap::initialize_size_policy(size_t init_eden_size,
                                              size_t init_promo_size,
                                              size_t init_survivor_size) {
  const double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0;
  _size_policy = new AdaptiveSizePolicy(init_eden_size,
                                        init_promo_size,
                                        init_survivor_size,
                                        max_gc_pause_sec,
                                        GCTimeRatio);
}

ReservedHeapSpace GenCollectedHeap::allocate(size_t alignment) {
  // Now figure out the total size.
  const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
  assert(alignment % pageSize == 0, "Must be");

  // Check for overflow.
  size_t total_reserved = _young_gen_spec->max_size() + _old_gen_spec->max_size();
  if (total_reserved < _young_gen_spec->max_size()) {
    vm_exit_during_initialization("The size of the object heap + VM data exceeds "
                                  "the maximum representable size");
  }
  assert(total_reserved % alignment == 0,
         "Gen size; total_reserved=" SIZE_FORMAT ", alignment="
         SIZE_FORMAT, total_reserved, alignment);

  ReservedHeapSpace heap_rs = Universe::reserve_heap(total_reserved, alignment);
  size_t used_page_size = heap_rs.page_size();

  os::trace_page_sizes("Heap",
                       MinHeapSize,
                       total_reserved,
                       used_page_size,
                       heap_rs.base(),
                       heap_rs.size());

  return heap_rs;
}

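// Closure handed to ScavengableNMethods (see post_initialize below): an
// oop embedded in an nmethod is "scavengable" exactly when it points into
// the young generation.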
class GenIsScavengable : public BoolObjectClosure {
public:
  bool do_object_b(oop obj) {
    return GenCollectedHeap::heap()->is_in_young(obj);
  }
};

static GenIsScavengable _is_scavengable;

void GenCollectedHeap::post_initialize() {
  CollectedHeap::post_initialize();
  ref_processing_init();

  DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;

  initialize_size_policy(def_new_gen->eden()->capacity(),
                         _old_gen->capacity(),
                         def_new_gen->from()->capacity());

  MarkSweep::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
}

void GenCollectedHeap::ref_processing_init() {
  _young_gen->ref_processor_init();
  _old_gen->ref_processor_init();
}

PreGenGCValues GenCollectedHeap::get_pre_gc_values() const {
  const DefNewGeneration* const def_new_gen = (DefNewGeneration*) young_gen();

  return PreGenGCValues(def_new_gen->used(),
                        def_new_gen->capacity(),
                        def_new_gen->eden()->used(),
                        def_new_gen->eden()->capacity(),
                        def_new_gen->from()->used(),
                        def_new_gen->from()->capacity(),
                        old_gen()->used(),
                        old_gen()->capacity());
}

GenerationSpec* GenCollectedHeap::young_gen_spec() const {
  return _young_gen_spec;
}

GenerationSpec* GenCollectedHeap::old_gen_spec() const {
  return _old_gen_spec;
}

size_t GenCollectedHeap::capacity() const {
  return _young_gen->capacity() + _old_gen->capacity();
}

size_t GenCollectedHeap::used() const {
  return _young_gen->used() + _old_gen->used();
}

void GenCollectedHeap::save_used_regions() {
  _old_gen->save_used_region();
  _young_gen->save_used_region();
}

size_t GenCollectedHeap::max_capacity() const {
  return _young_gen->max_capacity() + _old_gen->max_capacity();
}

// Update the _full_collections_completed counter
// at the end of a stop-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  return _full_collections_completed;
}

// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)
// . heap memory is tight -- the most recent previous collection
//   was a full collection because a partial collection (would
//   have) failed and is likely to fail again
bool GenCollectedHeap::should_try_older_generation_allocation(size_t word_size) const {
  size_t young_capacity = _young_gen->capacity_before_gc();
  return    (word_size > heap_word_size(young_capacity))
         || GCLocker::is_active_and_needs_gc()
         || incremental_collection_failed();
}

HeapWord* GenCollectedHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
  HeapWord* result = NULL;
  if (_old_gen->should_allocate(size, is_tlab)) {
    result = _old_gen->expand_and_allocate(size, is_tlab);
  }
  if (result == NULL) {
    if (_young_gen->should_allocate(size, is_tlab)) {
      result = _young_gen->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == NULL || is_in_reserved(result), "result not in heap");
  return result;
}

HeapWord* GenCollectedHeap::mem_allocate_work(size_t size,
                                              bool is_tlab,
                                              bool* gc_overhead_limit_was_exceeded) {
  // In general gc_overhead_limit_was_exceeded should be false, so
  // set it to false here; it is set to true below only if the GC
  // time limit is being exceeded.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied, or unsatisfied after GC.
  for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {

    // First allocation attempt is lock-free.
    Generation *young = _young_gen;
    assert(young->supports_inline_contig_alloc(),
           "Otherwise, must do alloc within heap lock");
    if (young->should_allocate(size, is_tlab)) {
      result = young->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    uint gc_count_before;  // Read inside the Heap_lock locked region.
    {
      MutexLocker ml(Heap_lock);
      log_trace(gc, alloc)("GenCollectedHeap::mem_allocate_work: attempting locked slow path allocation");
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = !should_try_older_generation_allocation(size);

      result = attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GCLocker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return NULL;  // Caller will retry allocating individual object.
        }
        if (!is_maximal_no_gc()) {
          // Try and expand heap to satisfy request.
          result = expand_heap_and_allocate(size, is_tlab);
          // Result could be null if we are out of space.
          if (result != NULL) {
            return result;
          }
        }

        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
          return NULL; // We didn't get to do a GC and we didn't get any memory.
        }

        // If this thread is not in a JNI critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section, so
        // we retry the allocation sequence from the beginning of the loop
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = total_collections();
    }

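    // The gc count read above lets the VM operation detect whether another
    // GC ran between releasing the Heap_lock and reaching the safepoint;
    // if one did, the operation's prologue fails and we retry the loop.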
    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
        assert(result == NULL, "must be NULL if gc_locked() is true");
        continue;  // Retry and/or stall as necessary.
      }

      // Allocation has failed and a collection has been done.
      // If the GC time limit was exceeded this time, return NULL
      // so that an out-of-memory error will be thrown. Clear
      // gc_overhead_limit_exceeded so that the overhead-exceeded
      // state does not persist.

      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      const bool softrefs_clear = soft_ref_policy()->all_soft_refs_clear();

      if (limit_exceeded && softrefs_clear) {
        *gc_overhead_limit_was_exceeded = true;
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (op.result() != NULL) {
          CollectedHeap::fill_with_object(op.result(), size);
        }
        return NULL;
      }
      assert(result == NULL || is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("GenCollectedHeap::mem_allocate_work retries %d times,"
                            " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}

HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res = NULL;

  if (_young_gen->should_allocate(size, is_tlab)) {
    res = _young_gen->allocate(size, is_tlab);
    if (res != NULL || first_only) {
      return res;
    }
  }

  if (_old_gen->should_allocate(size, is_tlab)) {
    res = _old_gen->allocate(size, is_tlab);
  }

  return res;
}

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
  return mem_allocate_work(size,
                           false /* is_tlab */,
                           gc_overhead_limit_was_exceeded);
}

bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
         _gc_cause == GCCause::_wb_full_gc;
}

void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
                                          bool is_tlab, bool run_verification, bool clear_soft_refs,
                                          bool restore_marks_for_biased_locking) {
  FormatBuffer<> title("Collect gen: %s", gen->short_name());
  GCTraceTime(Trace, gc, phases) t1(title);
  TraceCollectorStats tcs(gen->counters());
  TraceMemoryManagerStats tmms(gen->gc_manager(), gc_cause(), heap()->is_young_gen(gen) ? "end of minor GC" : "end of major GC");

  gen->stat_record()->invocations++;
  gen->stat_record()->accumulated_time.start();

  // Must be done anew before each collection because
  // a previous collection will do mangling and will
  // change top of some spaces.
  record_gen_tops_before_GC();

  log_trace(gc)("%s invoke=%d size=" SIZE_FORMAT, heap()->is_young_gen(gen) ? "Young" : "Old", gen->stat_record()->invocations, size * HeapWordSize);

  if (run_verification && VerifyBeforeGC) {
    Universe::verify("Before GC");
  }
  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::clear());

  if (restore_marks_for_biased_locking) {
    // We perform this mark word preservation work lazily
    // because it's only at this point that we know whether we
    // absolutely have to do it; we want to avoid doing it for
    // scavenge-only collections where it's unnecessary
    BiasedLocking::preserve_marks();
  }

  // Do collection work
  {
    // Note on ref discovery: For what appear to be historical reasons,
    // GCH enables and disables (by enqueueing) refs discovery.
    // In the future this should be moved into the generation's
    // collect method so that ref discovery and enqueueing concerns
    // are local to a generation. The collect method could return
    // an appropriate indication in the case that notification on
    // the ref lock was needed. This will make the treatment of
    // weak refs more uniform (and indeed remove such concerns
    // from GCH). XXX

    save_marks();   // save marks for all gens
    // We want to discover references, but not process them yet.
    // This mode is disabled in process_discovered_references if the
    // generation does some collection work, or in
    // enqueue_discovered_references if the generation returns
    // without doing any work.
    ReferenceProcessor* rp = gen->ref_processor();
    // If the discovery of ("weak") refs in this generation is
    // atomic wrt other collectors in this configuration, we
    // are guaranteed to have empty discovered ref lists.
    if (rp->discovery_is_atomic()) {
      rp->enable_discovery();
      rp->setup_policy(clear_soft_refs);
    } else {
      // collect() below will enable discovery as appropriate
    }
    gen->collect(full, clear_soft_refs, size, is_tlab);
    if (!rp->enqueuing_is_done()) {
      rp->disable_discovery();
    } else {
      rp->set_enqueuing_is_done(false);
    }
    rp->verify_no_references_recorded();
  }

  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::update_pointers());

  gen->stat_record()->accumulated_time.stop();

  update_gc_stats(gen, full);

  if (run_verification && VerifyAfterGC) {
    Universe::verify("After GC");
  }
}

void GenCollectedHeap::do_collection(bool           full,
                                     bool           clear_all_soft_refs,
                                     size_t         size,
                                     bool           is_tlab,
                                     GenerationType max_generation) {
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread(), "only VM thread");
  assert(Heap_lock->is_locked(),
         "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");

  if (GCLocker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                          soft_ref_policy()->should_clear_all_soft_refs();

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, soft_ref_policy());

  AutoModifyRestore<bool> temporarily(_is_gc_active, true);

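  // Decide what to collect: a "complete" collection covers the old
  // generation. Unless ScavengeBeforeFullGC requests a separate young
  // collection first, a complete collection handles young objects too,
  // so the standalone young collection is skipped.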
  bool complete = full && (max_generation == OldGen);
  bool old_collects_young = complete && !ScavengeBeforeFullGC;
  bool do_young_collection = !old_collects_young && _young_gen->should_collect(full, size, is_tlab);

  const PreGenGCValues pre_gc_values = get_pre_gc_values();

  bool run_verification = total_collections() >= VerifyGCStartAt;
  bool prepared_for_verification = false;
  bool do_full_collection = false;

  if (do_young_collection) {
    GCIdMark gc_id_mark;
    GCTraceCPUTime tcpu;
    GCTraceTime(Info, gc) t("Pause Young", NULL, gc_cause(), true);

    print_heap_before_gc();

    if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
      prepare_for_verify();
      prepared_for_verification = true;
    }

    gc_prologue(complete);
    increment_total_collections(complete);

    collect_generation(_young_gen,
                       full,
                       size,
                       is_tlab,
                       run_verification && VerifyGCLevel <= 0,
                       do_clear_all_soft_refs,
                       false);

    if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
        size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {
      // Allocation request was met by young GC.
      size = 0;
    }

    // Ask if the young collection is enough. If so, do the final steps for
    // the young collection, and fall through to the end.
    do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation);
    if (!do_full_collection) {
      // Adjust generation sizes.
      _young_gen->compute_new_size();

      print_heap_change(pre_gc_values);

      // Track memory usage and detect low memory after GC finishes
      MemoryService::track_memory_usage();

      gc_epilogue(complete);
    }

    print_heap_after_gc();

  } else {
    // No young collection, ask if we need to perform Full collection.
    do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation);
  }

  if (do_full_collection) {
    GCIdMark gc_id_mark;
    GCTraceCPUTime tcpu;
    GCTraceTime(Info, gc) t("Pause Full", NULL, gc_cause(), true);

    print_heap_before_gc();

    if (!prepared_for_verification && run_verification &&
        VerifyGCLevel <= 1 && VerifyBeforeGC) {
      prepare_for_verify();
    }

    if (!do_young_collection) {
      gc_prologue(complete);
      increment_total_collections(complete);
    }

    // Accounting quirk: total full collections would be incremented when
    // "complete" is set, by calling increment_total_collections above.
    // However, we also need to account for Full collections that had
    // "complete" unset.
    if (!complete) {
      increment_total_full_collections();
    }

    collect_generation(_old_gen,
                       full,
                       size,
                       is_tlab,
                       run_verification && VerifyGCLevel <= 1,
                       do_clear_all_soft_refs,
                       true);

    // Adjust generation sizes.
    _old_gen->compute_new_size();
    _young_gen->compute_new_size();

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge(/*at_safepoint*/true);
    DEBUG_ONLY(MetaspaceUtils::verify();)
    // Resize the metaspace capacity after full collections
    MetaspaceGC::compute_new_size();
    update_full_collections_completed();

    print_heap_change(pre_gc_values);

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    // Need to tell the epilogue code we are done with Full GC, regardless of
    // the initial value of the "complete" flag.
    gc_epilogue(true);

    BiasedLocking::restore_marks();

    print_heap_after_gc();
  }
}

bool GenCollectedHeap::should_do_full_collection(size_t size, bool full, bool is_tlab,
                                                 GenCollectedHeap::GenerationType max_gen) const {
  return max_gen == OldGen && _old_gen->should_collect(full, size, is_tlab);
}

void GenCollectedHeap::register_nmethod(nmethod* nm) {
  ScavengableNMethods::register_nmethod(nm);
}

void GenCollectedHeap::unregister_nmethod(nmethod* nm) {
  ScavengableNMethods::unregister_nmethod(nm);
}

void GenCollectedHeap::verify_nmethod(nmethod* nm) {
  ScavengableNMethods::verify_nmethod(nm);
}

void GenCollectedHeap::flush_nmethod(nmethod* nm) {
  // Do nothing.
}

void GenCollectedHeap::prune_scavengable_nmethods() {
  ScavengableNMethods::prune_nmethods();
}

HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  GCCauseSetter x(this, GCCause::_allocation_failure);
  HeapWord* result = NULL;

  assert(size != 0, "Precondition violated");
  if (GCLocker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;   // Could be null if we are out of space.
  } else if (!incremental_collection_will_fail(false /* don't consult_young */)) {
    // Do an incremental collection.
    do_collection(false,                     // full
                  false,                     // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  GenCollectedHeap::OldGen); // max_generation
  } else {
    log_trace(gc)(" :: Trying full because partial may fail :: ");
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    do_collection(true,                      // full
                  false,                     // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  GenCollectedHeap::OldGen); // max_generation
  }

  result = attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != NULL) {
    assert(is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != NULL) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    do_collection(true,                      // full
                  true,                      // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  GenCollectedHeap::OldGen); // max_generation
  }

  result = attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != NULL) {
    assert(is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!soft_ref_policy()->should_clear_all_soft_refs(),
         "Flag should have been handled and cleared prior to this point");

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}

#ifdef ASSERT
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!GenCollectedHeap::heap()->is_in_partial_collection(*p),
           "Referent should not be scavengable.");
  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif

void GenCollectedHeap::process_roots(ScanningOption so,
                                     OopClosure* strong_roots,
                                     CLDClosure* strong_cld_closure,
                                     CLDClosure* weak_cld_closure,
                                     CodeBlobToOopClosure* code_roots) {
  // General roots.
  assert(code_roots != NULL, "code root closure should always be set");

  ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);

  // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
  CodeBlobToOopClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;

  Threads::oops_do(strong_roots, roots_from_code_p);

  OopStorageSet::strong_oops_do(strong_roots);

  if (so & SO_ScavengeCodeCache) {
    assert(code_roots != NULL, "must supply closure for code cache");

    // We only visit parts of the CodeCache when scavenging.
    ScavengableNMethods::nmethods_do(code_roots);
  }
  if (so & SO_AllCodeCache) {
    assert(code_roots != NULL, "must supply closure for code cache");

    // CMSCollector uses this to do intermediate-strength collections.
    // We scan the entire code cache, since CodeCache::do_unloading is not called.
    CodeCache::blobs_do(code_roots);
  }
  // Verify that the code cache contents are not subject to
  // movement by a scavenging collection.
  DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
  DEBUG_ONLY(ScavengableNMethods::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
}

void GenCollectedHeap::full_process_roots(bool is_adjust_phase,
                                          ScanningOption so,
                                          bool only_strong_roots,
                                          OopClosure* root_closure,
                                          CLDClosure* cld_closure) {
  MarkingCodeBlobClosure mark_code_closure(root_closure, is_adjust_phase);
  CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;

  process_roots(so, root_closure, cld_closure, weak_cld_closure, &mark_code_closure);
}

void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
  WeakProcessor::oops_do(root_closure);
  _young_gen->ref_processor()->weak_oops_do(root_closure);
  _old_gen->ref_processor()->weak_oops_do(root_closure);
}

bool GenCollectedHeap::no_allocs_since_save_marks() {
  return _young_gen->no_allocs_since_save_marks() &&
         _old_gen->no_allocs_since_save_marks();
}

bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _young_gen->supports_inline_contig_alloc();
}

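// Expose the young generation's allocation top and end so that compiled
// code can perform inline contiguous ("bump-the-pointer") allocation in
// eden without taking the heap lock.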
HeapWord* volatile* GenCollectedHeap::top_addr() const {
  return _young_gen->top_addr();
}

HeapWord** GenCollectedHeap::end_addr() const {
  return _young_gen->end_addr();
}

// public collection interfaces

void GenCollectedHeap::collect(GCCause::Cause cause) {
  if ((cause == GCCause::_wb_young_gc) ||
      (cause == GCCause::_gc_locker)) {
    // Young collection for WhiteBox or GCLocker.
    collect(cause, YoungGen);
  } else {
#ifdef ASSERT
  if (cause == GCCause::_scavenge_alot) {
    // Young collection only.
    collect(cause, YoungGen);
  } else {
    // Stop-the-world full collection.
    collect(cause, OldGen);
  }
#else
    // Stop-the-world full collection.
    collect(cause, OldGen);
#endif
  }
}

void GenCollectedHeap::collect(GCCause::Cause cause, GenerationType max_generation) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
  collect_locked(cause, max_generation);
}

void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
  // The caller has the Heap_lock
  assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
  collect_locked(cause, OldGen);
}

// This is the private collection interface.
// The Heap_lock is expected to be held on entry.

void GenCollectedHeap::collect_locked(GCCause::Cause cause, GenerationType max_generation) {
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before      = total_collections();
  unsigned int full_gc_count_before = total_full_collections();

  if (GCLocker::should_discard(cause, gc_count_before)) {
    return;
  }

  {
    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_generation);
    VMThread::execute(&op);
  }
}

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_full_collection(clear_all_soft_refs, OldGen);
}

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          GenerationType last_generation) {
  do_collection(true,                   // full
                clear_all_soft_refs,    // clear_all_soft_refs
                0,                      // size
                false,                  // is_tlab
                last_generation);       // last_generation
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full
  if (gc_cause() == GCCause::_gc_locker && incremental_collection_failed()) {
    log_debug(gc, jni)("GC locker: Trying a full collection because scavenge failed");
    // This time allow the old gen to be collected as well
    do_collection(true,                // full
                  clear_all_soft_refs, // clear_all_soft_refs
                  0,                   // size
                  false,               // is_tlab
                  OldGen);             // last_generation
  }
}

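// Relies on the heap layout set up in initialize(): the young generation
// lies entirely below the old generation, so a single address compare
// suffices. The assert cross-checks against the young reserved region.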
bool GenCollectedHeap::is_in_young(oop p) {
  bool result = cast_from_oop<HeapWord*>(p) < _old_gen->reserved().start();
  assert(result == _young_gen->is_in_reserved(p),
         "incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p));
  return result;
}

// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
  return _young_gen->is_in(p) || _old_gen->is_in(p);
}

#ifdef ASSERT
// Don't implement this by using is_in_young().  This method is used
// in some cases to check that is_in_young() is correct.
bool GenCollectedHeap::is_in_partial_collection(const void* p) {
  assert(is_in_reserved(p) || p == NULL,
         "Does not work if address is non-null and outside of the heap");
  return p < _young_gen->reserved().end() && p != NULL;
}
#endif

void GenCollectedHeap::oop_iterate(OopIterateClosure* cl) {
  _young_gen->oop_iterate(cl);
  _old_gen->oop_iterate(cl);
}

void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  _young_gen->object_iterate(cl);
  _old_gen->object_iterate(cl);
}

Space* GenCollectedHeap::space_containing(const void* addr) const {
  Space* res = _young_gen->space_containing(addr);
  if (res != NULL) {
    return res;
  }
  res = _old_gen->space_containing(addr);
  assert(res != NULL, "Could not find containing space");
  return res;
}

HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  if (_young_gen->is_in_reserved(addr)) {
    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
    return _young_gen->block_start(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
  return _old_gen->block_start(addr);
}

bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  if (_young_gen->is_in_reserved(addr)) {
    return _young_gen->block_is_obj(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  return _old_gen->block_is_obj(addr);
}

size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->tlab_capacity();
}

size_t GenCollectedHeap::tlab_used(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->tlab_used();
}

size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->unsafe_max_tlab_alloc();
}

HeapWord* GenCollectedHeap::allocate_new_tlab(size_t min_size,
                                              size_t requested_size,
                                              size_t* actual_size) {
  bool gc_overhead_limit_was_exceeded;
  HeapWord* result = mem_allocate_work(requested_size /* size */,
                                       true /* is_tlab */,
                                       &gc_overhead_limit_was_exceeded);
  if (result != NULL) {
    *actual_size = requested_size;
  }

  return result;
}

// Requires "*prev_ptr" to be non-NULL. Deletes and returns a block of minimal
// size from the list headed by "*prev_ptr".
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  bool first = true;
  size_t min_size = 0;   // "first" makes this conceptually infinite.
  ScratchBlock **smallest_ptr, *smallest;
  ScratchBlock  *cur = *prev_ptr;
  while (cur) {
    assert(*prev_ptr == cur, "just checking");
    if (first || cur->num_words < min_size) {
      smallest_ptr = prev_ptr;
      smallest     = cur;
      min_size     = smallest->num_words;
      first        = false;
    }
    prev_ptr = &cur->next;
    cur     =  cur->next;
  }
  smallest      = *smallest_ptr;
  *smallest_ptr = smallest->next;
  return smallest;
}

// Sort the scratch block list headed by "list" into decreasing size order,
// and set "list" to the result.
static void sort_scratch_list(ScratchBlock*& list) {
  ScratchBlock* sorted = NULL;
  ScratchBlock* unsorted = list;
  while (unsorted) {
    ScratchBlock *smallest = removeSmallestScratch(&unsorted);
    smallest->next  = sorted;
    sorted          = smallest;
  }
  list = sorted;
}

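// Scratch blocks are spare chunks of free space that the generations lend
// to a requesting generation as temporary copy space during a collection
// (see Generation::contribute_scratch).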
ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                               size_t max_alloc_words) {
  ScratchBlock* res = NULL;
  _young_gen->contribute_scratch(res, requestor, max_alloc_words);
  _old_gen->contribute_scratch(res, requestor, max_alloc_words);
  sort_scratch_list(res);
  return res;
}

void GenCollectedHeap::release_scratch() {
  _young_gen->reset_scratch();
  _old_gen->reset_scratch();
}

class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
  void do_generation(Generation* gen) {
    gen->prepare_for_verify();
  }
};

void GenCollectedHeap::prepare_for_verify() {
  ensure_parsability(false);        // no need to retire TLABs
  GenPrepareForVerifyClosure blk;
  generation_iterate(&blk, false);
}

void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
  if (old_to_young) {
    cl->do_generation(_old_gen);
    cl->do_generation(_young_gen);
  } else {
    cl->do_generation(_young_gen);
    cl->do_generation(_old_gen);
  }
}

bool GenCollectedHeap::is_maximal_no_gc() const {
  return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
}

void GenCollectedHeap::save_marks() {
  _young_gen->save_marks();
  _old_gen->save_marks();
}

GenCollectedHeap* GenCollectedHeap::heap() {
  // SerialHeap is the only subtype of GenCollectedHeap.
  return named_heap<GenCollectedHeap>(CollectedHeap::Serial);
}

#if INCLUDE_SERIALGC
void GenCollectedHeap::prepare_for_compaction() {
  // Start by compacting into same gen.
  CompactPoint cp(_old_gen);
  _old_gen->prepare_for_compaction(&cp);
  _young_gen->prepare_for_compaction(&cp);
}
#endif // INCLUDE_SERIALGC

void GenCollectedHeap::verify(VerifyOption option /* ignored */) {
  log_debug(gc, verify)("%s", _old_gen->name());
  _old_gen->verify();

  log_debug(gc, verify)("%s", _young_gen->name());
  _young_gen->verify();

  log_debug(gc, verify)("RemSet");
  rem_set()->verify();
}

void GenCollectedHeap::print_on(outputStream* st) const {
  if (_young_gen != NULL) {
    _young_gen->print_on(st);
  }
  if (_old_gen != NULL) {
    _old_gen->print_on(st);
  }
  MetaspaceUtils::print_on(st);
}

void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
}

bool GenCollectedHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<GenCollectedHeap>::print_location(st, addr);
}

void GenCollectedHeap::print_tracing_info() const {
  if (log_is_enabled(Debug, gc, heap, exit)) {
    LogStreamHandle(Debug, gc, heap, exit) lsh;
    _young_gen->print_summary_info_on(&lsh);
    _old_gen->print_summary_info_on(&lsh);
  }
}

void GenCollectedHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
  const DefNewGeneration* const def_new_gen = (DefNewGeneration*) young_gen();

  log_info(gc, heap)(HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(def_new_gen->short_name(),
                                             pre_gc_values.young_gen_used(),
                                             pre_gc_values.young_gen_capacity(),
                                             def_new_gen->used(),
                                             def_new_gen->capacity()),
                     HEAP_CHANGE_FORMAT_ARGS("Eden",
                                             pre_gc_values.eden_used(),
                                             pre_gc_values.eden_capacity(),
                                             def_new_gen->eden()->used(),
                                             def_new_gen->eden()->capacity()),
                     HEAP_CHANGE_FORMAT_ARGS("From",
                                             pre_gc_values.from_used(),
                                             pre_gc_values.from_capacity(),
                                             def_new_gen->from()->used(),
                                             def_new_gen->from()->capacity()));
  log_info(gc, heap)(HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(old_gen()->short_name(),
                                             pre_gc_values.old_gen_used(),
                                             pre_gc_values.old_gen_capacity(),
                                             old_gen()->used(),
                                             old_gen()->capacity()));
  MetaspaceUtils::print_metaspace_change(pre_gc_values.metaspace_sizes());
}

class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_prologue(_full);
  }
  GenGCPrologueClosure(bool full) : _full(full) {}
};

void GenCollectedHeap::gc_prologue(bool full) {
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");

  // Fill TLABs and such
  ensure_parsability(true);   // retire TLABs

  // Walk generations
  GenGCPrologueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.
}

class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_epilogue(_full);
  }
  GenGCEpilogueClosure(bool full) : _full(full) {}
};

void GenCollectedHeap::gc_epilogue(bool full) {
#if COMPILER2_OR_JVMCI
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
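  // Compiled code allocates inline by bumping eden's top without an
  // overflow check; guarantee that the gap between eden's current end and
  // the top of the address space exceeds the largest inline allocation,
  // so that the bump can never wrap around.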
  size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
  guarantee(!CompilerConfig::is_c2_or_jvmci_compiler_enabled() || actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
#endif // COMPILER2_OR_JVMCI

  resize_all_tlabs();

  GenGCEpilogueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.

  MetaspaceCounters::update_performance_counters();
}

#ifndef PRODUCT
class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->record_spaces_top();
  }
};

void GenCollectedHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    GenGCSaveTopsBeforeGCClosure blk;
    generation_iterate(&blk, false);  // not old-to-young.
  }
}
#endif  // not PRODUCT

class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->ensure_parsability();
  }
};

void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  GenEnsureParsabilityClosure ep_cl;
  generation_iterate(&ep_cl, false);
}

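// Called when a scavenge fails to promote an object: try to expand the old
// generation and copy the object directly into the newly allocated space.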
oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
                                              oop obj,
                                              size_t obj_size) {
  guarantee(old_gen == _old_gen, "We only get here with an old generation");
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");

  HeapWord* result = old_gen->expand_and_allocate(obj_size, false);

  if (result != NULL) {
    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), result, obj_size);
  }
  return cast_to_oop(result);
}