/*
 * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "compiler/oopMap.hpp"
#include "gc/serial/defNewGeneration.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcInitLogger.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageParState.inline.hpp"
#include "gc/shared/oopStorageSet.inline.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/slidingForwarding.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/workgroup.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/autoRestore.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/vmError.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

GenCollectedHeap::GenCollectedHeap(Generation::Name young,
                                   Generation::Name old,
                                   const char* policy_counters_name) :
  CollectedHeap(),
  _young_gen(NULL),
  _old_gen(NULL),
  _young_gen_spec(new GenerationSpec(young,
                                     NewSize,
                                     MaxNewSize,
                                     GenAlignment)),
  _old_gen_spec(new GenerationSpec(old,
                                   OldSize,
                                   MaxOldSize,
                                   GenAlignment)),
  _rem_set(NULL),
  _soft_ref_gen_policy(),
  _size_policy(NULL),
  _gc_policy_counters(new GCPolicyCounters(policy_counters_name, 2, 2)),
  _incremental_collection_failed(false),
  _full_collections_completed(0),
  _young_manager(NULL),
  _old_manager(NULL) {
}
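
// The trailing (2, 2) arguments to GCPolicyCounters above size the perf
// counters for two collectors and two generations, matching the young/old
// pair this heap manages (our reading of the constructor arguments, noted
// as an assumption rather than a documented contract).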

jint GenCollectedHeap::initialize() {
  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  // Allocate space for the heap.

  ReservedHeapSpace heap_rs = allocate(HeapAlignment);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  initialize_reserved_region(heap_rs);
  _forwarding = new SlidingForwarding(_reserved);

  _rem_set = create_rem_set(heap_rs.region());
  _rem_set->initialize();
  CardTableBarrierSet *bs = new CardTableBarrierSet(_rem_set);
  bs->initialize();
  BarrierSet::set_barrier_set(bs);

  ReservedSpace young_rs = heap_rs.first_part(_young_gen_spec->max_size());
  _young_gen = _young_gen_spec->init(young_rs, rem_set());
  ReservedSpace old_rs = heap_rs.last_part(_young_gen_spec->max_size());

  old_rs = old_rs.first_part(_old_gen_spec->max_size());
  _old_gen = _old_gen_spec->init(old_rs, rem_set());

  GCInitLogger::print();

  return JNI_OK;
}
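
// Resulting address-space layout (from the first_part/last_part split
// above): the young generation occupies the low end of the single
// contiguous reservation and the old generation sits directly above it:
//
//   [ young (max_size) | old (max_size) ]
//   ^heap_rs.base()                     ^heap_rs.base() + total_reserved
//
// is_in_young() later relies on this ordering to classify an oop with a
// single address comparison against _old_gen->reserved().start().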

CardTableRS* GenCollectedHeap::create_rem_set(const MemRegion& reserved_region) {
  return new CardTableRS(reserved_region);
}

void GenCollectedHeap::initialize_size_policy(size_t init_eden_size,
                                              size_t init_promo_size,
                                              size_t init_survivor_size) {
  const double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0;
  _size_policy = new AdaptiveSizePolicy(init_eden_size,
                                        init_promo_size,
                                        init_survivor_size,
                                        max_gc_pause_sec,
                                        GCTimeRatio);
}

ReservedHeapSpace GenCollectedHeap::allocate(size_t alignment) {
  // Now figure out the total size.
  const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
  assert(alignment % pageSize == 0, "Must be");

  // Check for overflow.
  size_t total_reserved = _young_gen_spec->max_size() + _old_gen_spec->max_size();
  if (total_reserved < _young_gen_spec->max_size()) {
    vm_exit_during_initialization("The size of the object heap + VM data exceeds "
                                  "the maximum representable size");
  }
  assert(total_reserved % alignment == 0,
         "Gen size; total_reserved=" SIZE_FORMAT ", alignment="
         SIZE_FORMAT, total_reserved, alignment);

  ReservedHeapSpace heap_rs = Universe::reserve_heap(total_reserved, alignment);
  size_t used_page_size = heap_rs.page_size();

  os::trace_page_sizes("Heap",
                       MinHeapSize,
                       total_reserved,
                       used_page_size,
                       heap_rs.base(),
                       heap_rs.size());

  return heap_rs;
}
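
// Note on the overflow check above: size_t arithmetic wraps modulo 2^N, so
// a sum that comes out smaller than one of its operands must have
// overflowed. A minimal sketch of the idiom, independent of this file:
//
//   size_t total = a + b;
//   if (total < a) {
//     // a + b exceeded SIZE_MAX and wrapped around
//   }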

class GenIsScavengable : public BoolObjectClosure {
public:
  bool do_object_b(oop obj) {
    return GenCollectedHeap::heap()->is_in_young(obj);
  }
};

static GenIsScavengable _is_scavengable;

void GenCollectedHeap::post_initialize() {
  CollectedHeap::post_initialize();
  ref_processing_init();

  DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;

  initialize_size_policy(def_new_gen->eden()->capacity(),
                         _old_gen->capacity(),
                         def_new_gen->from()->capacity());

  MarkSweep::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
}

void GenCollectedHeap::ref_processing_init() {
  _young_gen->ref_processor_init();
  _old_gen->ref_processor_init();
}

PreGenGCValues GenCollectedHeap::get_pre_gc_values() const {
  const DefNewGeneration* const def_new_gen = (DefNewGeneration*) young_gen();

  return PreGenGCValues(def_new_gen->used(),
                        def_new_gen->capacity(),
                        def_new_gen->eden()->used(),
                        def_new_gen->eden()->capacity(),
                        def_new_gen->from()->used(),
                        def_new_gen->from()->capacity(),
                        old_gen()->used(),
                        old_gen()->capacity());
}

GenerationSpec* GenCollectedHeap::young_gen_spec() const {
  return _young_gen_spec;
}

GenerationSpec* GenCollectedHeap::old_gen_spec() const {
  return _old_gen_spec;
}

size_t GenCollectedHeap::capacity() const {
  return _young_gen->capacity() + _old_gen->capacity();
}

size_t GenCollectedHeap::used() const {
  return _young_gen->used() + _old_gen->used();
}

void GenCollectedHeap::save_used_regions() {
  _old_gen->save_used_region();
  _young_gen->save_used_region();
}

size_t GenCollectedHeap::max_capacity() const {
  return _young_gen->max_capacity() + _old_gen->max_capacity();
}

// Update the _full_collections_completed counter
// at the end of a stop-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  return _full_collections_completed;
}

// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)
// . heap memory is tight -- the most recent previous collection
//   was a full collection because a partial collection (would
//   have) failed and is likely to fail again
bool GenCollectedHeap::should_try_older_generation_allocation(size_t word_size) const {
  size_t young_capacity = _young_gen->capacity_before_gc();
  return    (word_size > heap_word_size(young_capacity))
         || GCLocker::is_active_and_needs_gc()
         || incremental_collection_failed();
}
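
// (word_size above is measured in HeapWords while capacity_before_gc()
// returns bytes; heap_word_size() converts bytes to HeapWords so the
// comparison is unit-consistent.)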

HeapWord* GenCollectedHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
  HeapWord* result = NULL;
  if (_old_gen->should_allocate(size, is_tlab)) {
    result = _old_gen->expand_and_allocate(size, is_tlab);
  }
  if (result == NULL) {
    if (_young_gen->should_allocate(size, is_tlab)) {
      result = _young_gen->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == NULL || is_in_reserved(result), "result not in heap");
  return result;
}
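
// Ordering note: the old generation gets the first chance to expand; the
// young generation is tried only if that fails. Callers reach this path
// when a GC cannot run (e.g. the GCLocker is held), so expansion is the
// only remaining way to satisfy the request.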

HeapWord* GenCollectedHeap::mem_allocate_work(size_t size,
                                              bool is_tlab,
                                              bool* gc_overhead_limit_was_exceeded) {
  // In general gc_overhead_limit_was_exceeded should be false, so
  // set it to false here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied, or unsatisfied after GC.
  for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {

    // First allocation attempt is lock-free.
    Generation *young = _young_gen;
    assert(young->supports_inline_contig_alloc(),
      "Otherwise, must do alloc within heap lock");
    if (young->should_allocate(size, is_tlab)) {
      result = young->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    uint gc_count_before;  // Read inside the Heap_lock locked region.
    {
      MutexLocker ml(Heap_lock);
      log_trace(gc, alloc)("GenCollectedHeap::mem_allocate_work: attempting locked slow path allocation");
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = !should_try_older_generation_allocation(size);

      result = attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GCLocker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return NULL;  // Caller will retry allocating individual object.
        }
        if (!is_maximal_no_gc()) {
          // Try and expand heap to satisfy request.
          result = expand_heap_and_allocate(size, is_tlab);
          // Result could be null if we are out of space.
          if (result != NULL) {
            return result;
          }
        }

        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
          return NULL; // We didn't get to do a GC and we didn't get any memory.
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = total_collections();
    }

    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
         assert(result == NULL, "must be NULL if gc_locked() is true");
         continue;  // Retry and/or stall as necessary.
      }

      // Allocation has failed and a collection
      // has been done.  If the gc time limit was exceeded
      // this time, return NULL so that an out-of-memory exception
      // will be thrown.  Clear gc_overhead_limit_exceeded
      // so that the exceeded state does not persist.

      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      const bool softrefs_clear = soft_ref_policy()->all_soft_refs_clear();

      if (limit_exceeded && softrefs_clear) {
        *gc_overhead_limit_was_exceeded = true;
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (op.result() != NULL) {
          CollectedHeap::fill_with_object(op.result(), size);
        }
        return NULL;
      }
      assert(result == NULL || is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
          log_warning(gc, ergo)("GenCollectedHeap::mem_allocate_work retries %d times,"
                                " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}
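
// Summary of the retry loop above: (1) lock-free fast path in the young
// gen; (2) locked attempt_allocation() across generations; (3) if the
// GCLocker blocks collection, expand the heap or stall until the critical
// section clears; (4) otherwise hand a VM_GenCollectForAllocation to the
// VM thread and re-examine the result. The loop exits only by returning
// memory or NULL.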

HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res = NULL;

  if (_young_gen->should_allocate(size, is_tlab)) {
    res = _young_gen->allocate(size, is_tlab);
    if (res != NULL || first_only) {
      return res;
    }
  }

  if (_old_gen->should_allocate(size, is_tlab)) {
    res = _old_gen->allocate(size, is_tlab);
  }

  return res;
}
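
// With first_only == true the old generation is never consulted. Callers
// use this to keep ordinary allocations out of the old gen; see
// should_try_older_generation_allocation(), which only lets large requests
// (or tight-memory situations) fall through to it.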

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
  return mem_allocate_work(size,
                           false /* is_tlab */,
                           gc_overhead_limit_was_exceeded);
}

bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
         _gc_cause == GCCause::_wb_full_gc;
}

void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
                                          bool is_tlab, bool run_verification, bool clear_soft_refs,
                                          bool restore_marks_for_biased_locking) {
  FormatBuffer<> title("Collect gen: %s", gen->short_name());
  GCTraceTime(Trace, gc, phases) t1(title);
  TraceCollectorStats tcs(gen->counters());
  TraceMemoryManagerStats tmms(gen->gc_manager(), gc_cause());

  gen->stat_record()->invocations++;
  gen->stat_record()->accumulated_time.start();

  // Must be done anew before each collection because
  // a previous collection will do mangling and will
  // change the top of some spaces.
  record_gen_tops_before_GC();

  log_trace(gc)("%s invoke=%d size=" SIZE_FORMAT, heap()->is_young_gen(gen) ? "Young" : "Old", gen->stat_record()->invocations, size * HeapWordSize);

  if (run_verification && VerifyBeforeGC) {
    Universe::verify("Before GC");
  }
  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::clear());

  if (restore_marks_for_biased_locking) {
    // We perform this mark word preservation work lazily
    // because it's only at this point that we know whether we
    // absolutely have to do it; we want to avoid doing it for
    // scavenge-only collections where it's unnecessary.
    BiasedLocking::preserve_marks();
  }

  // Do collection work
  {
    // Note on ref discovery: For what appear to be historical reasons,
    // GCH enables and disables (by enqueueing) refs discovery.
    // In the future this should be moved into the generation's
    // collect method so that ref discovery and enqueueing concerns
    // are local to a generation. The collect method could return
    // an appropriate indication in the case that notification on
    // the ref lock was needed. This will make the treatment of
    // weak refs more uniform (and indeed remove such concerns
    // from GCH). XXX

    save_marks();   // save marks for all gens
    // We want to discover references, but not process them yet.
    // This mode is disabled in process_discovered_references if the
    // generation does some collection work, or in
    // enqueue_discovered_references if the generation returns
    // without doing any work.
    ReferenceProcessor* rp = gen->ref_processor();
    // If the discovery of ("weak") refs in this generation is
    // atomic wrt other collectors in this configuration, we
    // are guaranteed to have empty discovered ref lists.
    if (rp->discovery_is_atomic()) {
      rp->enable_discovery();
      rp->setup_policy(clear_soft_refs);
    } else {
      // collect() below will enable discovery as appropriate
    }
    gen->collect(full, clear_soft_refs, size, is_tlab);
    if (!rp->enqueuing_is_done()) {
      rp->disable_discovery();
    } else {
      rp->set_enqueuing_is_done(false);
    }
    rp->verify_no_references_recorded();
  }

  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::update_pointers());

  gen->stat_record()->accumulated_time.stop();

  update_gc_stats(gen, full);

  if (run_verification && VerifyAfterGC) {
    Universe::verify("After GC");
  }
}

void GenCollectedHeap::do_collection(bool           full,
                                     bool           clear_all_soft_refs,
                                     size_t         size,
                                     bool           is_tlab,
                                     GenerationType max_generation) {
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread(), "only VM thread");
  assert(Heap_lock->is_locked(),
         "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");

  if (GCLocker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                          soft_ref_policy()->should_clear_all_soft_refs();

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, soft_ref_policy());

  AutoModifyRestore<bool> temporarily(_is_gc_active, true);

  bool complete = full && (max_generation == OldGen);
  bool old_collects_young = complete && !ScavengeBeforeFullGC;
  bool do_young_collection = !old_collects_young && _young_gen->should_collect(full, size, is_tlab);

  const PreGenGCValues pre_gc_values = get_pre_gc_values();

  bool run_verification = total_collections() >= VerifyGCStartAt;
  bool prepared_for_verification = false;
  bool do_full_collection = false;

  if (do_young_collection) {
    GCIdMark gc_id_mark;
    GCTraceCPUTime tcpu;
    GCTraceTime(Info, gc) t("Pause Young", NULL, gc_cause(), true);

    print_heap_before_gc();

    if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
      prepare_for_verify();
      prepared_for_verification = true;
    }

    gc_prologue(complete);
    increment_total_collections(complete);

    collect_generation(_young_gen,
                       full,
                       size,
                       is_tlab,
                       run_verification && VerifyGCLevel <= 0,
                       do_clear_all_soft_refs,
                       false);

    if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
        size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {
      // Allocation request was met by young GC.
      size = 0;
    }

    // Ask if the young collection is enough. If so, do the final steps for
    // the young collection, and fall through to the end.
    do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation);
    if (!do_full_collection) {
      // Adjust generation sizes.
      _young_gen->compute_new_size();

      print_heap_change(pre_gc_values);

      // Track memory usage and detect low memory after GC finishes
      MemoryService::track_memory_usage();

      gc_epilogue(complete);
    }

    print_heap_after_gc();

  } else {
    // No young collection, ask if we need to perform Full collection.
    do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation);
  }

  if (do_full_collection) {
    GCIdMark gc_id_mark;
    GCTraceCPUTime tcpu;
    GCTraceTime(Info, gc) t("Pause Full", NULL, gc_cause(), true);

    print_heap_before_gc();

    if (!prepared_for_verification && run_verification &&
        VerifyGCLevel <= 1 && VerifyBeforeGC) {
      prepare_for_verify();
    }

    if (!do_young_collection) {
      gc_prologue(complete);
      increment_total_collections(complete);
    }

    // Accounting quirk: total full collections are incremented when "complete"
    // is set, by the call to increment_total_collections above. However, we
    // also need to account for Full collections that had "complete" unset.
    if (!complete) {
      increment_total_full_collections();
    }

    collect_generation(_old_gen,
                       full,
                       size,
                       is_tlab,
                       run_verification && VerifyGCLevel <= 1,
                       do_clear_all_soft_refs,
                       true);

    // Adjust generation sizes.
    _old_gen->compute_new_size();
    _young_gen->compute_new_size();

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge(/*at_safepoint*/true);
    DEBUG_ONLY(MetaspaceUtils::verify();)
    // Resize the metaspace capacity after full collections
    MetaspaceGC::compute_new_size();
    update_full_collections_completed();

    print_heap_change(pre_gc_values);

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    // Need to tell the epilogue code we are done with Full GC, regardless what was
    // the initial value for "complete" flag.
    gc_epilogue(true);

    BiasedLocking::restore_marks();

    print_heap_after_gc();
  }
}
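
// Decision summary for do_collection() above (a paraphrase, not normative):
// a young collection runs unless a complete collection is requested and
// ScavengeBeforeFullGC is disabled (in which case the old gen's collection
// covers the young gen); a full collection then follows iff
// should_do_full_collection() agrees, i.e. max_generation == OldGen and
// the old generation itself wants to collect.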

bool GenCollectedHeap::should_do_full_collection(size_t size, bool full, bool is_tlab,
                                                 GenCollectedHeap::GenerationType max_gen) const {
  return max_gen == OldGen && _old_gen->should_collect(full, size, is_tlab);
}

void GenCollectedHeap::register_nmethod(nmethod* nm) {
  ScavengableNMethods::register_nmethod(nm);
}

void GenCollectedHeap::unregister_nmethod(nmethod* nm) {
  ScavengableNMethods::unregister_nmethod(nm);
}

void GenCollectedHeap::verify_nmethod(nmethod* nm) {
  ScavengableNMethods::verify_nmethod(nm);
}

void GenCollectedHeap::flush_nmethod(nmethod* nm) {
  // Do nothing.
}

void GenCollectedHeap::prune_scavengable_nmethods() {
  ScavengableNMethods::prune_nmethods();
}

HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  GCCauseSetter x(this, GCCause::_allocation_failure);
  HeapWord* result = NULL;

  assert(size != 0, "Precondition violated");
  if (GCLocker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;   // Could be null if we are out of space.
  } else if (!incremental_collection_will_fail(false /* don't consult_young */)) {
    // Do an incremental collection.
    do_collection(false,                     // full
                  false,                     // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  GenCollectedHeap::OldGen); // max_generation
  } else {
    log_trace(gc)(" :: Trying full because partial may fail :: ");
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    do_collection(true,                      // full
                  false,                     // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  GenCollectedHeap::OldGen); // max_generation
  }

  result = attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != NULL) {
    assert(is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != NULL) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    do_collection(true,                      // full
                  true,                      // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  GenCollectedHeap::OldGen); // max_generation
  }

  result = attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != NULL) {
    assert(is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!soft_ref_policy()->should_clear_all_soft_refs(),
    "Flag should have been handled and cleared prior to this point");

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}
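
// Escalation ladder used by satisfy_failed_allocation() above: expand only
// (GCLocker active) -> incremental collection, or full collection when the
// incremental one would fail -> allocate -> expand -> full collection with
// soft refs cleared and maximal compaction -> allocate -> give up (the
// caller throws OutOfMemoryError).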

#ifdef ASSERT
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!GenCollectedHeap::heap()->is_in_partial_collection(*p),
           "Referent should not be scavengable.");
  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif

void GenCollectedHeap::process_roots(ScanningOption so,
                                     OopClosure* strong_roots,
                                     CLDClosure* strong_cld_closure,
                                     CLDClosure* weak_cld_closure,
                                     CodeBlobToOopClosure* code_roots) {
  // General roots.
  assert(code_roots != NULL, "code root closure should always be set");

  ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);

  // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
  CodeBlobToOopClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;

  Threads::oops_do(strong_roots, roots_from_code_p);

  OopStorageSet::strong_oops_do(strong_roots);

  if (so & SO_ScavengeCodeCache) {
    assert(code_roots != NULL, "must supply closure for code cache");

    // We only visit parts of the CodeCache when scavenging.
    ScavengableNMethods::nmethods_do(code_roots);
  }
  if (so & SO_AllCodeCache) {
    assert(code_roots != NULL, "must supply closure for code cache");

    // CMSCollector uses this to do intermediate-strength collections.
    // We scan the entire code cache, since CodeCache::do_unloading is not called.
    CodeCache::blobs_do(code_roots);
  }
  // Verify that the code cache contents are not subject to
  // movement by a scavenging collection.
  DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
  DEBUG_ONLY(ScavengableNMethods::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
}
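
// Strong vs. weak CLD closures: passing NULL for weak_cld_closure visits
// only class loader data that must be kept alive, while a non-NULL weak
// closure also visits loader data that may yet be unloaded;
// full_process_roots() below selects between the two via only_strong_roots.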

void GenCollectedHeap::full_process_roots(bool is_adjust_phase,
                                          ScanningOption so,
                                          bool only_strong_roots,
                                          OopClosure* root_closure,
                                          CLDClosure* cld_closure) {
  MarkingCodeBlobClosure mark_code_closure(root_closure, is_adjust_phase);
  CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;

  process_roots(so, root_closure, cld_closure, weak_cld_closure, &mark_code_closure);
}

void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
  WeakProcessor::oops_do(root_closure);
  _young_gen->ref_processor()->weak_oops_do(root_closure);
  _old_gen->ref_processor()->weak_oops_do(root_closure);
}

bool GenCollectedHeap::no_allocs_since_save_marks() {
  return _young_gen->no_allocs_since_save_marks() &&
         _old_gen->no_allocs_since_save_marks();
}

bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _young_gen->supports_inline_contig_alloc();
}

HeapWord* volatile* GenCollectedHeap::top_addr() const {
  return _young_gen->top_addr();
}

HeapWord** GenCollectedHeap::end_addr() const {
  return _young_gen->end_addr();
}

// public collection interfaces

void GenCollectedHeap::collect(GCCause::Cause cause) {
  if ((cause == GCCause::_wb_young_gc) ||
      (cause == GCCause::_gc_locker)) {
    // Young collection for WhiteBox or GCLocker.
    collect(cause, YoungGen);
  } else {
#ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
      // Young collection only.
      collect(cause, YoungGen);
    } else {
      // Stop-the-world full collection.
      collect(cause, OldGen);
    }
#else
    // Stop-the-world full collection.
    collect(cause, OldGen);
#endif
  }
}

void GenCollectedHeap::collect(GCCause::Cause cause, GenerationType max_generation) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
  collect_locked(cause, max_generation);
}

void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
  // The caller has the Heap_lock
  assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
  collect_locked(cause, OldGen);
}

// this is the private collection interface
// The Heap_lock is expected to be held on entry.

void GenCollectedHeap::collect_locked(GCCause::Cause cause, GenerationType max_generation) {
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before      = total_collections();
  unsigned int full_gc_count_before = total_full_collections();

  if (GCLocker::should_discard(cause, gc_count_before)) {
    return;
  }

  {
    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_generation);
    VMThread::execute(&op);
  }
}

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_full_collection(clear_all_soft_refs, OldGen);
}

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          GenerationType last_generation) {
  do_collection(true,                   // full
                clear_all_soft_refs,    // clear_all_soft_refs
                0,                      // size
                false,                  // is_tlab
                last_generation);       // last_generation
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full
  if (gc_cause() == GCCause::_gc_locker && incremental_collection_failed()) {
    log_debug(gc, jni)("GC locker: Trying a full collection because scavenge failed");
    // This time allow the old gen to be collected as well
    do_collection(true,                // full
                  clear_all_soft_refs, // clear_all_soft_refs
                  0,                   // size
                  false,               // is_tlab
                  OldGen);             // last_generation
  }
}

bool GenCollectedHeap::is_in_young(oop p) {
  bool result = cast_from_oop<HeapWord*>(p) < _old_gen->reserved().start();
  assert(result == _young_gen->is_in_reserved(p),
         "incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p));
  return result;
}

// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
  return _young_gen->is_in(p) || _old_gen->is_in(p);
}

#ifdef ASSERT
// Don't implement this by using is_in_young().  This method is used
// in some cases to check that is_in_young() is correct.
bool GenCollectedHeap::is_in_partial_collection(const void* p) {
  assert(is_in_reserved(p) || p == NULL,
    "Does not work if address is non-null and outside of the heap");
  return p < _young_gen->reserved().end() && p != NULL;
}
#endif

void GenCollectedHeap::oop_iterate(OopIterateClosure* cl) {
  _young_gen->oop_iterate(cl);
  _old_gen->oop_iterate(cl);
}

void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  _young_gen->object_iterate(cl);
  _old_gen->object_iterate(cl);
}

Space* GenCollectedHeap::space_containing(const void* addr) const {
  Space* res = _young_gen->space_containing(addr);
  if (res != NULL) {
    return res;
  }
  res = _old_gen->space_containing(addr);
  assert(res != NULL, "Could not find containing space");
  return res;
}

HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  if (_young_gen->is_in_reserved(addr)) {
    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
    return _young_gen->block_start(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
  return _old_gen->block_start(addr);
}

bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  if (_young_gen->is_in_reserved(addr)) {
    return _young_gen->block_is_obj(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  return _old_gen->block_is_obj(addr);
}

size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->tlab_capacity();
}

size_t GenCollectedHeap::tlab_used(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->tlab_used();
}

size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->unsafe_max_tlab_alloc();
}

HeapWord* GenCollectedHeap::allocate_new_tlab(size_t min_size,
                                              size_t requested_size,
                                              size_t* actual_size) {
  bool gc_overhead_limit_was_exceeded;
  HeapWord* result = mem_allocate_work(requested_size /* size */,
                                       true /* is_tlab */,
                                       &gc_overhead_limit_was_exceeded);
  if (result != NULL) {
    *actual_size = requested_size;
  }

  return result;
}
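
// mem_allocate_work() either satisfies the full requested_size or fails;
// there is no partial-TLAB sizing on this path, so on success actual_size
// is always exactly requested_size (min_size is effectively unused here).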

// Requires "*prev_ptr" to be non-NULL.  Deletes and returns a block of
// minimal size from the list headed by "*prev_ptr".
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  bool first = true;
  size_t min_size = 0;   // "first" makes this conceptually infinite.
  ScratchBlock **smallest_ptr, *smallest;
  ScratchBlock  *cur = *prev_ptr;
  while (cur) {
    assert(*prev_ptr == cur, "just checking");
    if (first || cur->num_words < min_size) {
      smallest_ptr = prev_ptr;
      smallest     = cur;
      min_size     = smallest->num_words;
      first        = false;
    }
    prev_ptr = &cur->next;
    cur     =  cur->next;
  }
  smallest      = *smallest_ptr;
  *smallest_ptr = smallest->next;
  return smallest;
}

// Sort the scratch block list headed by "list" into decreasing size order,
// and set "list" to the result.
static void sort_scratch_list(ScratchBlock*& list) {
  ScratchBlock* sorted = NULL;
  ScratchBlock* unsorted = list;
  while (unsorted) {
    ScratchBlock *smallest = removeSmallestScratch(&unsorted);
    smallest->next  = sorted;
    sorted          = smallest;
  }
  list = sorted;
}
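
// Together these two helpers form a simple selection sort: repeatedly
// unlink the smallest remaining block and push it onto the front of
// "sorted", which leaves the list in decreasing size order. O(n^2), which
// is acceptable for the short scratch lists involved.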

ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                               size_t max_alloc_words) {
  ScratchBlock* res = NULL;
  _young_gen->contribute_scratch(res, requestor, max_alloc_words);
  _old_gen->contribute_scratch(res, requestor, max_alloc_words);
  sort_scratch_list(res);
  return res;
}

void GenCollectedHeap::release_scratch() {
  _young_gen->reset_scratch();
  _old_gen->reset_scratch();
}

class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
  void do_generation(Generation* gen) {
    gen->prepare_for_verify();
  }
};

void GenCollectedHeap::prepare_for_verify() {
  ensure_parsability(false);        // no need to retire TLABs
  GenPrepareForVerifyClosure blk;
  generation_iterate(&blk, false);
}

void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
  if (old_to_young) {
    cl->do_generation(_old_gen);
    cl->do_generation(_young_gen);
  } else {
    cl->do_generation(_young_gen);
    cl->do_generation(_old_gen);
  }
}

bool GenCollectedHeap::is_maximal_no_gc() const {
  return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
}

void GenCollectedHeap::save_marks() {
  _young_gen->save_marks();
  _old_gen->save_marks();
}

GenCollectedHeap* GenCollectedHeap::heap() {
  // SerialHeap is the only subtype of GenCollectedHeap.
  return named_heap<GenCollectedHeap>(CollectedHeap::Serial);
}

#if INCLUDE_SERIALGC
void GenCollectedHeap::prepare_for_compaction() {
  // Start by compacting into same gen.
  CompactPoint cp(_old_gen);
  _forwarding->clear();
  _old_gen->prepare_for_compaction(&cp);
  _young_gen->prepare_for_compaction(&cp);
}
#endif // INCLUDE_SERIALGC

void GenCollectedHeap::verify(VerifyOption option /* ignored */) {
  log_debug(gc, verify)("%s", _old_gen->name());
  _old_gen->verify();

  log_debug(gc, verify)("%s", _young_gen->name());
  _young_gen->verify();

  log_debug(gc, verify)("RemSet");
  rem_set()->verify();
}

void GenCollectedHeap::print_on(outputStream* st) const {
  if (_young_gen != NULL) {
    _young_gen->print_on(st);
  }
  if (_old_gen != NULL) {
    _old_gen->print_on(st);
  }
  MetaspaceUtils::print_on(st);
}

void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
}

bool GenCollectedHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<GenCollectedHeap>::print_location(st, addr);
}

void GenCollectedHeap::print_tracing_info() const {
  if (log_is_enabled(Debug, gc, heap, exit)) {
    LogStreamHandle(Debug, gc, heap, exit) lsh;
    _young_gen->print_summary_info_on(&lsh);
    _old_gen->print_summary_info_on(&lsh);
  }
}

void GenCollectedHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
  const DefNewGeneration* const def_new_gen = (DefNewGeneration*) young_gen();

  log_info(gc, heap)(HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(def_new_gen->short_name(),
                                             pre_gc_values.young_gen_used(),
                                             pre_gc_values.young_gen_capacity(),
                                             def_new_gen->used(),
                                             def_new_gen->capacity()),
                     HEAP_CHANGE_FORMAT_ARGS("Eden",
                                             pre_gc_values.eden_used(),
                                             pre_gc_values.eden_capacity(),
                                             def_new_gen->eden()->used(),
                                             def_new_gen->eden()->capacity()),
                     HEAP_CHANGE_FORMAT_ARGS("From",
                                             pre_gc_values.from_used(),
                                             pre_gc_values.from_capacity(),
                                             def_new_gen->from()->used(),
                                             def_new_gen->from()->capacity()));
  log_info(gc, heap)(HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(old_gen()->short_name(),
                                             pre_gc_values.old_gen_used(),
                                             pre_gc_values.old_gen_capacity(),
                                             old_gen()->used(),
                                             old_gen()->capacity()));
  MetaspaceUtils::print_metaspace_change(pre_gc_values.metaspace_sizes());
}

class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_prologue(_full);
  }
  GenGCPrologueClosure(bool full) : _full(full) {}
};

void GenCollectedHeap::gc_prologue(bool full) {
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");

  // Fill TLAB's and such
  ensure_parsability(true);   // retire TLABs

  // Walk generations
  GenGCPrologueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.
}

class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_epilogue(_full);
  }
  GenGCEpilogueClosure(bool full) : _full(full) {}
};

void GenCollectedHeap::gc_epilogue(bool full) {
#if COMPILER2_OR_JVMCI
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
  size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
  guarantee(!CompilerConfig::is_c2_or_jvmci_compiler_enabled() || actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
#endif // COMPILER2_OR_JVMCI

  resize_all_tlabs();

  GenGCEpilogueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.

  MetaspaceCounters::update_performance_counters();
}

#ifndef PRODUCT
class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->record_spaces_top();
  }
};

void GenCollectedHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    GenGCSaveTopsBeforeGCClosure blk;
    generation_iterate(&blk, false);  // not old-to-young.
  }
}
#endif  // not PRODUCT

class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->ensure_parsability();
  }
};

void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  GenEnsureParsabilityClosure ep_cl;
  generation_iterate(&ep_cl, false);
}

oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
                                              oop obj,
                                              size_t obj_size) {
  guarantee(old_gen == _old_gen, "We only get here with an old generation");
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");

  HeapWord* result = old_gen->expand_and_allocate(obj_size, false);

  if (result != NULL) {
    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), result, obj_size);
  }
  return cast_to_oop(result);
}