/*
 * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "compiler/oopMap.hpp"
#include "gc/serial/defNewGeneration.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcInitLogger.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageParState.inline.hpp"
#include "gc/shared/oopStorageSet.inline.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/workgroup.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/autoRestore.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/vmError.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

GenCollectedHeap::GenCollectedHeap(Generation::Name young,
                                   Generation::Name old,
                                   const char* policy_counters_name) :
  CollectedHeap(),
  _young_gen(NULL),
  _old_gen(NULL),
  _young_gen_spec(new GenerationSpec(young,
                                     NewSize,
                                     MaxNewSize,
                                     GenAlignment)),
  _old_gen_spec(new GenerationSpec(old,
                                   OldSize,
                                   MaxOldSize,
                                   GenAlignment)),
  _rem_set(NULL),
  _soft_ref_gen_policy(),
  _size_policy(NULL),
  _gc_policy_counters(new GCPolicyCounters(policy_counters_name, 2, 2)),
  _incremental_collection_failed(false),
  _full_collections_completed(0),
  _young_manager(NULL),
  _old_manager(NULL) {
}

jint GenCollectedHeap::initialize() {
  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  // Allocate space for the heap.

  ReservedHeapSpace heap_rs = allocate(HeapAlignment);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  initialize_reserved_region(heap_rs);

  _rem_set = create_rem_set(heap_rs.region());
  _rem_set->initialize();
  CardTableBarrierSet *bs = new CardTableBarrierSet(_rem_set);
  bs->initialize();
  BarrierSet::set_barrier_set(bs);

  ReservedSpace young_rs = heap_rs.first_part(_young_gen_spec->max_size());
  _young_gen = _young_gen_spec->init(young_rs, rem_set());
  ReservedSpace old_rs = heap_rs.last_part(_young_gen_spec->max_size());

  old_rs = old_rs.first_part(_old_gen_spec->max_size());
  _old_gen = _old_gen_spec->init(old_rs, rem_set());

  GCInitLogger::print();

  return JNI_OK;
}

CardTableRS* GenCollectedHeap::create_rem_set(const MemRegion& reserved_region) {
  return new CardTableRS(reserved_region);
}

void GenCollectedHeap::initialize_size_policy(size_t init_eden_size,
                                              size_t init_promo_size,
                                              size_t init_survivor_size) {
  const double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0;
  _size_policy = new AdaptiveSizePolicy(init_eden_size,
                                        init_promo_size,
                                        init_survivor_size,
                                        max_gc_pause_sec,
                                        GCTimeRatio);
}

ReservedHeapSpace GenCollectedHeap::allocate(size_t alignment) {
  // Now figure out the total size.
  const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size();
  assert(alignment % pageSize == 0, "Must be");

  // Check for overflow.
  size_t total_reserved = _young_gen_spec->max_size() + _old_gen_spec->max_size();
  if (total_reserved < _young_gen_spec->max_size()) {
    vm_exit_during_initialization("The size of the object heap + VM data exceeds "
                                  "the maximum representable size");
  }
  assert(total_reserved % alignment == 0,
         "Gen size; total_reserved=" SIZE_FORMAT ", alignment="
         SIZE_FORMAT, total_reserved, alignment);

  ReservedHeapSpace heap_rs = Universe::reserve_heap(total_reserved, alignment);
  size_t used_page_size = heap_rs.page_size();

  os::trace_page_sizes("Heap",
                       MinHeapSize,
                       total_reserved,
                       used_page_size,
                       heap_rs.base(),
                       heap_rs.size());

  return heap_rs;
}

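// Closure handed to ScavengableNMethods (see post_initialize below): an
// object is considered scavengable if it lives in the young generation,
// so nmethods referencing such objects must be walked during young GCs.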
class GenIsScavengable : public BoolObjectClosure {
public:
  bool do_object_b(oop obj) {
    return GenCollectedHeap::heap()->is_in_young(obj);
  }
};

static GenIsScavengable _is_scavengable;

void GenCollectedHeap::post_initialize() {
  CollectedHeap::post_initialize();
  ref_processing_init();

  DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;

  initialize_size_policy(def_new_gen->eden()->capacity(),
                         _old_gen->capacity(),
                         def_new_gen->from()->capacity());

  MarkSweep::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
}

void GenCollectedHeap::ref_processing_init() {
  _young_gen->ref_processor_init();
  _old_gen->ref_processor_init();
}

PreGenGCValues GenCollectedHeap::get_pre_gc_values() const {
  const DefNewGeneration* const def_new_gen = (DefNewGeneration*) young_gen();

  return PreGenGCValues(def_new_gen->used(),
                        def_new_gen->capacity(),
                        def_new_gen->eden()->used(),
                        def_new_gen->eden()->capacity(),
                        def_new_gen->from()->used(),
                        def_new_gen->from()->capacity(),
                        old_gen()->used(),
                        old_gen()->capacity());
}

GenerationSpec* GenCollectedHeap::young_gen_spec() const {
  return _young_gen_spec;
}

GenerationSpec* GenCollectedHeap::old_gen_spec() const {
  return _old_gen_spec;
}

size_t GenCollectedHeap::capacity() const {
  return _young_gen->capacity() + _old_gen->capacity();
}

size_t GenCollectedHeap::used() const {
  return _young_gen->used() + _old_gen->used();
}

void GenCollectedHeap::save_used_regions() {
  _old_gen->save_used_region();
  _young_gen->save_used_region();
}

size_t GenCollectedHeap::max_capacity() const {
  return _young_gen->max_capacity() + _old_gen->max_capacity();
}

// Update the _full_collections_completed counter
// at the end of a stop-the-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  return _full_collections_completed;
}

// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)
// . heap memory is tight -- the most recent previous collection
//   was a full collection because a partial collection (would
//   have) failed and is likely to fail again
bool GenCollectedHeap::should_try_older_generation_allocation(size_t word_size) const {
  size_t young_capacity = _young_gen->capacity_before_gc();
  return    (word_size > heap_word_size(young_capacity))
         || GCLocker::is_active_and_needs_gc()
         || incremental_collection_failed();
}

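// Try to satisfy the allocation by expanding a generation in place: the
// old gen is tried first (when it accepts allocations of this kind), then
// the young gen. Returns NULL if neither generation can supply the space.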
HeapWord* GenCollectedHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
  HeapWord* result = NULL;
  if (_old_gen->should_allocate(size, is_tlab)) {
    result = _old_gen->expand_and_allocate(size, is_tlab);
  }
  if (result == NULL) {
    if (_young_gen->should_allocate(size, is_tlab)) {
      result = _young_gen->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == NULL || is_in_reserved(result), "result not in heap");
  return result;
}

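// Slow-path allocation. Each loop iteration proceeds in three stages: a
// lock-free attempt in the young gen, a locked attempt in all eligible
// generations under the Heap_lock, and finally a VM_GenCollectForAllocation
// safepoint operation that may run a GC on our behalf. Returns NULL when
// the request cannot be satisfied, or deliberately when the GC overhead
// limit has been exceeded (so the caller reports an out-of-memory error).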
HeapWord* GenCollectedHeap::mem_allocate_work(size_t size,
                                              bool is_tlab,
                                              bool* gc_overhead_limit_was_exceeded) {
  // In general gc_overhead_limit_was_exceeded should be false, so
  // set it to false here and reset it to true only if the GC time
  // limit is exceeded, as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied, or unsatisfied after GC.
  for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {

    // First allocation attempt is lock-free.
    Generation *young = _young_gen;
    assert(young->supports_inline_contig_alloc(),
           "Otherwise, must do alloc within heap lock");
    if (young->should_allocate(size, is_tlab)) {
      result = young->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    uint gc_count_before;  // Read inside the Heap_lock locked region.
    {
      MutexLocker ml(Heap_lock);
      log_trace(gc, alloc)("GenCollectedHeap::mem_allocate_work: attempting locked slow path allocation");
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = !should_try_older_generation_allocation(size);

      result = attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GCLocker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return NULL;  // Caller will retry allocating individual object.
        }
        if (!is_maximal_no_gc()) {
          // Try and expand heap to satisfy request.
          result = expand_heap_and_allocate(size, is_tlab);
          // Result could be null if we are out of space.
          if (result != NULL) {
            return result;
          }
        }

        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
          return NULL; // We didn't get to do a GC and we didn't get any memory.
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = total_collections();
    }

    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
        assert(result == NULL, "must be NULL if gc_locked() is true");
        continue;  // Retry and/or stall as necessary.
      }

      // Allocation has failed and a collection
      // has been done.  If the GC time limit was exceeded
      // this time, return NULL so that an out-of-memory error
      // will be thrown.  Clear gc_overhead_limit_exceeded
      // so that the condition does not persist.

      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      const bool softrefs_clear = soft_ref_policy()->all_soft_refs_clear();

      if (limit_exceeded && softrefs_clear) {
        *gc_overhead_limit_was_exceeded = true;
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (op.result() != NULL) {
          CollectedHeap::fill_with_object(op.result(), size);
        }
        return NULL;
      }
      assert(result == NULL || is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("GenCollectedHeap::mem_allocate_work retries %d times,"
                            " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}

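// Probe the generations in order without expanding them: young first,
// then (unless first_only is set) old. Per the note in mem_allocate_work,
// normally only large requests get a shot at old-gen allocation here.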
HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res = NULL;

  if (_young_gen->should_allocate(size, is_tlab)) {
    res = _young_gen->allocate(size, is_tlab);
    if (res != NULL || first_only) {
      return res;
    }
  }

  if (_old_gen->should_allocate(size, is_tlab)) {
    res = _old_gen->allocate(size, is_tlab);
  }

  return res;
}

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
  return mem_allocate_work(size,
                           false /* is_tlab */,
                           gc_overhead_limit_was_exceeded);
}

bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
         _gc_cause == GCCause::_wb_full_gc;
}

void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
                                          bool is_tlab, bool run_verification, bool clear_soft_refs) {
  FormatBuffer<> title("Collect gen: %s", gen->short_name());
  GCTraceTime(Trace, gc, phases) t1(title);
  TraceCollectorStats tcs(gen->counters());
  TraceMemoryManagerStats tmms(gen->gc_manager(), gc_cause());

  gen->stat_record()->invocations++;
  gen->stat_record()->accumulated_time.start();

  // Must be done anew before each collection because
  // a previous collection will do mangling and will
  // change top of some spaces.
  record_gen_tops_before_GC();

  log_trace(gc)("%s invoke=%d size=" SIZE_FORMAT, heap()->is_young_gen(gen) ? "Young" : "Old", gen->stat_record()->invocations, size * HeapWordSize);

  if (run_verification && VerifyBeforeGC) {
    Universe::verify("Before GC");
  }
  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::clear());

  // Do collection work
  {
    // Note on ref discovery: For what appear to be historical reasons,
    // GCH enables and disables (by enqueueing) refs discovery.
    // In the future this should be moved into the generation's
    // collect method so that ref discovery and enqueueing concerns
    // are local to a generation. The collect method could return
    // an appropriate indication in the case that notification on
    // the ref lock was needed. This will make the treatment of
    // weak refs more uniform (and indeed remove such concerns
    // from GCH). XXX

    save_marks();   // save marks for all gens
    // We want to discover references, but not process them yet.
    // This mode is disabled in process_discovered_references if the
    // generation does some collection work, or in
    // enqueue_discovered_references if the generation returns
    // without doing any work.
    ReferenceProcessor* rp = gen->ref_processor();
    rp->start_discovery(clear_soft_refs);

    gen->collect(full, clear_soft_refs, size, is_tlab);

    rp->disable_discovery();
    rp->verify_no_references_recorded();
  }

  COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::update_pointers());

  gen->stat_record()->accumulated_time.stop();

  update_gc_stats(gen, full);

  if (run_verification && VerifyAfterGC) {
    Universe::verify("After GC");
  }
}

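// Top-level collection driver, run at a safepoint by the VM thread with
// the Heap_lock held. A young collection is attempted first when it can
// make progress; should_do_full_collection() then decides whether the old
// generation must be collected as well, bounded by "max_generation".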
void GenCollectedHeap::do_collection(bool           full,
                                     bool           clear_all_soft_refs,
                                     size_t         size,
                                     bool           is_tlab,
                                     GenerationType max_generation) {
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread(), "only VM thread");
  assert(Heap_lock->is_locked(),
         "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");

  if (GCLocker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                          soft_ref_policy()->should_clear_all_soft_refs();

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, soft_ref_policy());

  AutoModifyRestore<bool> temporarily(_is_gc_active, true);

  bool complete = full && (max_generation == OldGen);
  bool old_collects_young = complete && !ScavengeBeforeFullGC;
  bool do_young_collection = !old_collects_young && _young_gen->should_collect(full, size, is_tlab);

  const PreGenGCValues pre_gc_values = get_pre_gc_values();

  bool run_verification = total_collections() >= VerifyGCStartAt;
  bool prepared_for_verification = false;
  bool do_full_collection = false;

  if (do_young_collection) {
    GCIdMark gc_id_mark;
    GCTraceCPUTime tcpu;
    GCTraceTime(Info, gc) t("Pause Young", NULL, gc_cause(), true);

    print_heap_before_gc();

    if (run_verification && VerifyGCLevel <= 0 && VerifyBeforeGC) {
      prepare_for_verify();
      prepared_for_verification = true;
    }

    gc_prologue(complete);
    increment_total_collections(complete);

    collect_generation(_young_gen,
                       full,
                       size,
                       is_tlab,
                       run_verification && VerifyGCLevel <= 0,
                       do_clear_all_soft_refs);

    if (size > 0 && (!is_tlab || _young_gen->supports_tlab_allocation()) &&
        size * HeapWordSize <= _young_gen->unsafe_max_alloc_nogc()) {
      // Allocation request was met by young GC.
      size = 0;
    }

    // Ask if young collection is enough. If so, do the final steps for young collection,
    // and fall through to the end.
    do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation);
    if (!do_full_collection) {
      // Adjust generation sizes.
      _young_gen->compute_new_size();

      print_heap_change(pre_gc_values);

      // Track memory usage and detect low memory after GC finishes
      MemoryService::track_memory_usage();

      gc_epilogue(complete);
    }

    print_heap_after_gc();

  } else {
    // No young collection, ask if we need to perform Full collection.
    do_full_collection = should_do_full_collection(size, full, is_tlab, max_generation);
  }

  if (do_full_collection) {
    GCIdMark gc_id_mark;
    GCTraceCPUTime tcpu;
    GCTraceTime(Info, gc) t("Pause Full", NULL, gc_cause(), true);

    print_heap_before_gc();

    if (!prepared_for_verification && run_verification &&
        VerifyGCLevel <= 1 && VerifyBeforeGC) {
      prepare_for_verify();
    }

    if (!do_young_collection) {
      gc_prologue(complete);
      increment_total_collections(complete);
    }

    // Accounting quirk: total full collections would be incremented when "complete"
    // is set, by calling increment_total_collections above. However, we also need to
    // account for Full collections that had "complete" unset.
    if (!complete) {
      increment_total_full_collections();
    }

    collect_generation(_old_gen,
                       full,
                       size,
                       is_tlab,
                       run_verification && VerifyGCLevel <= 1,
                       do_clear_all_soft_refs);

    // Adjust generation sizes.
    _old_gen->compute_new_size();
    _young_gen->compute_new_size();

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge(/*at_safepoint*/true);
    DEBUG_ONLY(MetaspaceUtils::verify();)
    // Resize the metaspace capacity after full collections
    MetaspaceGC::compute_new_size();
    update_full_collections_completed();

    print_heap_change(pre_gc_values);

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    // Need to tell the epilogue code we are done with Full GC, regardless what was
    // the initial value for "complete" flag.
    gc_epilogue(true);

    print_heap_after_gc();
  }
}

bool GenCollectedHeap::should_do_full_collection(size_t size, bool full, bool is_tlab,
                                                 GenCollectedHeap::GenerationType max_gen) const {
  return max_gen == OldGen && _old_gen->should_collect(full, size, is_tlab);
}

void GenCollectedHeap::register_nmethod(nmethod* nm) {
  ScavengableNMethods::register_nmethod(nm);
}

void GenCollectedHeap::unregister_nmethod(nmethod* nm) {
  ScavengableNMethods::unregister_nmethod(nm);
}

void GenCollectedHeap::verify_nmethod(nmethod* nm) {
  ScavengableNMethods::verify_nmethod(nm);
}

void GenCollectedHeap::flush_nmethod(nmethod* nm) {
  // Do nothing.
}

void GenCollectedHeap::prune_scavengable_nmethods() {
  ScavengableNMethods::prune_nmethods();
}

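// Last-resort allocation path, entered at a safepoint after an allocation
// has failed. Escalates from an incremental (or full) collection, to heap
// expansion, and finally to a maximally compacting full collection that
// clears all soft references. Returns NULL only if all of that fails.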
HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  GCCauseSetter x(this, GCCause::_allocation_failure);
  HeapWord* result = NULL;

  assert(size != 0, "Precondition violated");
  if (GCLocker::is_active_and_needs_gc()) {
    // GC locker is active; instead of a collection we will attempt
    // to expand the heap, if there's room for expansion.
    if (!is_maximal_no_gc()) {
      result = expand_heap_and_allocate(size, is_tlab);
    }
    return result;   // Could be null if we are out of space.
  } else if (!incremental_collection_will_fail(false /* don't consult_young */)) {
    // Do an incremental collection.
    do_collection(false,                     // full
                  false,                     // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  GenCollectedHeap::OldGen); // max_generation
  } else {
    log_trace(gc)(" :: Trying full because partial may fail :: ");
    // Try a full collection; see delta for bug id 6266275
    // for the original code and why this has been simplified
    // with from-space allocation criteria modified and
    // such allocation moved out of the safepoint path.
    do_collection(true,                      // full
                  false,                     // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  GenCollectedHeap::OldGen); // max_generation
  }

  result = attempt_allocation(size, is_tlab, false /*first_only*/);

  if (result != NULL) {
    assert(is_in_reserved(result), "result not in heap");
    return result;
  }

  // OK, collection failed, try expansion.
  result = expand_heap_and_allocate(size, is_tlab);
  if (result != NULL) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    do_collection(true,                      // full
                  true,                      // clear_all_soft_refs
                  size,                      // size
                  is_tlab,                   // is_tlab
                  GenCollectedHeap::OldGen); // max_generation
  }

  result = attempt_allocation(size, is_tlab, false /* first_only */);
  if (result != NULL) {
    assert(is_in_reserved(result), "result not in heap");
    return result;
  }

  assert(!soft_ref_policy()->should_clear_all_soft_refs(),
         "Flag should have been handled and cleared prior to this point");

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}

#ifdef ASSERT
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!GenCollectedHeap::heap()->is_in_partial_collection(*p),
           "Referent should not be scavengable.");
  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif

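// Apply the given closures to all strong roots: class loader data, thread
// stacks, and the strong OopStorage set. The ScanningOption selects how
// much of the code cache is visited in addition to roots found on stacks.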
void GenCollectedHeap::process_roots(ScanningOption so,
                                     OopClosure* strong_roots,
                                     CLDClosure* strong_cld_closure,
                                     CLDClosure* weak_cld_closure,
                                     CodeBlobToOopClosure* code_roots) {
  // General roots.
  assert(code_roots != NULL, "code root closure should always be set");

  ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);

  // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
  CodeBlobToOopClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;

  Threads::oops_do(strong_roots, roots_from_code_p);

  OopStorageSet::strong_oops_do(strong_roots);

  if (so & SO_ScavengeCodeCache) {
    assert(code_roots != NULL, "must supply closure for code cache");

    // We only visit parts of the CodeCache when scavenging.
    ScavengableNMethods::nmethods_do(code_roots);
  }
  if (so & SO_AllCodeCache) {
    assert(code_roots != NULL, "must supply closure for code cache");

    // CMSCollector uses this to do intermediate-strength collections.
    // We scan the entire code cache, since CodeCache::do_unloading is not called.
    CodeCache::blobs_do(code_roots);
  }
  // Verify that the code cache contents are not subject to
  // movement by a scavenging collection.
  DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
  DEBUG_ONLY(ScavengableNMethods::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
}

void GenCollectedHeap::full_process_roots(bool is_adjust_phase,
                                          ScanningOption so,
                                          bool only_strong_roots,
                                          OopClosure* root_closure,
                                          CLDClosure* cld_closure) {
  MarkingCodeBlobClosure mark_code_closure(root_closure, is_adjust_phase);
  CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;

  process_roots(so, root_closure, cld_closure, weak_cld_closure, &mark_code_closure);
}

void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
  WeakProcessor::oops_do(root_closure);
  _young_gen->ref_processor()->weak_oops_do(root_closure);
  _old_gen->ref_processor()->weak_oops_do(root_closure);
}

bool GenCollectedHeap::no_allocs_since_save_marks() {
  return _young_gen->no_allocs_since_save_marks() &&
         _old_gen->no_allocs_since_save_marks();
}

bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _young_gen->supports_inline_contig_alloc();
}

HeapWord* volatile* GenCollectedHeap::top_addr() const {
  return _young_gen->top_addr();
}

HeapWord** GenCollectedHeap::end_addr() const {
  return _young_gen->end_addr();
}

// public collection interfaces

void GenCollectedHeap::collect(GCCause::Cause cause) {
  if ((cause == GCCause::_wb_young_gc) ||
      (cause == GCCause::_gc_locker)) {
    // Young collection for WhiteBox or GCLocker.
    collect(cause, YoungGen);
  } else {
#ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
      // Young collection only.
      collect(cause, YoungGen);
    } else {
      // Stop-the-world full collection.
      collect(cause, OldGen);
    }
#else
    // Stop-the-world full collection.
    collect(cause, OldGen);
#endif
  }
}

void GenCollectedHeap::collect(GCCause::Cause cause, GenerationType max_generation) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
  collect_locked(cause, max_generation);
}

void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
  // The caller has the Heap_lock
  assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
  collect_locked(cause, OldGen);
}

// This is the private collection interface.
// The Heap_lock is expected to be held on entry.

void GenCollectedHeap::collect_locked(GCCause::Cause cause, GenerationType max_generation) {
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before      = total_collections();
  unsigned int full_gc_count_before = total_full_collections();

  if (GCLocker::should_discard(cause, gc_count_before)) {
    return;
  }

  {
    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_generation);
    VMThread::execute(&op);
  }
}

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_full_collection(clear_all_soft_refs, OldGen);
}

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          GenerationType last_generation) {
  do_collection(true,                   // full
                clear_all_soft_refs,    // clear_all_soft_refs
                0,                      // size
                false,                  // is_tlab
                last_generation);       // last_generation
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full
  if (gc_cause() == GCCause::_gc_locker && incremental_collection_failed()) {
    log_debug(gc, jni)("GC locker: Trying a full collection because scavenge failed");
    // This time allow the old gen to be collected as well
    do_collection(true,                // full
                  clear_all_soft_refs, // clear_all_soft_refs
                  0,                   // size
                  false,               // is_tlab
                  OldGen);             // last_generation
  }
}

bool GenCollectedHeap::is_in_young(oop p) {
  bool result = cast_from_oop<HeapWord*>(p) < _old_gen->reserved().start();
  assert(result == _young_gen->is_in_reserved(p),
         "incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p));
  return result;
}

// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
  return _young_gen->is_in(p) || _old_gen->is_in(p);
}

#ifdef ASSERT
// Don't implement this by using is_in_young().  This method is used
// in some cases to check that is_in_young() is correct.
bool GenCollectedHeap::is_in_partial_collection(const void* p) {
  assert(is_in_reserved(p) || p == NULL,
         "Does not work if address is non-null and outside of the heap");
  return p < _young_gen->reserved().end() && p != NULL;
}
#endif

void GenCollectedHeap::oop_iterate(OopIterateClosure* cl) {
  _young_gen->oop_iterate(cl);
  _old_gen->oop_iterate(cl);
}

void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  _young_gen->object_iterate(cl);
  _old_gen->object_iterate(cl);
}

Space* GenCollectedHeap::space_containing(const void* addr) const {
  Space* res = _young_gen->space_containing(addr);
  if (res != NULL) {
    return res;
  }
  res = _old_gen->space_containing(addr);
  assert(res != NULL, "Could not find containing space");
  return res;
}

HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  if (_young_gen->is_in_reserved(addr)) {
    assert(_young_gen->is_in(addr), "addr should be in allocated part of generation");
    return _young_gen->block_start(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  assert(_old_gen->is_in(addr), "addr should be in allocated part of generation");
  return _old_gen->block_start(addr);
}

bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  if (_young_gen->is_in_reserved(addr)) {
    return _young_gen->block_is_obj(addr);
  }

  assert(_old_gen->is_in_reserved(addr), "Some generation should contain the address");
  return _old_gen->block_is_obj(addr);
}

size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->tlab_capacity();
}

size_t GenCollectedHeap::tlab_used(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->tlab_used();
}

size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  assert(!_old_gen->supports_tlab_allocation(), "Old gen supports TLAB allocation?!");
  assert(_young_gen->supports_tlab_allocation(), "Young gen doesn't support TLAB allocation?!");
  return _young_gen->unsafe_max_tlab_alloc();
}

HeapWord* GenCollectedHeap::allocate_new_tlab(size_t min_size,
                                              size_t requested_size,
                                              size_t* actual_size) {
  bool gc_overhead_limit_was_exceeded;
  HeapWord* result = mem_allocate_work(requested_size /* size */,
                                       true /* is_tlab */,
                                       &gc_overhead_limit_was_exceeded);
  if (result != NULL) {
    *actual_size = requested_size;
  }

  return result;
}

// Requires "*prev_ptr" to be non-NULL.  Deletes and returns a block of
// minimal size from the list headed by "*prev_ptr".
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  bool first = true;
  size_t min_size = 0;   // "first" makes this conceptually infinite.
  ScratchBlock **smallest_ptr, *smallest;
  ScratchBlock  *cur = *prev_ptr;
  while (cur) {
    assert(*prev_ptr == cur, "just checking");
    if (first || cur->num_words < min_size) {
      smallest_ptr = prev_ptr;
      smallest     = cur;
      min_size     = smallest->num_words;
      first        = false;
    }
    prev_ptr = &cur->next;
    cur     =  cur->next;
  }
  smallest      = *smallest_ptr;
  *smallest_ptr = smallest->next;
  return smallest;
}

// Sort the scratch block list headed by "list" into decreasing size order,
// and set "list" to the result. This is a simple selection sort: blocks
// are repeatedly removed smallest-first and pushed onto the sorted list.
static void sort_scratch_list(ScratchBlock*& list) {
  ScratchBlock* sorted = NULL;
  ScratchBlock* unsorted = list;
  while (unsorted) {
    ScratchBlock *smallest = removeSmallestScratch(&unsorted);
    smallest->next  = sorted;
    sorted          = smallest;
  }
  list = sorted;
}

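// Gather scratch space contributed by both generations on behalf of
// "requestor", and hand the blocks back in decreasing size order (see
// sort_scratch_list above) so the largest contributions come first.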
ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                               size_t max_alloc_words) {
  ScratchBlock* res = NULL;
  _young_gen->contribute_scratch(res, requestor, max_alloc_words);
  _old_gen->contribute_scratch(res, requestor, max_alloc_words);
  sort_scratch_list(res);
  return res;
}

void GenCollectedHeap::release_scratch() {
  _young_gen->reset_scratch();
  _old_gen->reset_scratch();
}

class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
  void do_generation(Generation* gen) {
    gen->prepare_for_verify();
  }
};

void GenCollectedHeap::prepare_for_verify() {
  ensure_parsability(false);        // no need to retire TLABs
  GenPrepareForVerifyClosure blk;
  generation_iterate(&blk, false);
}

void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
  if (old_to_young) {
    cl->do_generation(_old_gen);
    cl->do_generation(_young_gen);
  } else {
    cl->do_generation(_young_gen);
    cl->do_generation(_old_gen);
  }
}

bool GenCollectedHeap::is_maximal_no_gc() const {
  return _young_gen->is_maximal_no_gc() && _old_gen->is_maximal_no_gc();
}

void GenCollectedHeap::save_marks() {
  _young_gen->save_marks();
  _old_gen->save_marks();
}

GenCollectedHeap* GenCollectedHeap::heap() {
  // SerialHeap is the only subtype of GenCollectedHeap.
  return named_heap<GenCollectedHeap>(CollectedHeap::Serial);
}

#if INCLUDE_SERIALGC
void GenCollectedHeap::prepare_for_compaction() {
  // Start by compacting into same gen.
  CompactPoint cp(_old_gen);
  _old_gen->prepare_for_compaction(&cp);
  _young_gen->prepare_for_compaction(&cp);
}
#endif // INCLUDE_SERIALGC

void GenCollectedHeap::verify(VerifyOption option /* ignored */) {
  log_debug(gc, verify)("%s", _old_gen->name());
  _old_gen->verify();

  log_debug(gc, verify)("%s", _young_gen->name());
  _young_gen->verify();

  log_debug(gc, verify)("RemSet");
  rem_set()->verify();
}

void GenCollectedHeap::print_on(outputStream* st) const {
  if (_young_gen != NULL) {
    _young_gen->print_on(st);
  }
  if (_old_gen != NULL) {
    _old_gen->print_on(st);
  }
  MetaspaceUtils::print_on(st);
}

void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
}

bool GenCollectedHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<GenCollectedHeap>::print_location(st, addr);
}

void GenCollectedHeap::print_tracing_info() const {
  if (log_is_enabled(Debug, gc, heap, exit)) {
    LogStreamHandle(Debug, gc, heap, exit) lsh;
    _young_gen->print_summary_info_on(&lsh);
    _old_gen->print_summary_info_on(&lsh);
  }
}

void GenCollectedHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
  const DefNewGeneration* const def_new_gen = (DefNewGeneration*) young_gen();

  log_info(gc, heap)(HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(def_new_gen->short_name(),
                                             pre_gc_values.young_gen_used(),
                                             pre_gc_values.young_gen_capacity(),
                                             def_new_gen->used(),
                                             def_new_gen->capacity()),
                     HEAP_CHANGE_FORMAT_ARGS("Eden",
                                             pre_gc_values.eden_used(),
                                             pre_gc_values.eden_capacity(),
                                             def_new_gen->eden()->used(),
                                             def_new_gen->eden()->capacity()),
                     HEAP_CHANGE_FORMAT_ARGS("From",
                                             pre_gc_values.from_used(),
                                             pre_gc_values.from_capacity(),
                                             def_new_gen->from()->used(),
                                             def_new_gen->from()->capacity()));
  log_info(gc, heap)(HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(old_gen()->short_name(),
                                             pre_gc_values.old_gen_used(),
                                             pre_gc_values.old_gen_capacity(),
                                             old_gen()->used(),
                                             old_gen()->capacity()));
  MetaspaceUtils::print_metaspace_change(pre_gc_values.metaspace_sizes());
}

class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_prologue(_full);
  }
  GenGCPrologueClosure(bool full) : _full(full) {}
};

void GenCollectedHeap::gc_prologue(bool full) {
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");

  // Fill TLAB's and such
  ensure_parsability(true);   // retire TLABs

  // Walk generations
  GenGCPrologueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.
}

class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_epilogue(_full);
  }
  GenGCEpilogueClosure(bool full) : _full(full) {}
};

void GenCollectedHeap::gc_epilogue(bool full) {
#if COMPILER2_OR_JVMCI
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
  size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
  guarantee(!CompilerConfig::is_c2_or_jvmci_compiler_enabled() || actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
#endif // COMPILER2_OR_JVMCI

  resize_all_tlabs();

  GenGCEpilogueClosure blk(full);
  generation_iterate(&blk, false);  // not old-to-young.

  MetaspaceCounters::update_performance_counters();
}

#ifndef PRODUCT
class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->record_spaces_top();
  }
};

void GenCollectedHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    GenGCSaveTopsBeforeGCClosure blk;
    generation_iterate(&blk, false);  // not old-to-young.
  }
}
#endif  // not PRODUCT

class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->ensure_parsability();
  }
};

void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  GenEnsureParsabilityClosure ep_cl;
  generation_iterate(&ep_cl, false);
}

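// Called when promotion of "obj" into the old generation failed during a
// young collection. As a last resort, try to expand the old gen and copy
// the object there; returns a NULL oop if even expansion cannot make room.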
oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
                                              oop obj,
                                              size_t obj_size) {
  guarantee(old_gen == _old_gen, "We only get here with an old generation");
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  HeapWord* result = NULL;

  result = old_gen->expand_and_allocate(obj_size, false);

  if (result != NULL) {
    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), result, obj_size);
  }
  return cast_to_oop(result);
}