/*
 * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1COLLECTEDHEAP_HPP
#define SHARE_GC_G1_G1COLLECTEDHEAP_HPP

#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BiasedArray.hpp"
#include "gc/g1/g1CardSet.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1EdenRegions.hpp"
#include "gc/g1/g1EvacStats.hpp"
#include "gc/g1/g1HeapRegionAttr.hpp"
#include "gc/g1/g1HeapRegionManager.hpp"
#include "gc/g1/g1HeapRegionSet.hpp"
#include "gc/g1/g1HeapTransition.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1MonitoringSupport.hpp"
#include "gc/g1/g1MonotonicArenaFreeMemoryTask.hpp"
#include "gc/g1/g1MonotonicArenaFreePool.hpp"
#include "gc/g1/g1NUMA.hpp"
#include "gc/g1/g1SurvivorRegions.hpp"
#include "gc/g1/g1YoungGCAllocationFailureInjector.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/plab.hpp"
#include "gc/shared/taskqueue.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/threadSMR.hpp"
#include "utilities/bitMap.hpp"

// A "G1CollectedHeap" is an implementation of a Java heap for HotSpot.
// It uses the "Garbage First" heap organization and algorithm, which
// may combine concurrent marking with parallel, incremental compaction of
// heap subsets that will yield large amounts of garbage.

// Forward declarations
class G1Allocator;
class G1BatchedTask;
class G1CardTableEntryClosure;
class G1ConcurrentMark;
class G1ConcurrentMarkThread;
class G1ConcurrentRefine;
class G1GCCounters;
class G1GCPhaseTimes;
class G1HeapSizingPolicy;
class G1NewTracer;
class G1RemSet;
class G1ReviseYoungLengthTask;
class G1ServiceTask;
class G1ServiceThread;
class GCMemoryManager;
class G1HeapRegion;
class MemoryPool;
class nmethod;
class PartialArrayStateManager;
class ReferenceProcessor;
class STWGCTimer;
class WorkerThreads;

typedef OverflowTaskQueue<ScannerTask, mtGC>           G1ScannerTasksQueue;
typedef GenericTaskQueueSet<G1ScannerTasksQueue, mtGC> G1ScannerTasksQueueSet;

typedef int RegionIdx_t;   // needs to hold [ 0..max_num_regions() )
typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )

// The G1 STW is-alive closure.
// An instance is embedded into the G1CollectedHeap and used as the
// (optional) _is_alive_non_header closure in the STW
// reference processor. It is also used extensively during
// reference processing in STW evacuation pauses.
class G1STWIsAliveClosure : public BoolObjectClosure {
  G1CollectedHeap* _g1h;
public:
  G1STWIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
  bool do_object_b(oop p) override;
};

class G1STWSubjectToDiscoveryClosure : public BoolObjectClosure {
  G1CollectedHeap* _g1h;
public:
  G1STWSubjectToDiscoveryClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
  bool do_object_b(oop p) override;
};

class G1RegionMappingChangedListener : public G1MappingChangedListener {
 private:
  void reset_from_card_cache(uint start_idx, size_t num_regions);
 public:
  void on_commit(uint start_idx, size_t num_regions, bool zero_filled) override;
};

// Helper to claim contiguous sets of JavaThread for processing by multiple threads.
class G1JavaThreadsListClaimer : public StackObj {
  ThreadsListHandle _list;
  uint _claim_step;

  Atomic<uint> _cur_claim;

  // Attempts to claim _claim_step JavaThreads, returning an array of claimed
  // JavaThread* with count elements. Returns null (and a zero count) if there
  // are no more threads to claim.
  JavaThread* const* claim(uint& count);

public:
  G1JavaThreadsListClaimer(uint claim_step) : _list(), _claim_step(claim_step), _cur_claim(0) {
    assert(claim_step > 0, "must be");
  }

  // Executes the given closure on the elements of the JavaThread list, chunking the
  // JavaThread set into claim_step-sized chunks for each caller to reduce
  // parallelization overhead.
  void apply(ThreadClosure* cl);

  // Total number of JavaThreads that can be claimed.
  uint length() const { return _list.length(); }
};
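
// Illustrative usage sketch (not part of the original header): several GC
// worker threads can share a single claimer and drain the JavaThread list in
// claim_step-sized chunks. MyThreadClosure is a hypothetical ThreadClosure
// subclass.
//
//   G1JavaThreadsListClaimer claimer(16 /* claim_step */);
//   // in each worker thread:
//   MyThreadClosure cl;
//   claimer.apply(&cl);  // claims and processes chunks of 16 threads until done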

class G1CollectedHeap : public CollectedHeap {
  friend class VM_G1CollectForAllocation;
  friend class VM_G1CollectFull;
  friend class VM_G1TryInitiateConcMark;
  friend class VMStructs;
  friend class MutatorAllocRegion;
  friend class G1FullCollector;
  friend class G1GCAllocRegion;
  friend class G1HeapVerifier;

  friend class G1YoungGCVerifierMark;

  // Closures used in implementation.
  friend class G1EvacuateRegionsTask;
  friend class G1PLABAllocator;

  // Other related classes.
  friend class G1HeapPrinterMark;
  friend class G1HeapRegionClaimer;

  // Testing classes.
  friend class G1CheckRegionAttrTableClosure;

private:
  // GC Overhead Limit functionality related members.
  //
  // The goal is to prematurely return null for allocations (before actually
  // going OOME) when both GC CPU usage is high (>= GCTimeLimit) and there is
  // not much free memory available (<= GCHeapFreeLimit), so that applications
  // can exit gracefully or try to keep running by easing off memory use.
  uintx _gc_overhead_counter;        // The number of consecutive garbage collections we were over the limits.

  void update_gc_overhead_counter();
  bool gc_overhead_limit_exceeded();

  G1ServiceThread* _service_thread;
  G1ServiceTask* _periodic_gc_task;
  G1MonotonicArenaFreeMemoryTask* _free_arena_memory_task;
  G1ReviseYoungLengthTask* _revise_young_length_task;

  WorkerThreads* _workers;

  // The current epoch for refinement, i.e. the number of times the card tables
  // have been swapped by a garbage collection.
  // Used for detecting whether concurrent refinement has been interrupted by a
  // garbage collection.
  size_t _refinement_epoch;

  // The following members are for tracking safepoint durations between garbage
  // collections.
  jlong _last_synchronized_start;

  jlong _last_refinement_epoch_start;
  jlong _yield_duration_in_refinement_epoch;       // Time spent in safepoints since beginning of last refinement epoch.
  size_t _last_safepoint_refinement_epoch;         // Refinement epoch before last safepoint.

  Ticks _collection_pause_end;

  static size_t _humongous_object_threshold_in_words;

  // These sets keep track of old and humongous regions respectively.
  G1HeapRegionSet _old_set;
  G1HeapRegionSet _humongous_set;

  // Young gen memory statistics before GC.
  G1MonotonicArenaMemoryStats _young_gen_card_set_stats;
  // Collection set candidates memory statistics after GC.
  G1MonotonicArenaMemoryStats _collection_set_candidates_card_set_stats;

  // The block offset table for the G1 heap.
  G1BlockOffsetTable* _bot;

public:
  void rebuild_free_region_list();
  // Start a new incremental collection set for the next pause.
  void start_new_collection_set();

  void prepare_region_for_full_compaction(G1HeapRegion* hr);

private:
  // Rebuilds the region sets / lists so that they are repopulated to
  // reflect the contents of the heap. The only exception is the
  // humongous set which was not torn down in the first place. If
  // free_list_only is true, it will only rebuild the free list.
  void rebuild_region_sets(bool free_list_only);

  // Callback for region mapping changed events.
  G1RegionMappingChangedListener _listener;

  // Handle G1 NUMA support.
  G1NUMA* _numa;

  // The sequence of all heap regions in the heap.
  G1HeapRegionManager _hrm;

  // Manages all allocations within regions, except humongous object allocations.
  G1Allocator* _allocator;

  G1YoungGCAllocationFailureInjector _allocation_failure_injector;

  // Manages all heap verification.
  G1HeapVerifier* _verifier;

  // Outside of GC pauses, the number of bytes used in all regions other
  // than the current allocation region(s).
  volatile size_t _summary_bytes_used;

  void increase_used(size_t bytes);
  void decrease_used(size_t bytes);

  void set_used(size_t bytes);

  // Number of bytes used in all regions during GC. Typically changed when
  // retiring a GC alloc region.
  size_t _bytes_used_during_gc;

public:
  size_t bytes_used_during_gc() const { return _bytes_used_during_gc; }

private:
  // GC allocation statistics policy for survivors.
  G1EvacStats _survivor_evac_stats;

  // GC allocation statistics policy for tenured objects.
  G1EvacStats _old_evac_stats;

  // Helper for monitoring and management support.
  G1MonitoringSupport* _monitoring_support;

  uint _num_humongous_objects; // Current number of (all) humongous objects found in the heap.
  uint _num_humongous_reclaim_candidates; // Number of humongous object eager reclaim candidates.
public:
  uint num_humongous_objects() const { return _num_humongous_objects; }
  uint num_humongous_reclaim_candidates() const { return _num_humongous_reclaim_candidates; }
  bool has_humongous_reclaim_candidates() const { return _num_humongous_reclaim_candidates > 0; }

  void set_humongous_stats(uint num_humongous_total, uint num_humongous_candidates);

  bool should_sample_collection_set_candidates() const;
  void set_collection_set_candidates_stats(G1MonotonicArenaMemoryStats& stats);
  void set_young_gen_card_set_stats(const G1MonotonicArenaMemoryStats& stats);

  void update_perf_counter_cpu_time();
private:

  // Return true if an explicit GC should start a concurrent cycle instead
  // of doing a STW full GC. A concurrent cycle should be started if:
  // (a) cause == _g1_humongous_allocation,
  // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent,
  // (c) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent,
  // (d) cause == _wb_breakpoint,
  // (e) cause == _g1_periodic_collection and +G1PeriodicGCInvokesConcurrent.
  bool should_do_concurrent_full_gc(GCCause::Cause cause);

  // Wait until a full mark (either currently in progress or one that completed
  // after the current request) has finished. Returns whether that full mark started
  // after this request. If so, we typically do not need another one.
  bool wait_full_mark_finished(GCCause::Cause cause,
                               uint old_marking_started_before,
                               uint old_marking_started_after,
                               uint old_marking_completed_after);

  // Attempt to start a concurrent cycle with the indicated cause, for potentially
  // allocating allocation_word_size words.
  // precondition: should_do_concurrent_full_gc(cause)
  bool try_collect_concurrently(size_t allocation_word_size,
                                GCCause::Cause cause,
                                uint gc_counter,
                                uint old_marking_started_before);

  // indicates whether we are in young or mixed GC mode
  G1CollectorState _collector_state;

  // Keeps track of how many "old marking cycles" (i.e., Full GCs or
  // concurrent cycles) we have started.
  volatile uint _old_marking_cycles_started;

  // Keeps track of how many "old marking cycles" (i.e., Full GCs or
  // concurrent cycles) we have completed.
  volatile uint _old_marking_cycles_completed;

  // Create a memory mapper for auxiliary data structures of the given size and
  // translation factor.
  static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
                                                         size_t size,
                                                         size_t translation_factor);

  void trace_heap(GCWhen::Type when, const GCTracer* tracer) override;

  // These are macros so that, if the assert fires, we get the correct
  // line number, file, etc.

#define heap_locking_asserts_params(_extra_message_)                          \
  "%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",            \
  (_extra_message_),                                                          \
  BOOL_TO_STR(Heap_lock->owned_by_self()),                                    \
  BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),                       \
  BOOL_TO_STR(Thread::current()->is_VM_thread())

#define assert_heap_locked()                                                  \
  do {                                                                        \
    assert(Heap_lock->owned_by_self(),                                        \
           heap_locking_asserts_params("should be holding the Heap_lock"));   \
  } while (0)

#define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_)             \
  do {                                                                        \
    assert(Heap_lock->owned_by_self() ||                                      \
           (SafepointSynchronize::is_at_safepoint() &&                        \
             ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \
           heap_locking_asserts_params("should be holding the Heap_lock or "  \
                                        "should be at a safepoint"));         \
  } while (0)

#define assert_heap_locked_and_not_at_safepoint()                             \
  do {                                                                        \
    assert(Heap_lock->owned_by_self() &&                                      \
                                    !SafepointSynchronize::is_at_safepoint(), \
          heap_locking_asserts_params("should be holding the Heap_lock and "  \
                                       "should not be at a safepoint"));      \
  } while (0)

#define assert_heap_not_locked()                                              \
  do {                                                                        \
    assert(!Heap_lock->owned_by_self(),                                       \
        heap_locking_asserts_params("should not be holding the Heap_lock"));  \
  } while (0)

#define assert_heap_not_locked_and_not_at_safepoint()                         \
  do {                                                                        \
    assert(!Heap_lock->owned_by_self() &&                                     \
                                    !SafepointSynchronize::is_at_safepoint(), \
      heap_locking_asserts_params("should not be holding the Heap_lock and "  \
                                   "should not be at a safepoint"));          \
  } while (0)

#define assert_at_safepoint_on_vm_thread()                                        \
  do {                                                                            \
    assert_at_safepoint();                                                        \
    assert(Thread::current_or_null() != nullptr, "no current thread");            \
    assert(Thread::current()->is_VM_thread(), "current thread is not VM thread"); \
  } while (0)

#ifdef ASSERT
#define assert_used_and_recalculate_used_equal(g1h)                           \
  do {                                                                        \
    size_t cur_used_bytes = g1h->used();                                      \
    size_t recal_used_bytes = g1h->recalculate_used();                        \
    assert(cur_used_bytes == recal_used_bytes,                                \
           "Used(%zu) is not the same as recalculated used(%zu).",            \
           cur_used_bytes, recal_used_bytes);                                 \
  } while (0)
#else
#define assert_used_and_recalculate_used_equal(g1h) do {} while(0)
#endif
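
// Example of how the locking assertions above are typically used (illustrative
// only): a mutator-side allocation entry point would begin with
//
//   assert_heap_not_locked_and_not_at_safepoint();
//
// while a helper that must run inside a safepoint on the VM thread would begin
// with
//
//   assert_at_safepoint_on_vm_thread();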

  // The young region list.
  G1EdenRegions _eden;
  G1SurvivorRegions _survivor;

  STWGCTimer* _gc_timer_stw;

  G1NewTracer* _gc_tracer_stw;

  // The current policy object for the collector.
  G1Policy* _policy;
  G1HeapSizingPolicy* _heap_sizing_policy;

  G1CollectionSet _collection_set;

  // Try to allocate a single non-humongous G1HeapRegion sufficient for
  // an allocation of the given word_size. If do_expand is true,
  // attempt to expand the heap if necessary to satisfy the allocation
  // request. 'type' specifies the type of region to allocate. (Use the
  // constants Old, Eden, Humongous, Survivor defined in G1HeapRegionType.)
  G1HeapRegion* new_region(size_t word_size,
                           G1HeapRegionType type,
                           bool do_expand,
                           uint node_index = G1NUMA::AnyNodeIndex);

  // Initialize a contiguous set of free regions of length num_regions
  // and starting at region first_hr so that they appear as a single
  // humongous region.
  HeapWord* humongous_obj_allocate_initialize_regions(G1HeapRegion* first_hr,
                                                      uint num_regions,
                                                      size_t word_size);

  // Attempt to allocate a humongous object of the given size. Return
  // null if unsuccessful.
  HeapWord* humongous_obj_allocate(size_t word_size);

  // The following two methods, allocate_new_tlab() and
  // mem_allocate(), are the two main entry points from the runtime
  // into G1's allocation routines. They have the following
  // assumptions:
  //
  // * They should both be called outside safepoints.
  //
  // * They should both be called without holding the Heap_lock.
  //
  // * All allocation requests for new TLABs should go to
  //   allocate_new_tlab().
  //
  // * All non-TLAB allocation requests should go to mem_allocate().
  //
  // * If either call cannot satisfy the allocation request using the
  //   current allocating region, they will try to get a new one. If
  //   this fails, (only) mem_allocate() will attempt to do an evacuation
  //   pause and retry the allocation; allocate_new_tlab() will return null,
  //   deferring to the following mem_allocate().
  //
  // * We do not allow humongous-sized TLABs. So, allocate_new_tlab()
  //   should never be called with a humongous word_size. All
  //   humongous allocation requests should go to mem_allocate(), which
  //   will satisfy them in a special path.

  HeapWord* allocate_new_tlab(size_t min_size,
                              size_t requested_size,
                              size_t* actual_size) override;

  HeapWord* mem_allocate(size_t word_size) override;
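
  // Illustrative sketch of the dispatch implied by the assumptions above
  // (assumed shape, not the actual implementation): humongous requests bypass
  // the TLAB machinery entirely and take the Heap_lock-based slow path.
  //
  //   if (is_humongous(word_size)) {
  //     result = attempt_allocation_humongous(word_size);
  //   } else {
  //     result = attempt_allocation(min_size, word_size, &actual_size, true /* allow_gc */);
  //   }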

  // First-level mutator allocation attempt: try to allocate out of
  // the mutator alloc region without taking the Heap_lock. This
  // should only be used for non-humongous allocations.
  inline HeapWord* attempt_allocation(size_t min_word_size,
                                      size_t desired_word_size,
                                      size_t* actual_word_size,
                                      bool allow_gc);
  // Second-level mutator allocation attempt: take the Heap_lock and
  // retry the allocation attempt, potentially scheduling a GC
  // pause if allow_gc is set. This should only be used for non-humongous
  // allocations.
  HeapWord* attempt_allocation_slow(uint node_index, size_t word_size, bool allow_gc);

  // Takes the Heap_lock and attempts a humongous allocation. It can
  // potentially schedule a GC pause.
  HeapWord* attempt_allocation_humongous(size_t word_size);

  // Allocation attempt that should be called during safepoints (e.g.,
  // at the end of a successful GC). expect_null_mutator_alloc_region
  // specifies whether the mutator alloc region is expected to be null
  // or not.
  HeapWord* attempt_allocation_at_safepoint(size_t word_size,
                                            bool expect_null_mutator_alloc_region);

  // These methods are the "callbacks" from the G1AllocRegion class.

  // For mutator alloc regions.
  G1HeapRegion* new_mutator_alloc_region(size_t word_size, uint node_index);
  void retire_mutator_alloc_region(G1HeapRegion* alloc_region,
                                   size_t allocated_bytes);

  // For GC alloc regions.
  bool has_more_regions(G1HeapRegionAttr dest);
  G1HeapRegion* new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest, uint node_index);
  void retire_gc_alloc_region(G1HeapRegion* alloc_region,
                              size_t allocated_bytes, G1HeapRegionAttr dest);

  void resize_heap(size_t resize_bytes, bool should_expand);

  // - if clear_all_soft_refs is true, all soft references should be
  //   cleared during the GC.
  // - if do_maximal_compaction is true, the full GC will do a maximally
  //   compacting collection, leaving no dead wood.
  // - if allocation_word_size is set, then this allocation size will
  //   be accounted for in case shrinking of the heap happens.
  void do_full_collection(size_t allocation_word_size,
                          bool clear_all_soft_refs,
                          bool do_maximal_compaction);

  // Callback from VM_G1CollectFull operation, or collect_as_vm_thread.
  void do_full_collection(bool clear_all_soft_refs) override;

  // Helper to do a full collection that clears soft references.
  void upgrade_to_full_collection();

  // Callback from VM_G1CollectForAllocation operation.
  // This function does everything necessary/possible to satisfy a
  // failed allocation request (including collection, expansion, etc.)
  HeapWord* satisfy_failed_allocation(size_t word_size);
  // Internal helpers used during full GC to split it up to
  // increase readability.
  bool abort_concurrent_cycle();
  void verify_before_full_collection();
  void prepare_heap_for_full_collection();
  void prepare_for_mutator_after_full_collection(size_t allocation_word_size);
  void abort_refinement();
  void verify_after_full_collection();
  void print_heap_after_full_collection();

  // Helper method for satisfy_failed_allocation()
  HeapWord* satisfy_failed_allocation_helper(size_t word_size,
                                             bool do_gc,
                                             bool maximal_compaction,
                                             bool expect_null_mutator_alloc_region);

  // Attempt to expand the heap sufficiently
  // to support an allocation of the given "word_size".  If
  // successful, perform the allocation and return the address of the
  // allocated block, or else null.
  HeapWord* expand_and_allocate(size_t word_size);

  void verify_numa_regions(const char* desc);

public:
  // During a concurrent start pause we may install a pending list head which is
  // not otherwise reachable; ensure that it is marked in the bitmap for
  // concurrent marking to discover.
  void make_pending_list_reachable();

  G1ServiceThread* service_thread() const { return _service_thread; }

  WorkerThreads* workers() const { return _workers; }

  // Run the given batch task using the workers.
  void run_batch_task(G1BatchedTask* cl);

  // Return the "optimal" number of chunks per region to use when claiming areas
  // within a region during card table scanning.
  // The returned value is a trade-off between granularity of work distribution and
  // the memory usage and maintenance costs of that table.
  // Testing showed that 64 chunks for 1M/2M regions, 128 for 4M/8M regions,
  // 256 for 16M/32M regions, and so on, is a good trade-off.
  static uint get_chunks_per_region_for_scan();
  // Return the "optimal" number of chunks per region to use when claiming areas
  // within a region during card table merging.
  // This is much smaller than for scanning as the merge work is much smaller.
  // Currently 1 for 1M regions, 2 for 2M/4M regions, 4 for 8M/16M regions and so on.
  static uint get_chunks_per_region_for_merge();

  G1Allocator* allocator() {
    return _allocator;
  }

  G1YoungGCAllocationFailureInjector* allocation_failure_injector() { return &_allocation_failure_injector; }

  G1HeapVerifier* verifier() {
    return _verifier;
  }

  G1MonitoringSupport* monitoring_support() {
    assert(_monitoring_support != nullptr, "should have been initialized");
    return _monitoring_support;
  }

  void pin_object(JavaThread* thread, oop obj) override;
  void unpin_object(JavaThread* thread, oop obj) override;

  void resize_heap_after_young_collection(size_t allocation_word_size);
  void resize_heap_after_full_collection(size_t allocation_word_size);

  // Check if there is memory to uncommit and if so schedule a task to do it.
  void uncommit_regions_if_necessary();
  // Immediately uncommit uncommittable regions.
  uint uncommit_regions(uint region_limit);
  bool has_uncommittable_regions();

  G1NUMA* numa() const { return _numa; }

  // Expand the garbage-first heap by at least the given size (in bytes!).
  // Returns true if the heap was expanded by the requested amount;
  // false otherwise.
  // (Rounds up to a G1HeapRegion boundary.)
  bool expand(size_t expand_bytes, WorkerThreads* pretouch_workers);
  bool expand_single_region(uint node_index);

  // Returns the PLAB statistics for a given destination.
  inline G1EvacStats* alloc_buffer_stats(G1HeapRegionAttr dest);

  // Determines PLAB size for a given destination.
  inline size_t desired_plab_sz(G1HeapRegionAttr dest);
  // Clamp the given PLAB word size to allowed values. Prevents humongous PLAB sizes
  // for two reasons:
  // * PLABs are allocated using paths similar to those for oops, but should
  //   never be in a humongous region
  // * Allowing humongous PLABs needlessly churns the region free lists
  inline size_t clamp_plab_size(size_t value) const;

  // Do anything common to GCs.
  void gc_prologue(bool full);
  void gc_epilogue(bool full);

  // Can concurrent mark process this object immediately, i.e. mark it as live
  // without needing to push it on the mark stack (to process its references)?
  // Used to keep objects that are potentially eagerly reclaimed off the mark stack.
  // Its klass may still need to be handled.
  inline bool can_be_marked_through_immediately(oop obj) const;
  // Does the given region fulfill remembered set based eager reclaim candidate requirements?
  bool is_potential_eager_reclaim_candidate(G1HeapRegion* r) const;

  inline bool is_humongous_reclaim_candidate(uint region);

  // Remove from the reclaim candidate set.  Also remove from the
  // collection set so that later encounters avoid the slow path.
  inline void set_humongous_is_live(oop obj);

  // Register the given region to be part of the collection set.
  inline void register_humongous_candidate_region_with_region_attr(uint index);

  void set_humongous_metadata(G1HeapRegion* first_hr,
                              uint num_regions,
                              size_t word_size,
                              bool update_remsets);

  // The following methods update the region attribute table, i.e. a compact
  // representation of per-region information that is regularly accessed
  // during GC.
  inline void register_young_region_with_region_attr(G1HeapRegion* r);
  inline void register_new_survivor_region_with_region_attr(G1HeapRegion* r);
  inline void register_old_collection_set_region_with_region_attr(G1HeapRegion* r);
  inline void register_optional_region_with_region_attr(G1HeapRegion* r);

  // Updates region state without overwriting the type in the region attribute table.
  inline void update_region_attr(G1HeapRegion* r);

  void clear_region_attr(const G1HeapRegion* hr) {
    _region_attr.clear(hr);
  }

  void clear_region_attr() {
    _region_attr.clear();
  }

  // Verify that the G1RegionAttr remset tracking corresponds to actual remset tracking
  // for all regions.
  void verify_region_attr_is_remset_tracked() PRODUCT_RETURN;

  void clear_bitmap_for_region(G1HeapRegion* hr);

  bool is_user_requested_concurrent_full_gc(GCCause::Cause cause);

  // This is called at the start of either a concurrent cycle or a Full
  // GC to update the number of old marking cycles started.
  void increment_old_marking_cycles_started();

  // This is called at the end of either a concurrent cycle or a Full
  // GC to update the number of old marking cycles completed. Those two
  // can happen in a nested fashion, i.e., we start a concurrent
  // cycle, a Full GC happens half-way through it which ends first,
  // and then the cycle notices that a Full GC happened and ends
  // too. The concurrent parameter is a boolean to help us do a bit
  // tighter consistency checking in the method. If concurrent is
  // false, the caller is the inner caller in the nesting (i.e., the
  // Full GC). If concurrent is true, the caller is the outer caller
  // in this nesting (i.e., the concurrent cycle). Further nesting is
  // not currently supported. The end of this call also notifies
  // the G1OldGCCount_lock in case a Java thread is waiting for a full
  // GC to happen (e.g., it called System.gc() with
  // +ExplicitGCInvokesConcurrent).
  // whole_heap_examined should indicate whether during that old marking
  // cycle the whole heap was examined for live objects (as opposed
  // to only parts, or the cycle being aborted before completion).
  void increment_old_marking_cycles_completed(bool concurrent, bool whole_heap_examined);
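
  // Illustrative timeline of the nesting described above (each event maps to
  // the calls documented in the surrounding comments):
  //
  //   concurrent cycle starts           -> increment_old_marking_cycles_started()
  //   Full GC interrupts it, ends first -> increment_old_marking_cycles_completed(false /* concurrent */, ...)
  //   concurrent cycle notices it, ends -> increment_old_marking_cycles_completed(true /* concurrent */, ...)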

  uint old_marking_cycles_started() const {
    return _old_marking_cycles_started;
  }

  uint old_marking_cycles_completed() const {
    return _old_marking_cycles_completed;
  }

  // Allocates a new heap region instance.
  G1HeapRegion* new_heap_region(uint hrs_index, MemRegion mr);

  // Frees a region by resetting its metadata and adding it to the free list
  // passed as a parameter (this is usually a local list which will be appended
  // to the master free list later or null if free list management is handled
  // in another way).
  // Callers must ensure they are the only one calling free on the given region
  // at the same time.
  void free_region(G1HeapRegion* hr, G1FreeRegionList* free_list);

  // Add the given region to the retained regions collection set candidates.
  void retain_region(G1HeapRegion* hr);

  // Frees a humongous region by collapsing it into individual regions
  // and calling free_region() for each of them. The freed regions
  // will be added to the free list that's passed as a parameter (this
  // is usually a local list which will be appended to the master free
  // list later).
  // The method assumes that only a single thread is ever calling
  // this for a particular region at once.
  void free_humongous_region(G1HeapRegion* hr,
                             G1FreeRegionList* free_list);

  // Execute func(G1HeapRegion* r, bool is_last) on every region covered by the
  // given range.
  template <typename Func>
  void iterate_regions_in_range(MemRegion range, const Func& func);
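
  // Illustrative usage sketch (the closure body is hypothetical): applying
  // per-region work to every region covered by a MemRegion could look like
  //
  //   iterate_regions_in_range(range, [&](G1HeapRegion* r, bool is_last) {
  //     /* per-region work; is_last is true for the final covered region */
  //   });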

  // Commit the required number of G1 region(s) according to the size requested
  // and mark them as 'old' region(s).
  // This API is only used for allocating heap space for the archived heap objects
  // in the CDS archive.
  HeapWord* alloc_archive_region(size_t word_size);

  // Populate the G1BlockOffsetTable for archived regions with the given
  // memory range.
  void populate_archive_regions_bot(MemRegion range);

  // For the specified range, uncommit the containing G1 regions
  // which had been allocated by alloc_archive_region. This should be called
  // at JVM init time if the archive heap's contents cannot be used (e.g., if
  // CRC check fails).
  void dealloc_archive_regions(MemRegion range);

private:

  // Shrink the garbage-first heap by at most the given size (in bytes!).
  // (Rounds down to a G1HeapRegion boundary.)
  void shrink(size_t shrink_bytes);
  void shrink_helper(size_t shrink_bytes);

  // Schedule the VM operation that will do an evacuation pause to
  // satisfy an allocation request of word_size. *succeeded will
  // return whether the VM operation was successful (it did do an
  // evacuation pause) or not (another thread beat us to it or the GC
  // locker was active). Given that we should not be holding the
  // Heap_lock when we enter this method, we will pass the
  // gc_count_before (i.e., total_collections()) as a parameter since
  // it has to be read while holding the Heap_lock. Currently, both
  // methods that call do_collection_pause() release the Heap_lock
  // before the call, so it's easy to read gc_count_before just before.
  HeapWord* do_collection_pause(size_t word_size,
                                uint gc_count_before,
                                bool* succeeded,
                                GCCause::Cause gc_cause);

  // Perform an incremental collection at a safepoint, possibly followed by a
  // by-policy upgrade to a full collection.
  // The collection should expect to be followed by an allocation of allocation_word_size.
  // precondition: at safepoint on VM thread
  // precondition: !is_stw_gc_active()
  void do_collection_pause_at_safepoint(size_t allocation_word_size);

  void verify_before_young_collection(G1HeapVerifier::G1VerifyType type);
  void verify_after_young_collection(G1HeapVerifier::G1VerifyType type);

public:
  // Start a concurrent cycle.
  void start_concurrent_cycle(bool concurrent_operation_is_full_mark);

  void prepare_for_mutator_after_young_collection();

  void retire_tlabs();

  // Updates all regions' pin counts from the per-thread caches and resets the caches.
  // Must be called before any decision based on pin counts.
  void flush_region_pin_cache();

  void record_obj_copy_mem_stats();

private:
  // The g1 remembered set of the heap.
  G1RemSet* _rem_set;
  // Global card set configuration
  G1CardSetConfiguration _card_set_config;

  G1MonotonicArenaFreePool _card_set_freelist_pool;

  // Group cardsets
  G1CSetCandidateGroup _young_regions_cset_group;

public:
  G1CardSetConfiguration* card_set_config() { return &_card_set_config; }

  G1CSetCandidateGroup* young_regions_cset_group() { return &_young_regions_cset_group; }

  // After a collection pause, reset eden and the collection set.
  void clear_eden();
  void clear_collection_set();

  // Abandon the current collection set without recording policy
  // statistics or updating free lists.
  void abandon_collection_set();

  // The concurrent marker (and the thread it runs in).
  G1ConcurrentMark* _cm;

  // The concurrent refiner.
  G1ConcurrentRefine* _cr;

  // Reusable parallel task queues and partial array manager.
  G1ScannerTasksQueueSet* _task_queues;
  PartialArrayStateManager* _partial_array_state_manager;

  // ("Weak") Reference processing support.
  //
  // G1 has 2 instances of the reference processor class.
  //
  // One (_ref_processor_cm) handles reference object discovery and subsequent
  // processing during concurrent marking cycles. Discovery is enabled/disabled
  // at the start/end of a concurrent marking cycle.
  //
  // The other (_ref_processor_stw) handles reference object discovery and
  // processing during incremental evacuation pauses and full GC pauses.
  //
  // ## Incremental evacuation pauses
  //
  // STW ref processor discovery is enabled/disabled at the start/end of an
  // incremental evacuation pause. No particular handling of the CM ref
  // processor is needed, apart from treating the discovered references as
  // roots; CM discovery does not need to be temporarily disabled as all
  // marking threads are paused during incremental evacuation pauses.
  //
  // ## Full GC pauses
  //
  // We abort any ongoing concurrent marking cycle, disable CM discovery, and
  // temporarily substitute a new closure for the STW ref processor's
  // _is_alive_non_header field (old value is restored after the full GC). Then
  // STW ref processor discovery is enabled, and marking & compaction
  // commences.

  // The (stw) reference processor...
  ReferenceProcessor* _ref_processor_stw;

  // During reference object discovery, the _is_alive_non_header
  // closure (if non-null) is applied to the referent object to
  // determine whether the referent is live. If so then the
  // reference object does not need to be 'discovered' and can
  // be treated as a regular oop. This has the benefit of reducing
  // the number of 'discovered' reference objects that need to
  // be processed.
  //
  // Instance of the is_alive closure for embedding into the
  // STW reference processor as the _is_alive_non_header field.
  // Supplying a value for the _is_alive_non_header field is
  // optional but doing so prevents unnecessary additions to
  // the discovered lists during reference discovery.
  G1STWIsAliveClosure _is_alive_closure_stw;

  G1STWSubjectToDiscoveryClosure _is_subject_to_discovery_stw;

  // The (concurrent marking) reference processor...
  ReferenceProcessor* _ref_processor_cm;

  // Instance of the concurrent mark is_alive closure for embedding
  // into the Concurrent Marking reference processor as the
  // _is_alive_non_header field. Supplying a value for the
  // _is_alive_non_header field is optional but doing so prevents
  // unnecessary additions to the discovered lists during reference
  // discovery.
  G1CMIsAliveClosure _is_alive_closure_cm;

  G1CMSubjectToDiscoveryClosure _is_subject_to_discovery_cm;
public:

  G1ScannerTasksQueueSet* task_queues() const;
  G1ScannerTasksQueue* task_queue(uint i) const;

  PartialArrayStateManager* partial_array_state_manager() const;

  // Create a G1CollectedHeap.
  // Must call the initialize method afterwards.
  // May not return if something goes wrong.
  G1CollectedHeap();

private:
  jint initialize_concurrent_refinement();
  jint initialize_service_thread();

  void print_tracing_info() const override;
  void stop() override;

public:
  // Initialize the G1CollectedHeap to have the initial and
  // maximum sizes and remembered and barrier sets
  // specified by the policy object.
  jint initialize() override;

  void safepoint_synchronize_begin() override;
  void safepoint_synchronize_end() override;

  jlong last_refinement_epoch_start() const { return _last_refinement_epoch_start; }
  void set_last_refinement_epoch_start(jlong epoch_start, jlong last_yield_duration);
  jlong yield_duration_in_refinement_epoch();

  // Performs operations required after initialization has been done.
  void post_initialize() override;

  // Initialize weak reference processing.
  void ref_processing_init();

  Name kind() const override {
    return CollectedHeap::G1;
  }

  const char* name() const override {
    return "G1";
  }

  const G1CollectorState* collector_state() const { return &_collector_state; }
  G1CollectorState* collector_state() { return &_collector_state; }

  // The current policy object for the collector.
  G1Policy* policy() const { return _policy; }
  // The remembered set.
  G1RemSet* rem_set() const { return _rem_set; }

  const G1MonotonicArenaFreePool* card_set_freelist_pool() const { return &_card_set_freelist_pool; }
  G1MonotonicArenaFreePool* card_set_freelist_pool() { return &_card_set_freelist_pool; }

  inline G1GCPhaseTimes* phase_times() const;

  const G1CollectionSet* collection_set() const { return &_collection_set; }
  G1CollectionSet* collection_set() { return &_collection_set; }

  inline bool is_collection_set_candidate(const G1HeapRegion* r) const;

  void initialize_serviceability() override;
  MemoryUsage memory_usage() override;
  GrowableArray<GCMemoryManager*> memory_managers() override;
  GrowableArray<MemoryPool*> memory_pools() override;

  void fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) override;

  static void start_codecache_marking_cycle_if_inactive(bool concurrent_mark_start);
  static void finish_codecache_marking_cycle();

  // The shared block offset table array.
  G1BlockOffsetTable* bot() const { return _bot; }

  // Reference Processing accessors

  // The STW reference processor....
  ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }

  G1NewTracer* gc_tracer_stw() const { return _gc_tracer_stw; }
  STWGCTimer* gc_timer_stw() const { return _gc_timer_stw; }

  // The Concurrent Marking reference processor...
  ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }

  size_t unused_committed_regions_in_bytes() const;

  size_t capacity() const override;
  size_t used() const override;
  // This should be called when we're not holding the heap lock. The
  // result might be a bit inaccurate.
  size_t used_unlocked() const;
  size_t recalculate_used() const;

  // These virtual functions do the actual allocation.
  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region.)
  // But G1CollectedHeap doesn't yet support this.

  // Returns true if an incremental GC should be upgraded to a full GC. This
  // is done when there are no free regions and the heap can't be expanded.
  bool should_upgrade_to_full_gc() const {
    return num_available_regions() == 0;
  }

  // The number of inactive regions.
  uint num_inactive_regions() const { return _hrm.num_inactive_regions(); }

  // The current number of regions in the heap.
  uint num_committed_regions() const { return _hrm.num_committed_regions(); }

  // The max number of regions reserved for the heap.
  uint max_num_regions() const { return _hrm.max_num_regions(); }

  // The number of regions that are completely free.
  uint num_free_regions() const { return _hrm.num_free_regions(); }

  // The number of regions that are not completely free.
  uint num_used_regions() const { return _hrm.num_used_regions(); }

  // The number of regions that can be allocated into.
  uint num_available_regions() const { return num_free_regions() + num_inactive_regions(); }

  MemoryUsage get_auxiliary_data_memory_usage() const {
    return _hrm.get_auxiliary_data_memory_usage();
  }

#ifdef ASSERT
  bool is_on_master_free_list(G1HeapRegion* hr) {
    return _hrm.is_free(hr);
  }
#endif // ASSERT

  inline void old_set_add(G1HeapRegion* hr);
  inline void old_set_remove(G1HeapRegion* hr);

  // Returns how much memory is assigned to the non-young heap that cannot be
  // allocated into anymore without a garbage collection, after a hypothetical
  // allocation of allocation_word_size words.
  size_t non_young_occupancy_after_allocation(size_t allocation_word_size) const;

  // Determine whether the given region is one that we are using as an
  // old GC alloc region.
  bool is_old_gc_alloc_region(G1HeapRegion* hr);

  void collect(GCCause::Cause cause) override;

  // Try to perform a collection of the heap with the given cause to allocate allocation_word_size
  // words.
  // Returns whether this collection actually executed.
  bool try_collect(size_t allocation_word_size, GCCause::Cause cause, const G1GCCounters& counters_before);

  void start_concurrent_gc_for_metadata_allocation(GCCause::Cause gc_cause);

  bool last_gc_was_periodic() { return _gc_lastcause == GCCause::_g1_periodic_collection; }

  void remove_from_old_gen_sets(const uint old_regions_removed,
                                const uint humongous_regions_removed);
  void prepend_to_freelist(G1FreeRegionList* list);
  void decrement_summary_bytes(size_t bytes);

  bool is_in(const void* p) const override;

  // Return "TRUE" iff the given object address is within the collection
  // set. Assumes that the reference points into the heap.
  inline bool is_in_cset(const G1HeapRegion* hr) const;
  inline bool is_in_cset(oop obj) const;
  inline bool is_in_cset(HeapWord* addr) const;

  inline bool is_in_cset_or_humongous_candidate(const oop obj);

 private:
  // This array is used for a quick test on whether a reference points into
  // the collection set or not. Each of the array's elements denotes whether the
  // corresponding region is in the collection set or not.
  G1HeapRegionAttrBiasedMappedArray _region_attr;

 public:

  inline G1HeapRegionAttr region_attr(const void* obj) const;
  inline G1HeapRegionAttr region_attr(uint idx) const;

  MemRegion reserved() const {
    return _hrm.reserved();
  }

  bool is_in_reserved(const void* addr) const {
    return reserved().contains(addr);
  }

  G1CardTable* card_table() const {
    return static_cast<G1CardTable*>(G1BarrierSet::g1_barrier_set()->card_table());
  }

  G1CardTable* refinement_table() const {
    return G1BarrierSet::g1_barrier_set()->refinement_table();
  }

  G1CardTable::CardValue* card_table_base() const {
    assert(card_table() != nullptr, "must be");
    return card_table()->byte_map_base();
  }

  // Iteration functions.

  void object_iterate_parallel(ObjectClosure* cl, uint worker_id, G1HeapRegionClaimer* claimer);

  // Iterate over all objects, calling "cl.do_object" on each.
  void object_iterate(ObjectClosure* cl) override;

  ParallelObjectIteratorImpl* parallel_object_iterator(uint thread_num) override;

  // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
  void keep_alive(oop obj) override;

  // Iterate over heap regions, in address order, terminating the
  // iteration early if the "do_heap_region" method returns "true".
  void heap_region_iterate(G1HeapRegionClosure* blk) const;
  void heap_region_iterate(G1HeapRegionIndexClosure* blk) const;

  // Return the region with the given index. It assumes the index is valid.
  inline G1HeapRegion* region_at(uint index) const;
  inline G1HeapRegion* region_at_or_null(uint index) const;

  // Iterate over the regions of the humongous object starting at the given
  // region, applying the given function with signature f(G1HeapRegion*) to each.
  template <typename Func>
  void humongous_obj_regions_iterate(G1HeapRegion* start, const Func& f);

  // Calculate the region index of the given address. Given address must be
  // within the heap.
  inline uint addr_to_region(const void* addr) const;

  inline HeapWord* bottom_addr_for_region(uint index) const;

  // Two functions to iterate over the heap regions in parallel. Threads
  // compete using the G1HeapRegionClaimer to claim the regions before
  // applying the closure on them.
  // The _from_worker_offset version uses the G1HeapRegionClaimer and
  // the worker id to calculate a start offset to prevent all workers from
  // starting at the same point.
  void heap_region_par_iterate_from_worker_offset(G1HeapRegionClosure* cl,
                                                  G1HeapRegionClaimer* hrclaimer,
                                                  uint worker_id) const;

  void heap_region_par_iterate_from_start(G1HeapRegionClosure* cl,
                                          G1HeapRegionClaimer* hrclaimer) const;

  // Iterate over all regions in the collection set in parallel.
  void collection_set_par_iterate_all(G1HeapRegionClosure* cl,
                                      G1HeapRegionClaimer* hr_claimer,
                                      uint worker_id);

  // Iterate over all regions in the current collection set.
  void collection_set_iterate_all(G1HeapRegionClosure* blk);

  // Iterate over the regions in the current increment of the collection set.
  // Starts the iteration so that the start regions of the active workers are
  // evenly spread across the set of collection set regions to be iterated.
  // The variant with the G1HeapRegionClaimer guarantees that the closure will be
  // applied to a particular region exactly once.
  void collection_set_iterate_increment_from(G1HeapRegionClosure *blk, uint worker_id) {
    collection_set_iterate_increment_from(blk, nullptr, worker_id);
  }
  void collection_set_iterate_increment_from(G1HeapRegionClosure *blk, G1HeapRegionClaimer* hr_claimer, uint worker_id);
  // Iterate over the array of region indexes, uint regions[length], applying
  // the given G1HeapRegionClosure on each region. The worker_id will determine where
  // to start the iteration to allow for more efficient parallel iteration.
  void par_iterate_regions_array(G1HeapRegionClosure* cl,
                                 G1HeapRegionClaimer* hr_claimer,
                                 const uint regions[],
                                 size_t length,
                                 uint worker_id) const;

  // Returns the G1HeapRegion that contains addr. addr must not be null.
  inline G1HeapRegion* heap_region_containing(const void* addr) const;

  // Returns the G1HeapRegion that contains addr, or null if that is an uncommitted
  // region. addr must not be null.
  inline G1HeapRegion* heap_region_containing_or_null(const void* addr) const;

  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  // each address in the (reserved) heap is a member of exactly
  // one block.  The defining characteristic of a block is that it is
  // possible to find its size, and thus to progress forward to the next
  // block.  (Blocks may be of different sizes.)  Thus, blocks may
  // represent Java objects, or they might be free blocks in a
  // free-list-based heap (or subheap), as long as the two kinds are
  // distinguishable and the size of each is determinable.

  // Returns the address of the start of the "block" that contains the
  // address "addr".  We say "blocks" instead of "object" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  bool block_is_obj(const HeapWord* addr) const;
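
  // A sketch of walking blocks under the contract above (illustrative only;
  // block_size() is a hypothetical size lookup, as how the size is obtained
  // depends on whether the block is an object or a filler):
  //
  //   HeapWord* cur = block_start(addr);
  //   while (cur < end) {
  //     if (block_is_obj(cur)) { /* process the object starting at cur */ }
  //     cur += block_size(cur);  // advance to the next block
  //   }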

  // Section on thread-local allocation buffers (TLABs)
  // See CollectedHeap for semantics.

  size_t tlab_capacity() const override;
  size_t tlab_used() const override;
  size_t max_tlab_size() const override;
  size_t unsafe_max_tlab_alloc() const override;

  inline bool is_in_young(const oop obj) const;
  inline bool requires_barriers(stackChunkOop obj) const override;

  // Returns "true" iff the given word_size is "very large".
  static bool is_humongous(size_t word_size) {
    // Note this has to be strictly greater-than as the TLABs
    // are capped at the humongous threshold and we want to
    // ensure that we don't try to allocate a TLAB as
    // humongous and that we don't allocate a humongous
    // object in a TLAB.
    return word_size > _humongous_object_threshold_in_words;
  }

  // Returns the humongous threshold for a specific region size
  static size_t humongous_threshold_for(size_t region_size) {
    return (region_size / 2);
  }
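
  // Worked example for the two helpers above (illustrative, assuming 4M
  // regions and 8-byte heap words): the threshold is 4M / 8 / 2 = 256K words
  // (2M of heap), so a request of 256K + 1 words is humongous while a 256K
  // word request, the largest TLAB allowed, is not.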

  // Returns the number of regions the humongous object of the given word size
  // requires.
  static size_t humongous_obj_size_in_regions(size_t word_size);

  // Returns how much space in bytes an allocation of word_size will use up in the
  // heap.
  static size_t allocation_used_bytes(size_t word_size);

  // Return the maximum heap capacity.
  size_t max_capacity() const override;
  size_t min_capacity() const;

  Tickspan time_since_last_collection() const { return Ticks::now() - _collection_pause_end; }

  // Convenience function to be used in situations where the heap type can be
  // asserted to be this type.
  static G1CollectedHeap* heap() {
    return named_heap<G1CollectedHeap>(CollectedHeap::G1);
  }
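
  // Typical usage of the accessor above (illustrative; both calls below are
  // declared in this header):
  //
  //   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  //   if (g1h->is_in_reserved(addr)) {
  //     G1HeapRegion* r = g1h->heap_region_containing(addr);
  //     ...
  //   }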

  // add appropriate methods for any other surv rate groups

  G1SurvivorRegions* survivor() { return &_survivor; }

  inline uint eden_target_length() const;
  uint eden_regions_count() const { return _eden.length(); }
  uint eden_regions_count(uint node_index) const { return _eden.regions_on_node(node_index); }
  uint survivor_regions_count() const { return _survivor.length(); }
  uint survivor_regions_count(uint node_index) const { return _survivor.regions_on_node(node_index); }
  size_t eden_regions_used_bytes() const { return _eden.used_bytes(); }
  size_t survivor_regions_used_bytes() const { return _survivor.used_bytes(); }
  uint young_regions_count() const { return _eden.length() + _survivor.length(); }
  uint old_regions_count() const { return _old_set.length(); }
  uint humongous_regions_count() const { return _humongous_set.length(); }

#ifdef ASSERT
  bool check_young_list_empty();
#endif

  bool is_marked(oop obj) const;

  // Determine if an object is dead, given the object and also
  // the region to which the object belongs.
  inline bool is_obj_dead(const oop obj, const G1HeapRegion* hr) const;

  // Determine if an object is dead, given only the object itself.
  // This will find the region to which the object belongs and
  // then call the region version of the same function.
  // If obj is null it is not dead.
  inline bool is_obj_dead(const oop obj) const;

  inline bool is_obj_dead_full(const oop obj, const G1HeapRegion* hr) const;
  inline bool is_obj_dead_full(const oop obj) const;

  // Mark the live object that failed evacuation in the bitmap.
  void mark_evac_failure_object(oop obj) const;

  G1ConcurrentMark* concurrent_mark() const { return _cm; }

  // Refinement

  G1ConcurrentRefine* concurrent_refine() const { return _cr; }

  // Optimized nmethod scanning support routines

  // Register the given nmethod with the G1 heap.
  void register_nmethod(nmethod* nm) override;

  // Unregister the given nmethod from the G1 heap.
  void unregister_nmethod(nmethod* nm) override;

  // No nmethod verification implemented.
  void verify_nmethod(nmethod* nm) override {}

  // Recalculate amount of used memory after GC. Must be called after all allocation
  // has finished.
  void update_used_after_gc(bool evacuation_failed);

  // Rebuild the code root lists for each region
  // after a full GC.
  void rebuild_code_roots();

  // Performs cleaning of data structures after class unloading.
  void complete_cleaning(bool class_unloading_occurred);

  void unload_classes_and_code(const char* description, BoolObjectClosure* cl, GCTimer* timer);

  void bulk_unregister_nmethods();

  // Verification

  // Perform any cleanup actions necessary before allowing a verification.
  void prepare_for_verify() override;

  // Perform verification.
  void verify(VerifyOption vo) override;

  // WhiteBox testing support.
  bool supports_concurrent_gc_breakpoints() const override;

  WorkerThreads* safepoint_workers() override { return _workers; }

  // The methods below are here for convenience and dispatch the
  // appropriate method depending on value of the given VerifyOption
  // parameter. The values for that parameter, and their meanings,
  // are the same as those above.

  bool is_obj_dead_cond(const oop obj,
                        const G1HeapRegion* hr,
                        const VerifyOption vo) const;

  bool is_obj_dead_cond(const oop obj,
                        const VerifyOption vo) const;

  G1HeapSummary create_g1_heap_summary();
  G1EvacSummary create_g1_evac_summary(G1EvacStats* stats);

  // Printing
private:
  void print_heap_regions() const;
  void print_regions_on(outputStream* st) const;

public:
  void print_heap_on(outputStream* st) const override;
  void print_extended_on(outputStream* st) const;
  void print_gc_on(outputStream* st) const override;

  void gc_threads_do(ThreadClosure* tc) const override;

  // Used to print information about locations in the hs_err file.
  bool print_location(outputStream* st, void* addr) const override;
};

// Scoped object that performs common pre- and post-gc heap printing operations.
class G1HeapPrinterMark : public StackObj {
  G1CollectedHeap* _g1h;
  G1HeapTransition _heap_transition;

public:
  G1HeapPrinterMark(G1CollectedHeap* g1h);
  ~G1HeapPrinterMark();
};

// Scoped object that performs common pre- and post-gc operations related to
// JFR events.
class G1JFRTracerMark : public StackObj {
protected:
  STWGCTimer* _timer;
  GCTracer* _tracer;

public:
  G1JFRTracerMark(STWGCTimer* timer, GCTracer* tracer);
  ~G1JFRTracerMark();
};

#endif // SHARE_GC_G1_G1COLLECTEDHEAP_HPP