1 /*
   2  * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_GC_G1_G1COLLECTEDHEAP_HPP
  26 #define SHARE_GC_G1_G1COLLECTEDHEAP_HPP
  27 
  28 #include "gc/g1/g1BarrierSet.hpp"
  29 #include "gc/g1/g1BiasedArray.hpp"
  30 #include "gc/g1/g1CardTable.hpp"
  31 #include "gc/g1/g1CardSet.hpp"
  32 #include "gc/g1/g1CollectionSet.hpp"
  33 #include "gc/g1/g1CollectorState.hpp"
  34 #include "gc/g1/g1ConcurrentMark.hpp"
  35 #include "gc/g1/g1EdenRegions.hpp"
  36 #include "gc/g1/g1EvacStats.hpp"
  37 #include "gc/g1/g1GCPauseType.hpp"
  38 #include "gc/g1/g1HeapRegionAttr.hpp"
  39 #include "gc/g1/g1HeapTransition.hpp"
  40 #include "gc/g1/g1HeapVerifier.hpp"
  41 #include "gc/g1/g1HRPrinter.hpp"
  42 #include "gc/g1/g1MonitoringSupport.hpp"
  43 #include "gc/g1/g1NUMA.hpp"
  44 #include "gc/g1/g1SegmentedArrayFreeMemoryTask.hpp"
  45 #include "gc/g1/g1SurvivorRegions.hpp"
  46 #include "gc/g1/g1YoungGCEvacFailureInjector.hpp"
  47 #include "gc/g1/heapRegionManager.hpp"
  48 #include "gc/g1/heapRegionSet.hpp"
  49 #include "gc/shared/barrierSet.hpp"
  50 #include "gc/shared/collectedHeap.hpp"
  51 #include "gc/shared/gcHeapSummary.hpp"
  52 #include "gc/shared/plab.hpp"
  53 #include "gc/shared/softRefPolicy.hpp"
  54 #include "gc/shared/taskqueue.hpp"
  55 #include "memory/allocation.hpp"
  56 #include "memory/iterator.hpp"
  57 #include "memory/memRegion.hpp"
  58 #include "utilities/bitMap.hpp"
  59 
  60 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
  61 // It uses the "Garbage First" heap organization and algorithm, which
  62 // may combine concurrent marking with parallel, incremental compaction of
  63 // heap subsets that will yield large amounts of garbage.
  64 
  65 // Forward declarations
  66 class G1Allocator;
  67 class G1ArchiveAllocator;
  68 class G1BatchedTask;
  69 class G1CardTableEntryClosure;
  70 class G1ConcurrentMark;
  71 class G1ConcurrentMarkThread;
  72 class G1ConcurrentRefine;
  73 class G1GCCounters;
  74 class G1GCPhaseTimes;
  75 class G1HeapSizingPolicy;
  76 class G1HotCardCache;
  77 class G1NewTracer;
  78 class G1RemSet;
  79 class G1ServiceTask;
  80 class G1ServiceThread;
  81 class GCMemoryManager;
  82 class HeapRegion;
  83 class MemoryPool;
  84 class nmethod;
  85 class ReferenceProcessor;
  86 class STWGCTimer;
  87 class WorkerThreads;
  88 
  89 typedef OverflowTaskQueue<ScannerTask, mtGC>           G1ScannerTasksQueue;
  90 typedef GenericTaskQueueSet<G1ScannerTasksQueue, mtGC> G1ScannerTasksQueueSet;
  91 
  92 typedef int RegionIdx_t;   // needs to hold [ 0..max_reserved_regions() )
  93 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
  94 
// The G1 STW is-alive closure.
// An instance is embedded into the G1CollectedHeap and used as the
// (optional) _is_alive_non_header closure in the STW
// reference processor. It is also used extensively during
// reference processing in STW evacuation pauses.
 100 class G1STWIsAliveClosure : public BoolObjectClosure {
 101   G1CollectedHeap* _g1h;
 102 public:
 103   G1STWIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
 104   bool do_object_b(oop p) override;
 105 };
 106 
 107 class G1STWSubjectToDiscoveryClosure : public BoolObjectClosure {
 108   G1CollectedHeap* _g1h;
 109 public:
 110   G1STWSubjectToDiscoveryClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
 111   bool do_object_b(oop p) override;
 112 };
 113 
 114 class G1RegionMappingChangedListener : public G1MappingChangedListener {
 115  private:
 116   void reset_from_card_cache(uint start_idx, size_t num_regions);
 117  public:
 118   void on_commit(uint start_idx, size_t num_regions, bool zero_filled) override;
 119 };
 120 
 121 class G1CollectedHeap : public CollectedHeap {
 122   friend class VM_G1CollectForAllocation;
 123   friend class VM_G1CollectFull;
 124   friend class VM_G1TryInitiateConcMark;
 125   friend class VMStructs;
 126   friend class MutatorAllocRegion;
 127   friend class G1FullCollector;
 128   friend class G1GCAllocRegion;
 129   friend class G1HeapVerifier;
 130 
 131   friend class G1YoungGCVerifierMark;
 132 
 133   // Closures used in implementation.
 134   friend class G1EvacuateRegionsTask;
 135   friend class G1PLABAllocator;
 136 
 137   // Other related classes.
 138   friend class G1HeapPrinterMark;
 139   friend class HeapRegionClaimer;
 140 
 141   // Testing classes.
 142   friend class G1CheckRegionAttrTableClosure;
 143 
 144 private:
 145   G1ServiceThread* _service_thread;
 146   G1ServiceTask* _periodic_gc_task;
 147   G1SegmentedArrayFreeMemoryTask* _free_segmented_array_memory_task;
 148 
 149   WorkerThreads* _workers;
 150   G1CardTable* _card_table;
 151 
 152   Ticks _collection_pause_end;
 153 
 154   SoftRefPolicy      _soft_ref_policy;
 155 
 156   static size_t _humongous_object_threshold_in_words;
 157 
 158   // These sets keep track of old, archive and humongous regions respectively.
 159   HeapRegionSet _old_set;
 160   HeapRegionSet _archive_set;
 161   HeapRegionSet _humongous_set;
 162 
 163   // Young gen memory statistics before GC.
 164   G1SegmentedArrayMemoryStats _young_gen_card_set_stats;
 165   // Collection set candidates memory statistics after GC.
 166   G1SegmentedArrayMemoryStats _collection_set_candidates_card_set_stats;
 167 
 168   // The block offset table for the G1 heap.
 169   G1BlockOffsetTable* _bot;
 170 
 171 public:
 172   void rebuild_free_region_list();
 173   // Start a new incremental collection set for the next pause.
 174   void start_new_collection_set();
 175 
 176   void prepare_region_for_full_compaction(HeapRegion* hr);
 177 
 178 private:
 179   // Rebuilds the region sets / lists so that they are repopulated to
 180   // reflect the contents of the heap. The only exception is the
 181   // humongous set which was not torn down in the first place. If
 182   // free_list_only is true, it will only rebuild the free list.
 183   void rebuild_region_sets(bool free_list_only);
 184 
 185   // Callback for region mapping changed events.
 186   G1RegionMappingChangedListener _listener;
 187 
 188   // Handle G1 NUMA support.
 189   G1NUMA* _numa;
 190 
 191   // The sequence of all heap regions in the heap.
 192   HeapRegionManager _hrm;
 193 
 194   // Manages all allocations with regions except humongous object allocations.
 195   G1Allocator* _allocator;
 196 
 197   G1YoungGCEvacFailureInjector _evac_failure_injector;
 198 
 199   // Manages all heap verification.
 200   G1HeapVerifier* _verifier;
 201 
 202   // Outside of GC pauses, the number of bytes used in all regions other
 203   // than the current allocation region(s).
 204   volatile size_t _summary_bytes_used;
 205 
 206   void increase_used(size_t bytes);
 207   void decrease_used(size_t bytes);
 208 
 209   void set_used(size_t bytes);
 210 
 211   // Number of bytes used in all regions during GC. Typically changed when
 212   // retiring a GC alloc region.
 213   size_t _bytes_used_during_gc;
 214 
 215 public:
 216   size_t bytes_used_during_gc() const { return _bytes_used_during_gc; }
 217 
 218 private:
 219   // Class that handles archive allocation ranges.
 220   G1ArchiveAllocator* _archive_allocator;
 221 
 222   // GC allocation statistics policy for survivors.
 223   G1EvacStats _survivor_evac_stats;
 224 
 225   // GC allocation statistics policy for tenured objects.
 226   G1EvacStats _old_evac_stats;
 227 
 228   // Helper for monitoring and management support.
 229   G1MonitoringSupport* _monitoring_support;
 230 
 231   // Records whether the region at the given index is (still) a
 232   // candidate for eager reclaim.  Only valid for humongous start
 233   // regions; other regions have unspecified values.  Humongous start
  // regions are initialized at the start of a collection pause, with
 235   // candidates removed from the set as they are found reachable from
 236   // roots or the young generation.
 237   class HumongousReclaimCandidates : public G1BiasedMappedArray<bool> {
 238   protected:
 239     bool default_value() const override { return false; }
 240   public:
 241     void clear() { G1BiasedMappedArray<bool>::clear(); }
 242     void set_candidate(uint region, bool value) {
 243       set_by_index(region, value);
 244     }
 245     bool is_candidate(uint region) {
 246       return get_by_index(region);
 247     }
 248   };
 249 
 250   HumongousReclaimCandidates _humongous_reclaim_candidates;
  uint _num_humongous_objects; // Current number of (all) humongous objects found in the heap.
 252   uint _num_humongous_reclaim_candidates; // Number of humongous object eager reclaim candidates.
 253 public:
 254   uint num_humongous_objects() const { return _num_humongous_objects; }
 255   uint num_humongous_reclaim_candidates() const { return _num_humongous_reclaim_candidates; }
 256   bool has_humongous_reclaim_candidates() const { return _num_humongous_reclaim_candidates > 0; }
 257 
 258   bool should_do_eager_reclaim() const;
 259 
 260   void set_humongous_stats(uint num_humongous_total, uint num_humongous_candidates);
 261 
 262   bool should_sample_collection_set_candidates() const;
 263   void set_collection_set_candidates_stats(G1SegmentedArrayMemoryStats& stats);
 264   void set_young_gen_card_set_stats(const G1SegmentedArrayMemoryStats& stats);
 265 
 266 private:
 267 
 268   G1HRPrinter _hr_printer;
 269 
 270   // Return true if an explicit GC should start a concurrent cycle instead
 271   // of doing a STW full GC. A concurrent cycle should be started if:
 272   // (a) cause == _g1_humongous_allocation,
 273   // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent,
 274   // (c) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent,
 275   // (d) cause == _wb_conc_mark or _wb_breakpoint,
 276   // (e) cause == _g1_periodic_collection and +G1PeriodicGCInvokesConcurrent.
 277   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 278 
 279   // Attempt to start a concurrent cycle with the indicated cause.
 280   // precondition: should_do_concurrent_full_gc(cause)
 281   bool try_collect_concurrently(GCCause::Cause cause,
 282                                 uint gc_counter,
 283                                 uint old_marking_started_before);
 284 
  // Indicates whether we are in young or mixed GC mode.
 286   G1CollectorState _collector_state;
 287 
 288   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
 289   // concurrent cycles) we have started.
 290   volatile uint _old_marking_cycles_started;
 291 
 292   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
 293   // concurrent cycles) we have completed.
 294   volatile uint _old_marking_cycles_completed;
 295 
 296   // This is a non-product method that is helpful for testing. It is
 297   // called at the end of a GC and artificially expands the heap by
 298   // allocating a number of dead regions. This way we can induce very
 299   // frequent marking cycles and stress the cleanup / concurrent
 300   // cleanup code more (as all the regions that will be allocated by
 301   // this method will be found dead by the marking cycle).
 302   void allocate_dummy_regions() PRODUCT_RETURN;
 303 
 304   // Create a memory mapper for auxiliary data structures of the given size and
 305   // translation factor.
 306   static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
 307                                                          size_t size,
 308                                                          size_t translation_factor);
 309 
 310   void trace_heap(GCWhen::Type when, const GCTracer* tracer) override;
 311 
 312   // These are macros so that, if the assert fires, we get the correct
 313   // line number, file, etc.
 314 
 315 #define heap_locking_asserts_params(_extra_message_)                          \
 316   "%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",            \
 317   (_extra_message_),                                                          \
 318   BOOL_TO_STR(Heap_lock->owned_by_self()),                                    \
 319   BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),                       \
 320   BOOL_TO_STR(Thread::current()->is_VM_thread())
 321 
 322 #define assert_heap_locked()                                                  \
 323   do {                                                                        \
 324     assert(Heap_lock->owned_by_self(),                                        \
 325            heap_locking_asserts_params("should be holding the Heap_lock"));   \
 326   } while (0)
 327 
 328 #define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_)             \
 329   do {                                                                        \
 330     assert(Heap_lock->owned_by_self() ||                                      \
 331            (SafepointSynchronize::is_at_safepoint() &&                        \
 332              ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \
 333            heap_locking_asserts_params("should be holding the Heap_lock or "  \
 334                                         "should be at a safepoint"));         \
 335   } while (0)
 336 
 337 #define assert_heap_locked_and_not_at_safepoint()                             \
 338   do {                                                                        \
 339     assert(Heap_lock->owned_by_self() &&                                      \
 340                                     !SafepointSynchronize::is_at_safepoint(), \
 341           heap_locking_asserts_params("should be holding the Heap_lock and "  \
 342                                        "should not be at a safepoint"));      \
 343   } while (0)
 344 
 345 #define assert_heap_not_locked()                                              \
 346   do {                                                                        \
 347     assert(!Heap_lock->owned_by_self(),                                       \
 348         heap_locking_asserts_params("should not be holding the Heap_lock"));  \
 349   } while (0)
 350 
 351 #define assert_heap_not_locked_and_not_at_safepoint()                         \
 352   do {                                                                        \
 353     assert(!Heap_lock->owned_by_self() &&                                     \
 354                                     !SafepointSynchronize::is_at_safepoint(), \
 355       heap_locking_asserts_params("should not be holding the Heap_lock and "  \
 356                                    "should not be at a safepoint"));          \
 357   } while (0)
 358 
 359 #define assert_at_safepoint_on_vm_thread()                                    \
 360   do {                                                                        \
 361     assert_at_safepoint();                                                    \
 362     assert(Thread::current_or_null() != NULL, "no current thread");           \
 363     assert(Thread::current()->is_VM_thread(), "current thread is not VM thread"); \
 364   } while (0)
 365 
 366 #ifdef ASSERT
 367 #define assert_used_and_recalculate_used_equal(g1h)                           \
 368   do {                                                                        \
 369     size_t cur_used_bytes = g1h->used();                                      \
 370     size_t recal_used_bytes = g1h->recalculate_used();                        \
 371     assert(cur_used_bytes == recal_used_bytes, "Used(" SIZE_FORMAT ") is not" \
 372            " same as recalculated used(" SIZE_FORMAT ").",                    \
 373            cur_used_bytes, recal_used_bytes);                                 \
 374   } while (0)
 375 #else
 376 #define assert_used_and_recalculate_used_equal(g1h) do {} while(0)
 377 #endif
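  // Illustrative use of the locking assert macros above (a sketch, not a
  // verbatim excerpt of this class's .cpp file): a mutator-side allocation
  // path states its locking expectations up front, e.g.
  //
  //   HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
  //     // First-level attempt (without the Heap_lock) has already failed.
  //     assert_heap_not_locked_and_not_at_safepoint();
  //     ...
  //   }
  //
  // If such an assert fires, heap_locking_asserts_params reports the Heap_lock
  // ownership, the safepoint state, and whether we are on the VM thread.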
 378 
 379   // The young region list.
 380   G1EdenRegions _eden;
 381   G1SurvivorRegions _survivor;
 382 
 383   STWGCTimer* _gc_timer_stw;
 384 
 385   G1NewTracer* _gc_tracer_stw;
 386 
 387   // The current policy object for the collector.
 388   G1Policy* _policy;
 389   G1HeapSizingPolicy* _heap_sizing_policy;
 390 
 391   G1CollectionSet _collection_set;
 392 
 393   // Try to allocate a single non-humongous HeapRegion sufficient for
 394   // an allocation of the given word_size. If do_expand is true,
 395   // attempt to expand the heap if necessary to satisfy the allocation
  // request. 'type' specifies the type of region to be allocated. (Use constants
 397   // Old, Eden, Humongous, Survivor defined in HeapRegionType.)
 398   HeapRegion* new_region(size_t word_size,
 399                          HeapRegionType type,
 400                          bool do_expand,
 401                          uint node_index = G1NUMA::AnyNodeIndex);
 402 
 403   // Initialize a contiguous set of free regions of length num_regions
  // and starting at the region first_hr so that they appear as a single
 405   // humongous region.
 406   HeapWord* humongous_obj_allocate_initialize_regions(HeapRegion* first_hr,
 407                                                       uint num_regions,
 408                                                       size_t word_size);
 409 
 410   // Attempt to allocate a humongous object of the given size. Return
 411   // NULL if unsuccessful.
 412   HeapWord* humongous_obj_allocate(size_t word_size);
 413 
 414   // The following two methods, allocate_new_tlab() and
 415   // mem_allocate(), are the two main entry points from the runtime
 416   // into the G1's allocation routines. They have the following
 417   // assumptions:
 418   //
 419   // * They should both be called outside safepoints.
 420   //
 421   // * They should both be called without holding the Heap_lock.
 422   //
 423   // * All allocation requests for new TLABs should go to
 424   //   allocate_new_tlab().
 425   //
 426   // * All non-TLAB allocation requests should go to mem_allocate().
 427   //
 428   // * If either call cannot satisfy the allocation request using the
 429   //   current allocating region, they will try to get a new one. If
 430   //   this fails, they will attempt to do an evacuation pause and
 431   //   retry the allocation.
 432   //
 433   // * If all allocation attempts fail, even after trying to schedule
 434   //   an evacuation pause, allocate_new_tlab() will return NULL,
 435   //   whereas mem_allocate() will attempt a heap expansion and/or
 436   //   schedule a Full GC.
 437   //
 438   // * We do not allow humongous-sized TLABs. So, allocate_new_tlab
 439   //   should never be called with word_size being humongous. All
 440   //   humongous allocation requests should go to mem_allocate() which
 441   //   will satisfy them with a special path.
 442 
 443   HeapWord* allocate_new_tlab(size_t min_size,
 444                               size_t requested_size,
 445                               size_t* actual_size) override;
 446 
 447   HeapWord* mem_allocate(size_t word_size,
 448                          bool*  gc_overhead_limit_was_exceeded) override;
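  // Illustrative caller-side flow for the two entry points above (a sketch of
  // the contract described in the preceding comment, not code from this file;
  // sizes are in words and the local names are hypothetical):
  //
  //   size_t actual_words;
  //   HeapWord* buf = heap->allocate_new_tlab(min_words, requested_words, &actual_words);
  //   if (buf == NULL) {
  //     // Even an evacuation pause could not satisfy the TLAB request; the
  //     // caller falls back to a shared (non-TLAB) allocation, which may in
  //     // turn expand the heap or schedule a Full GC.
  //     bool gc_overhead_limit_was_exceeded = false;
  //     buf = heap->mem_allocate(requested_words, &gc_overhead_limit_was_exceeded);
  //   }
  //
  // Humongous requests are never routed to allocate_new_tlab(); they always go
  // through mem_allocate(), which handles them on a separate path.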
 449 
 450   // First-level mutator allocation attempt: try to allocate out of
 451   // the mutator alloc region without taking the Heap_lock. This
 452   // should only be used for non-humongous allocations.
 453   inline HeapWord* attempt_allocation(size_t min_word_size,
 454                                       size_t desired_word_size,
 455                                       size_t* actual_word_size);
 456 
 457   // Second-level mutator allocation attempt: take the Heap_lock and
 458   // retry the allocation attempt, potentially scheduling a GC
 459   // pause. This should only be used for non-humongous allocations.
 460   HeapWord* attempt_allocation_slow(size_t word_size);
 461 
 462   // Takes the Heap_lock and attempts a humongous allocation. It can
 463   // potentially schedule a GC pause.
 464   HeapWord* attempt_allocation_humongous(size_t word_size);
 465 
 466   // Allocation attempt that should be called during safepoints (e.g.,
 467   // at the end of a successful GC). expect_null_mutator_alloc_region
 468   // specifies whether the mutator alloc region is expected to be NULL
 469   // or not.
 470   HeapWord* attempt_allocation_at_safepoint(size_t word_size,
 471                                             bool expect_null_mutator_alloc_region);
 472 
 473   // These methods are the "callbacks" from the G1AllocRegion class.
 474 
 475   // For mutator alloc regions.
 476   HeapRegion* new_mutator_alloc_region(size_t word_size, bool force, uint node_index);
 477   void retire_mutator_alloc_region(HeapRegion* alloc_region,
 478                                    size_t allocated_bytes);
 479 
 480   // For GC alloc regions.
 481   bool has_more_regions(G1HeapRegionAttr dest);
 482   HeapRegion* new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest, uint node_index);
 483   void retire_gc_alloc_region(HeapRegion* alloc_region,
 484                               size_t allocated_bytes, G1HeapRegionAttr dest);
 485 
  // - if explicit_gc is true, the GC is for a System.gc() etc.,
  //   otherwise it's for a failed allocation.
  // - if clear_all_soft_refs is true, all soft references should be
  //   cleared during the GC.
  // - if do_maximal_compaction is true, the full GC will do a maximally
  //   compacting collection, leaving no dead wood.
  // - it returns false if it is unable to do the collection due to the
  //   GC locker being active, true otherwise.
 494   bool do_full_collection(bool explicit_gc,
 495                           bool clear_all_soft_refs,
 496                           bool do_maximal_compaction);
 497 
 498   // Callback from VM_G1CollectFull operation, or collect_as_vm_thread.
 499   void do_full_collection(bool clear_all_soft_refs) override;
 500 
 501   // Helper to do a full collection that clears soft references.
 502   bool upgrade_to_full_collection();
 503 
 504   // Callback from VM_G1CollectForAllocation operation.
 505   // This function does everything necessary/possible to satisfy a
 506   // failed allocation request (including collection, expansion, etc.)
 507   HeapWord* satisfy_failed_allocation(size_t word_size,
 508                                       bool* succeeded);
 509   // Internal helpers used during full GC to split it up to
 510   // increase readability.
 511   void abort_concurrent_cycle();
 512   void verify_before_full_collection(bool explicit_gc);
 513   void prepare_heap_for_full_collection();
 514   void prepare_heap_for_mutators();
 515   void abort_refinement();
 516   void verify_after_full_collection();
 517   void print_heap_after_full_collection();
 518 
 519   // Helper method for satisfy_failed_allocation()
 520   HeapWord* satisfy_failed_allocation_helper(size_t word_size,
 521                                              bool do_gc,
 522                                              bool maximal_compaction,
 523                                              bool expect_null_mutator_alloc_region,
 524                                              bool* gc_succeeded);
 525 
  // Attempt to expand the heap sufficiently
 527   // to support an allocation of the given "word_size".  If
 528   // successful, perform the allocation and return the address of the
 529   // allocated block, or else "NULL".
 530   HeapWord* expand_and_allocate(size_t word_size);
 531 
 532   void verify_numa_regions(const char* desc);
 533 
 534 public:
  // If, during a concurrent start pause, we install a pending list head which is
  // not otherwise reachable, ensure that it is marked in the bitmap for concurrent
  // marking to discover.
 538   void make_pending_list_reachable();
 539 
 540   G1ServiceThread* service_thread() const { return _service_thread; }
 541 
 542   WorkerThreads* workers() const { return _workers; }
 543 
 544   // Run the given batch task using the workers.
 545   void run_batch_task(G1BatchedTask* cl);
 546 
 547   G1Allocator* allocator() {
 548     return _allocator;
 549   }
 550 
 551   G1YoungGCEvacFailureInjector* evac_failure_injector() { return &_evac_failure_injector; }
 552 
 553   G1HeapVerifier* verifier() {
 554     return _verifier;
 555   }
 556 
 557   G1MonitoringSupport* monitoring_support() {
 558     assert(_monitoring_support != nullptr, "should have been initialized");
 559     return _monitoring_support;
 560   }
 561 
 562   void resize_heap_if_necessary();
 563 
 564   // Check if there is memory to uncommit and if so schedule a task to do it.
 565   void uncommit_regions_if_necessary();
 566   // Immediately uncommit uncommittable regions.
 567   uint uncommit_regions(uint region_limit);
 568   bool has_uncommittable_regions();
 569 
 570   G1NUMA* numa() const { return _numa; }
 571 
 572   // Expand the garbage-first heap by at least the given size (in bytes!).
 573   // Returns true if the heap was expanded by the requested amount;
 574   // false otherwise.
 575   // (Rounds up to a HeapRegion boundary.)
 576   bool expand(size_t expand_bytes, WorkerThreads* pretouch_workers = NULL, double* expand_time_ms = NULL);
 577   bool expand_single_region(uint node_index);
 578 
 579   // Returns the PLAB statistics for a given destination.
 580   inline G1EvacStats* alloc_buffer_stats(G1HeapRegionAttr dest);
 581 
 582   // Determines PLAB size for a given destination.
 583   inline size_t desired_plab_sz(G1HeapRegionAttr dest);
 584 
  // Do anything common to GCs.
 586   void gc_prologue(bool full);
 587   void gc_epilogue(bool full);
 588 
 589   // Does the given region fulfill remembered set based eager reclaim candidate requirements?
 590   bool is_potential_eager_reclaim_candidate(HeapRegion* r) const;
 591 
 592   // Modify the reclaim candidate set and test for presence.
 593   // These are only valid for starts_humongous regions.
 594   inline void set_humongous_reclaim_candidate(uint region, bool value);
 595   inline bool is_humongous_reclaim_candidate(uint region);
 596 
 597   // Remove from the reclaim candidate set.  Also remove from the
 598   // collection set so that later encounters avoid the slow path.
 599   inline void set_humongous_is_live(oop obj);
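  // Illustrative pause-time flow for the eager-reclaim candidate set (a sketch
  // based on the comments above, not a verbatim excerpt of the pause code):
  //
  //   // Pause setup: flag humongous start regions that look reclaimable.
  //   set_humongous_reclaim_candidate(region_index, true);
  //
  //   // During evacuation: a candidate found referenced from roots or the
  //   // young generation is made live again and dropped from the set.
  //   set_humongous_is_live(obj);
  //
  //   // Pause cleanup: regions for which is_humongous_reclaim_candidate()
  //   // still returns true are eligible for eager reclamation.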
 600 
 601   // Register the given region to be part of the collection set.
 602   inline void register_humongous_region_with_region_attr(uint index);
 603 
  // We register a region with the fast "in collection set" test. We
  // simply set the array slot corresponding to this region to the
  // appropriate "in collection set" attribute.
 606   void register_young_region_with_region_attr(HeapRegion* r) {
 607     _region_attr.set_in_young(r->hrm_index());
 608   }
 609   inline void register_new_survivor_region_with_region_attr(HeapRegion* r);
 610   inline void register_region_with_region_attr(HeapRegion* r);
 611   inline void register_old_region_with_region_attr(HeapRegion* r);
 612   inline void register_optional_region_with_region_attr(HeapRegion* r);
 613 
 614   void clear_region_attr(const HeapRegion* hr) {
 615     _region_attr.clear(hr);
 616   }
 617 
 618   void clear_region_attr() {
 619     _region_attr.clear();
 620   }
 621 
 622   // Verify that the G1RegionAttr remset tracking corresponds to actual remset tracking
 623   // for all regions.
 624   void verify_region_attr_remset_is_tracked() PRODUCT_RETURN;
 625 
 626   void clear_prev_bitmap_for_region(HeapRegion* hr);
 627 
 628   bool is_user_requested_concurrent_full_gc(GCCause::Cause cause);
 629 
 630   // This is called at the start of either a concurrent cycle or a Full
 631   // GC to update the number of old marking cycles started.
 632   void increment_old_marking_cycles_started();
 633 
 634   // This is called at the end of either a concurrent cycle or a Full
 635   // GC to update the number of old marking cycles completed. Those two
 636   // can happen in a nested fashion, i.e., we start a concurrent
 637   // cycle, a Full GC happens half-way through it which ends first,
 638   // and then the cycle notices that a Full GC happened and ends
 639   // too. The concurrent parameter is a boolean to help us do a bit
 640   // tighter consistency checking in the method. If concurrent is
 641   // false, the caller is the inner caller in the nesting (i.e., the
 642   // Full GC). If concurrent is true, the caller is the outer caller
 643   // in this nesting (i.e., the concurrent cycle). Further nesting is
 644   // not currently supported. The end of this call also notifies
 645   // the G1OldGCCount_lock in case a Java thread is waiting for a full
 646   // GC to happen (e.g., it called System.gc() with
 647   // +ExplicitGCInvokesConcurrent).
  // whole_heap_examined should indicate whether, during that old marking
  // cycle, the whole heap has been examined for live objects (as opposed
 650   // to only parts, or aborted before completion).
 651   void increment_old_marking_cycles_completed(bool concurrent, bool whole_heap_examined);
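  // Illustrative nesting, as described above (a sketch): a concurrent cycle
  // starts (increment_old_marking_cycles_started()), a Full GC interrupts it
  // and finishes first (increment_old_marking_cycles_completed(false /* concurrent */, ...)),
  // and the interrupted concurrent cycle then notices this and completes as
  // well (increment_old_marking_cycles_completed(true /* concurrent */, ...)).
  // Each completion notifies G1OldGCCount_lock so that any waiting Java
  // threads can make progress.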
 652 
 653   uint old_marking_cycles_started() const {
 654     return _old_marking_cycles_started;
 655   }
 656 
 657   uint old_marking_cycles_completed() const {
 658     return _old_marking_cycles_completed;
 659   }
 660 
 661   G1HRPrinter* hr_printer() { return &_hr_printer; }
 662 
 663   // Allocates a new heap region instance.
 664   HeapRegion* new_heap_region(uint hrs_index, MemRegion mr);
 665 
 666   // Allocate the highest free region in the reserved heap. This will commit
 667   // regions as necessary.
 668   HeapRegion* alloc_highest_free_region();
 669 
  // Frees a region by resetting its metadata and adding it to the free list
  // passed as a parameter (this is usually a local list which will be appended
  // to the master free list later, or NULL if free list management is handled
  // in another way).
  // Callers must ensure that they are the only ones calling free on the given
  // region at the same time.
 676   void free_region(HeapRegion* hr, FreeRegionList* free_list);
 677 
 678   // It dirties the cards that cover the block so that the post
 679   // write barrier never queues anything when updating objects on this
 680   // block. It is assumed (and in fact we assert) that the block
 681   // belongs to a young region.
 682   inline void dirty_young_block(HeapWord* start, size_t word_size);
 683 
 684   // Frees a humongous region by collapsing it into individual regions
 685   // and calling free_region() for each of them. The freed regions
 686   // will be added to the free list that's passed as a parameter (this
 687   // is usually a local list which will be appended to the master free
 688   // list later).
 689   // The method assumes that only a single thread is ever calling
 690   // this for a particular region at once.
 691   void free_humongous_region(HeapRegion* hr,
 692                              FreeRegionList* free_list);
 693 
 694   // Facility for allocating in 'archive' regions in high heap memory and
 695   // recording the allocated ranges. These should all be called from the
 696   // VM thread at safepoints, without the heap lock held. They can be used
 697   // to create and archive a set of heap regions which can be mapped at the
 698   // same fixed addresses in a subsequent JVM invocation.
 699   void begin_archive_alloc_range(bool open = false);
 700 
 701   // Check if the requested size would be too large for an archive allocation.
 702   bool is_archive_alloc_too_large(size_t word_size);
 703 
 704   // Allocate memory of the requested size from the archive region. This will
 705   // return NULL if the size is too large or if no memory is available. It
 706   // does not trigger a garbage collection.
 707   HeapWord* archive_mem_allocate(size_t word_size);
 708 
 709   // Optionally aligns the end address and returns the allocated ranges in
 710   // an array of MemRegions in order of ascending addresses.
 711   void end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
 712                                size_t end_alignment_in_bytes = 0);
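  // Illustrative allocation sequence for an archive range (a sketch following
  // the contract above; the loop condition and word_size are hypothetical):
  //
  //   heap->begin_archive_alloc_range(true /* open */);
  //   while (more_objects_to_archive) {
  //     if (heap->is_archive_alloc_too_large(word_size)) break;
  //     HeapWord* p = heap->archive_mem_allocate(word_size);
  //     if (p == NULL) break;   // out of space; does not trigger a GC
  //     // ... copy/initialize the archived object at p ...
  //   }
  //   // 'ranges' is a GrowableArray<MemRegion> that receives the result.
  //   heap->end_archive_alloc_range(&ranges, end_alignment_in_bytes);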
 713 
 714   // Facility for allocating a fixed range within the heap and marking
 715   // the containing regions as 'archive'. For use at JVM init time, when the
 716   // caller may mmap archived heap data at the specified range(s).
 717   // Verify that the MemRegions specified in the argument array are within the
 718   // reserved heap.
 719   bool check_archive_addresses(MemRegion* range, size_t count);
 720 
 721   // Commit the appropriate G1 regions containing the specified MemRegions
 722   // and mark them as 'archive' regions. The regions in the array must be
 723   // non-overlapping and in order of ascending address.
 724   bool alloc_archive_regions(MemRegion* range, size_t count, bool open);
 725 
 726   // Insert any required filler objects in the G1 regions around the specified
 727   // ranges to make the regions parseable. This must be called after
 728   // alloc_archive_regions, and after class loading has occurred.
 729   void fill_archive_regions(MemRegion* range, size_t count);
 730 
 731   // Populate the G1BlockOffsetTablePart for archived regions with the given
 732   // memory ranges.
 733   void populate_archive_regions_bot_part(MemRegion* range, size_t count);
 734 
 735   // For each of the specified MemRegions, uncommit the containing G1 regions
 736   // which had been allocated by alloc_archive_regions. This should be called
 737   // rather than fill_archive_regions at JVM init time if the archive file
 738   // mapping failed, with the same non-overlapping and sorted MemRegion array.
 739   void dealloc_archive_regions(MemRegion* range, size_t count);
 740 
 741 private:
 742 
 743   // Shrink the garbage-first heap by at most the given size (in bytes!).
 744   // (Rounds down to a HeapRegion boundary.)
 745   void shrink(size_t shrink_bytes);
 746   void shrink_helper(size_t expand_bytes);
 747 
 748   // Schedule the VM operation that will do an evacuation pause to
 749   // satisfy an allocation request of word_size. *succeeded will
 750   // return whether the VM operation was successful (it did do an
 751   // evacuation pause) or not (another thread beat us to it or the GC
 752   // locker was active). Given that we should not be holding the
 753   // Heap_lock when we enter this method, we will pass the
 754   // gc_count_before (i.e., total_collections()) as a parameter since
 755   // it has to be read while holding the Heap_lock. Currently, both
 756   // methods that call do_collection_pause() release the Heap_lock
 757   // before the call, so it's easy to read gc_count_before just before.
 758   HeapWord* do_collection_pause(size_t         word_size,
 759                                 uint           gc_count_before,
 760                                 bool*          succeeded,
 761                                 GCCause::Cause gc_cause);
 762 
 763   // Perform an incremental collection at a safepoint, possibly
 764   // followed by a by-policy upgrade to a full collection.  Returns
 765   // false if unable to do the collection due to the GC locker being
 766   // active, true otherwise.
 767   // precondition: at safepoint on VM thread
 768   // precondition: !is_gc_active()
 769   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
 770 
 771   // Helper for do_collection_pause_at_safepoint, containing the guts
 772   // of the incremental collection pause, executed by the vm thread.
 773   void do_collection_pause_at_safepoint_helper(double target_pause_time_ms);
 774 
 775   G1HeapVerifier::G1VerifyType young_collection_verify_type() const;
 776   void verify_before_young_collection(G1HeapVerifier::G1VerifyType type);
 777   void verify_after_young_collection(G1HeapVerifier::G1VerifyType type);
 778 
 779 public:
 780   // Start a concurrent cycle.
 781   void start_concurrent_cycle(bool concurrent_operation_is_full_mark);
 782 
 783   void prepare_tlabs_for_mutator();
 784 
 785   void retire_tlabs();
 786 
 787   void expand_heap_after_young_collection();
 788   // Update object copying statistics.
 789   void record_obj_copy_mem_stats();
 790 
 791 private:
 792   // The hot card cache for remembered set insertion optimization.
 793   G1HotCardCache* _hot_card_cache;
 794 
 795   // The g1 remembered set of the heap.
 796   G1RemSet* _rem_set;
 797   // Global card set configuration
 798   G1CardSetConfiguration _card_set_config;
 799 
 800 public:
 801   // After a collection pause, reset eden and the collection set.
 802   void clear_eden();
 803   void clear_collection_set();
 804 
 805   // Abandon the current collection set without recording policy
 806   // statistics or updating free lists.
 807   void abandon_collection_set(G1CollectionSet* collection_set);
 808 
  // The concurrent marker (and the thread it runs in).
 810   G1ConcurrentMark* _cm;
 811   G1ConcurrentMarkThread* _cm_thread;
 812 
 813   // The concurrent refiner.
 814   G1ConcurrentRefine* _cr;
 815 
 816   // The parallel task queues
 817   G1ScannerTasksQueueSet *_task_queues;
 818 
 819   // ("Weak") Reference processing support.
 820   //
 821   // G1 has 2 instances of the reference processor class.
 822   //
 823   // One (_ref_processor_cm) handles reference object discovery and subsequent
 824   // processing during concurrent marking cycles. Discovery is enabled/disabled
 825   // at the start/end of a concurrent marking cycle.
 826   //
 827   // The other (_ref_processor_stw) handles reference object discovery and
 828   // processing during incremental evacuation pauses and full GC pauses.
 829   //
 830   // ## Incremental evacuation pauses
 831   //
 832   // STW ref processor discovery is enabled/disabled at the start/end of an
 833   // incremental evacuation pause. No particular handling of the CM ref
 834   // processor is needed, apart from treating the discovered references as
 835   // roots; CM discovery does not need to be temporarily disabled as all
 836   // marking threads are paused during incremental evacuation pauses.
 837   //
 838   // ## Full GC pauses
 839   //
 840   // We abort any ongoing concurrent marking cycle, disable CM discovery, and
 841   // temporarily substitute a new closure for the STW ref processor's
 842   // _is_alive_non_header field (old value is restored after the full GC). Then
 843   // STW ref processor discovery is enabled, and marking & compaction
 844   // commences.
 845 
  // The (STW) reference processor...
 847   ReferenceProcessor* _ref_processor_stw;
 848 
 849   // During reference object discovery, the _is_alive_non_header
 850   // closure (if non-null) is applied to the referent object to
 851   // determine whether the referent is live. If so then the
 852   // reference object does not need to be 'discovered' and can
 853   // be treated as a regular oop. This has the benefit of reducing
 854   // the number of 'discovered' reference objects that need to
 855   // be processed.
 856   //
 857   // Instance of the is_alive closure for embedding into the
 858   // STW reference processor as the _is_alive_non_header field.
 859   // Supplying a value for the _is_alive_non_header field is
 860   // optional but doing so prevents unnecessary additions to
 861   // the discovered lists during reference discovery.
 862   G1STWIsAliveClosure _is_alive_closure_stw;
 863 
 864   G1STWSubjectToDiscoveryClosure _is_subject_to_discovery_stw;
 865 
 866   // The (concurrent marking) reference processor...
 867   ReferenceProcessor* _ref_processor_cm;
 868 
 869   // Instance of the concurrent mark is_alive closure for embedding
 870   // into the Concurrent Marking reference processor as the
 871   // _is_alive_non_header field. Supplying a value for the
 872   // _is_alive_non_header field is optional but doing so prevents
 873   // unnecessary additions to the discovered lists during reference
 874   // discovery.
 875   G1CMIsAliveClosure _is_alive_closure_cm;
 876 
 877   G1CMSubjectToDiscoveryClosure _is_subject_to_discovery_cm;
 878 public:
 879 
 880   G1ScannerTasksQueueSet* task_queues() const;
 881   G1ScannerTasksQueue* task_queue(uint i) const;
 882 
 883   // Create a G1CollectedHeap.
 884   // Must call the initialize method afterwards.
 885   // May not return if something goes wrong.
 886   G1CollectedHeap();
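  // Illustrative creation sequence (a sketch; the real call site is the shared
  // heap-creation machinery driven by the GC arguments code):
  //
  //   G1CollectedHeap* g1h = new G1CollectedHeap();
  //   jint status = g1h->initialize();
  //   if (status != JNI_OK) {
  //     // Initialization failed; the heap must not be used.
  //   }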
 887 
 888 private:
 889   jint initialize_concurrent_refinement();
 890   jint initialize_service_thread();
 891 public:
 892   // Initialize the G1CollectedHeap to have the initial and
 893   // maximum sizes and remembered and barrier sets
 894   // specified by the policy object.
 895   jint initialize() override;
 896 
 897   // Returns whether concurrent mark threads (and the VM) are about to terminate.
 898   bool concurrent_mark_is_terminating() const;
 899 
 900   void stop() override;
 901   void safepoint_synchronize_begin() override;
 902   void safepoint_synchronize_end() override;
 903 
  // Performs operations required after initialization has been done.
 905   void post_initialize() override;
 906 
 907   // Initialize weak reference processing.
 908   void ref_processing_init();
 909 
 910   Name kind() const override {
 911     return CollectedHeap::G1;
 912   }
 913 
 914   const char* name() const override {
 915     return "G1";
 916   }
 917 
 918   const G1CollectorState* collector_state() const { return &_collector_state; }
 919   G1CollectorState* collector_state() { return &_collector_state; }
 920 
 921   // The current policy object for the collector.
 922   G1Policy* policy() const { return _policy; }
 923   // The remembered set.
 924   G1RemSet* rem_set() const { return _rem_set; }
 925 
 926   inline G1GCPhaseTimes* phase_times() const;
 927 
 928   const G1CollectionSet* collection_set() const { return &_collection_set; }
 929   G1CollectionSet* collection_set() { return &_collection_set; }
 930 
 931   SoftRefPolicy* soft_ref_policy() override;
 932 
 933   void initialize_serviceability() override;
 934   MemoryUsage memory_usage() override;
 935   GrowableArray<GCMemoryManager*> memory_managers() override;
 936   GrowableArray<MemoryPool*> memory_pools() override;
 937 
 938   void fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) override;
 939 
 940   // Apply the given closure on all cards in the Hot Card Cache, emptying it.
 941   void iterate_hcc_closure(G1CardTableEntryClosure* cl, uint worker_id);
 942 
 943   // The shared block offset table array.
 944   G1BlockOffsetTable* bot() const { return _bot; }
 945 
 946   // Reference Processing accessors
 947 
 948   // The STW reference processor....
 949   ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
 950 
 951   G1NewTracer* gc_tracer_stw() const { return _gc_tracer_stw; }
 952   STWGCTimer* gc_timer_stw() const { return _gc_timer_stw; }
 953 
 954   // The Concurrent Marking reference processor...
 955   ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
 956 
 957   size_t unused_committed_regions_in_bytes() const;
 958 
 959   size_t capacity() const override;
 960   size_t used() const override;
 961   // This should be called when we're not holding the heap lock. The
 962   // result might be a bit inaccurate.
 963   size_t used_unlocked() const;
 964   size_t recalculate_used() const;
 965 
 966   // These virtual functions do the actual allocation.
 967   // Some heaps may offer a contiguous region for shared non-blocking
 968   // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region).
 970   // But G1CollectedHeap doesn't yet support this.
 971 
 972   bool is_maximal_no_gc() const override {
 973     return _hrm.available() == 0;
 974   }
 975 
  // Returns true if an incremental GC should be upgraded to a full GC. This
 977   // is done when there are no free regions and the heap can't be expanded.
 978   bool should_upgrade_to_full_gc() const {
 979     return is_maximal_no_gc() && num_free_regions() == 0;
 980   }
 981 
 982   // The current number of regions in the heap.
 983   uint num_regions() const { return _hrm.length(); }
 984 
 985   // The max number of regions reserved for the heap. Except for static array
  // sizing purposes, you probably want to use max_regions().
 987   uint max_reserved_regions() const { return _hrm.reserved_length(); }
 988 
 989   // Max number of regions that can be committed.
 990   uint max_regions() const { return _hrm.max_length(); }
 991 
 992   // The number of regions that are completely free.
 993   uint num_free_regions() const { return _hrm.num_free_regions(); }
 994 
 995   // The number of regions that can be allocated into.
 996   uint num_free_or_available_regions() const { return num_free_regions() + _hrm.available(); }
 997 
 998   MemoryUsage get_auxiliary_data_memory_usage() const {
 999     return _hrm.get_auxiliary_data_memory_usage();
1000   }
1001 
1002   // The number of regions that are not completely free.
1003   uint num_used_regions() const { return num_regions() - num_free_regions(); }
1004 
1005 #ifdef ASSERT
1006   bool is_on_master_free_list(HeapRegion* hr) {
1007     return _hrm.is_free(hr);
1008   }
1009 #endif // ASSERT
1010 
1011   inline void old_set_add(HeapRegion* hr);
1012   inline void old_set_remove(HeapRegion* hr);
1013 
1014   inline void archive_set_add(HeapRegion* hr);
1015 
1016   size_t non_young_capacity_bytes() {
1017     return (old_regions_count() + _archive_set.length() + humongous_regions_count()) * HeapRegion::GrainBytes;
1018   }
1019 
1020   // Determine whether the given region is one that we are using as an
1021   // old GC alloc region.
1022   bool is_old_gc_alloc_region(HeapRegion* hr);
1023 
1024   // Perform a collection of the heap; intended for use in implementing
1025   // "System.gc".  This probably implies as full a collection as the
1026   // "CollectedHeap" supports.
1027   void collect(GCCause::Cause cause) override;
1028 
1029   // Perform a collection of the heap with the given cause.
1030   // Returns whether this collection actually executed.
1031   bool try_collect(GCCause::Cause cause, const G1GCCounters& counters_before);
1032 
1033   void start_concurrent_gc_for_metadata_allocation(GCCause::Cause gc_cause);
1034 
1035   void remove_from_old_gen_sets(const uint old_regions_removed,
1036                                 const uint archive_regions_removed,
1037                                 const uint humongous_regions_removed);
1038   void prepend_to_freelist(FreeRegionList* list);
1039   void decrement_summary_bytes(size_t bytes);
1040 
1041   bool is_in(const void* p) const override;
1042 
1043   // Return "TRUE" iff the given object address is within the collection
1044   // set. Assumes that the reference points into the heap.
1045   inline bool is_in_cset(const HeapRegion *hr);
1046   inline bool is_in_cset(oop obj);
1047   inline bool is_in_cset(HeapWord* addr);
1048 
1049   inline bool is_in_cset_or_humongous(const oop obj);
1050 
1051  private:
1052   // This array is used for a quick test on whether a reference points into
1053   // the collection set or not. Each of the array's elements denotes whether the
1054   // corresponding region is in the collection set or not.
1055   G1HeapRegionAttrBiasedMappedArray _region_attr;
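  // Conceptually, the is_in_cset() family above reduces to a single lookup in
  // this biased array (a sketch; the actual accessors are defined inline in
  // g1CollectedHeap.inline.hpp):
  //
  //   return _region_attr.at((HeapWord*)obj).is_in_cset();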
1056 
1057  public:
1058 
1059   inline G1HeapRegionAttr region_attr(const void* obj) const;
1060   inline G1HeapRegionAttr region_attr(uint idx) const;
1061 
1062   MemRegion reserved() const {
1063     return _hrm.reserved();
1064   }
1065 
1066   bool is_in_reserved(const void* addr) const {
1067     return reserved().contains(addr);
1068   }
1069 
1070   G1HotCardCache* hot_card_cache() const { return _hot_card_cache; }
1071 
1072   G1CardTable* card_table() const {
1073     return _card_table;
1074   }
1075 
1076   // Iteration functions.
1077 
1078   void object_iterate_parallel(ObjectClosure* cl, uint worker_id, HeapRegionClaimer* claimer);
1079 
1080   // Iterate over all objects, calling "cl.do_object" on each.
1081   void object_iterate(ObjectClosure* cl) override;
1082 
1083   ParallelObjectIteratorImpl* parallel_object_iterator(uint thread_num) override;
1084 
1085   // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1086   void keep_alive(oop obj) override;
1087 
1088   // Iterate over heap regions, in address order, terminating the
1089   // iteration early if the "do_heap_region" method returns "true".
1090   void heap_region_iterate(HeapRegionClosure* blk) const;
1091 
1092   // Return the region with the given index. It assumes the index is valid.
1093   inline HeapRegion* region_at(uint index) const;
1094   inline HeapRegion* region_at_or_null(uint index) const;
1095 
1096   // Return the next region (by index) that is part of the same
1097   // humongous object that hr is part of.
1098   inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;
1099 
1100   // Calculate the region index of the given address. Given address must be
1101   // within the heap.
1102   inline uint addr_to_region(HeapWord* addr) const;
1103 
1104   inline HeapWord* bottom_addr_for_region(uint index) const;
1105 
1106   // Two functions to iterate over the heap regions in parallel. Threads
1107   // compete using the HeapRegionClaimer to claim the regions before
1108   // applying the closure on them.
1109   // The _from_worker_offset version uses the HeapRegionClaimer and
  // the worker id to calculate a start offset to prevent all workers from
  // starting at the same point.
1112   void heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
1113                                                   HeapRegionClaimer* hrclaimer,
1114                                                   uint worker_id) const;
1115 
1116   void heap_region_par_iterate_from_start(HeapRegionClosure* cl,
1117                                           HeapRegionClaimer* hrclaimer) const;
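  // Illustrative parallel use (a sketch of the intended pattern, not code from
  // this file): a single claimer, sized for the number of workers, is shared by
  // all workers of a task, and each worker passes its own worker_id:
  //
  //   HeapRegionClaimer claimer(workers()->active_workers());
  //   // In the worker with id 'worker_id':
  //   g1h->heap_region_par_iterate_from_worker_offset(&cl, &claimer, worker_id);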
1118 
1119   // Iterate over all regions in the collection set in parallel.
1120   void collection_set_par_iterate_all(HeapRegionClosure* cl,
1121                                       HeapRegionClaimer* hr_claimer,
1122                                       uint worker_id);
1123 
1124   // Iterate over all regions currently in the current collection set.
1125   void collection_set_iterate_all(HeapRegionClosure* blk);
1126 
1127   // Iterate over the regions in the current increment of the collection set.
  // Starts the iteration so that the start regions of a given worker id, over the
  // set of active_workers, are evenly spread across the set of collection set
  // regions to be iterated.
1131   // The variant with the HeapRegionClaimer guarantees that the closure will be
1132   // applied to a particular region exactly once.
1133   void collection_set_iterate_increment_from(HeapRegionClosure *blk, uint worker_id) {
1134     collection_set_iterate_increment_from(blk, NULL, worker_id);
1135   }
1136   void collection_set_iterate_increment_from(HeapRegionClosure *blk, HeapRegionClaimer* hr_claimer, uint worker_id);
1137   // Iterate over the array of region indexes, uint regions[length], applying
1138   // the given HeapRegionClosure on each region. The worker_id will determine where
1139   // to start the iteration to allow for more efficient parallel iteration.
1140   void par_iterate_regions_array(HeapRegionClosure* cl,
1141                                  HeapRegionClaimer* hr_claimer,
1142                                  const uint regions[],
1143                                  size_t length,
1144                                  uint worker_id) const;
1145 
1146   // Returns the HeapRegion that contains addr. addr must not be NULL.
1147   template <class T>
1148   inline HeapRegion* heap_region_containing(const T addr) const;
1149 
1150   // Returns the HeapRegion that contains addr, or NULL if that is an uncommitted
1151   // region. addr must not be NULL.
1152   template <class T>
1153   inline HeapRegion* heap_region_containing_or_null(const T addr) const;
1154 
1155   // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
1156   // each address in the (reserved) heap is a member of exactly
1157   // one block.  The defining characteristic of a block is that it is
1158   // possible to find its size, and thus to progress forward to the next
1159   // block.  (Blocks may be of different sizes.)  Thus, blocks may
1160   // represent Java objects, or they might be free blocks in a
1161   // free-list-based heap (or subheap), as long as the two kinds are
1162   // distinguishable and the size of each is determinable.
1163 
1164   // Returns the address of the start of the "block" that contains the
  // address "addr".  We say "block" instead of "object" since some heaps
1166   // may not pack objects densely; a chunk may either be an object or a
1167   // non-object.
1168   HeapWord* block_start(const void* addr) const;
1169 
1170   // Requires "addr" to be the start of a block, and returns "TRUE" iff
1171   // the block is an object.
1172   bool block_is_obj(const HeapWord* addr) const;
1173 
1174   // Section on thread-local allocation buffers (TLABs)
1175   // See CollectedHeap for semantics.
1176 
1177   size_t tlab_capacity(Thread* ignored) const override;
1178   size_t tlab_used(Thread* ignored) const override;
1179   size_t max_tlab_size() const override;
1180   size_t unsafe_max_tlab_alloc(Thread* ignored) const override;
1181 
1182   inline bool is_in_young(const oop obj) const;
1183 
1184   // Returns "true" iff the given word_size is "very large".
1185   static bool is_humongous(size_t word_size) {
1186     // Note this has to be strictly greater-than as the TLABs
1187     // are capped at the humongous threshold and we want to
1188     // ensure that we don't try to allocate a TLAB as
1189     // humongous and that we don't allocate a humongous
1190     // object in a TLAB.
1191     return word_size > _humongous_object_threshold_in_words;
1192   }
1193 
1194   // Returns the humongous threshold for a specific region size
1195   static size_t humongous_threshold_for(size_t region_size) {
1196     return (region_size / 2);
1197   }
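  // Worked example for the two helpers above (illustrative only; the region
  // size is determined ergonomically, and 4M is just one possible value):
  // with 4M regions on a 64-bit VM a region holds 4M / 8 = 524288 words, so
  // humongous_threshold_for(524288) == 262144 words (2M). Any allocation
  // strictly larger than that is humongous, and since TLABs are capped at the
  // threshold a TLAB itself can never be humongous.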
1198 
1199   // Returns the number of regions the humongous object of the given word size
1200   // requires.
1201   static size_t humongous_obj_size_in_regions(size_t word_size);
1202 
  // Returns the maximum heap capacity.
1204   size_t max_capacity() const override;
1205 
1206   Tickspan time_since_last_collection() const { return Ticks::now() - _collection_pause_end; }
1207 
1208   // Convenience function to be used in situations where the heap type can be
1209   // asserted to be this type.
1210   static G1CollectedHeap* heap() {
1211     return named_heap<G1CollectedHeap>(CollectedHeap::G1);
1212   }
1213 
1214   void set_region_short_lived_locked(HeapRegion* hr);
1215   // add appropriate methods for any other surv rate groups
1216 
1217   G1SurvivorRegions* survivor() { return &_survivor; }
1218 
1219   uint eden_regions_count() const { return _eden.length(); }
1220   uint eden_regions_count(uint node_index) const { return _eden.regions_on_node(node_index); }
1221   uint survivor_regions_count() const { return _survivor.length(); }
1222   uint survivor_regions_count(uint node_index) const { return _survivor.regions_on_node(node_index); }
1223   size_t eden_regions_used_bytes() const { return _eden.used_bytes(); }
1224   size_t survivor_regions_used_bytes() const { return _survivor.used_bytes(); }
1225   uint young_regions_count() const { return _eden.length() + _survivor.length(); }
1226   uint old_regions_count() const { return _old_set.length(); }
1227   uint archive_regions_count() const { return _archive_set.length(); }
1228   uint humongous_regions_count() const { return _humongous_set.length(); }
1229 
1230 #ifdef ASSERT
1231   bool check_young_list_empty();
1232 #endif
1233 
1234   bool is_marked_next(oop obj) const;
1235 
1236   // Determine if an object is dead, given the object and also
1237   // the region to which the object belongs.
1238   inline bool is_obj_dead(const oop obj, const HeapRegion* hr) const;
1239 
1240   // Determine if an object is dead, given only the object itself.
1241   // This will find the region to which the object belongs and
1242   // then call the region version of the same function.
1243 
  // In addition, if the given object is NULL it is not considered dead.
1245 
1246   inline bool is_obj_dead(const oop obj) const;
1247 
1248   inline bool is_obj_dead_full(const oop obj, const HeapRegion* hr) const;
1249   inline bool is_obj_dead_full(const oop obj) const;
1250 
1251   // Mark the live object that failed evacuation in the prev bitmap.
1252   void mark_evac_failure_object(const oop obj, uint worker_id) const;
1253 
1254   G1ConcurrentMark* concurrent_mark() const { return _cm; }
1255 
1256   // Refinement
1257 
1258   G1ConcurrentRefine* concurrent_refine() const { return _cr; }
1259 
1260   // Optimized nmethod scanning support routines
1261 
1262   // Register the given nmethod with the G1 heap.
1263   void register_nmethod(nmethod* nm) override;
1264 
1265   // Unregister the given nmethod from the G1 heap.
1266   void unregister_nmethod(nmethod* nm) override;
1267 
1268   // No nmethod flushing needed.
1269   void flush_nmethod(nmethod* nm) override {}
1270 
1271   // No nmethod verification implemented.
1272   void verify_nmethod(nmethod* nm) override {}
1273 
1274   // Recalculate amount of used memory after GC. Must be called after all allocation
1275   // has finished.
1276   void update_used_after_gc(bool evacuation_failed);
1277   // Reset and re-enable the hot card cache.
1278   // Note the counts for the cards in the regions in the
1279   // collection set are reset when the collection set is freed.
1280   void reset_hot_card_cache();
1281   // Free up superfluous code root memory.
1282   void purge_code_root_memory();
1283 
1284   // Rebuild the code root lists for each region
1285   // after a full GC.
1286   void rebuild_code_roots();
1287 
1288   // Performs cleaning of data structures after class unloading.
1289   void complete_cleaning(BoolObjectClosure* is_alive, bool class_unloading_occurred);
1290 
1291   // Verification
1292 
1293   // Perform any cleanup actions necessary before allowing a verification.
1294   void prepare_for_verify() override;
1295 
1296   // Perform verification.
1297 
1298   // vo == UsePrevMarking -> use "prev" marking information,
1299   // vo == UseFullMarking -> use "next" marking bitmap but no TAMS
1300   //
1301   // NOTE: Only the "prev" marking information is guaranteed to be
1302   // consistent most of the time, so most calls to this should use
1303   // vo == UsePrevMarking.
1304   // Currently there is only one place where this is called with
1305   // vo == UseFullMarking, which is to verify the marking during a
1306   // full GC.
1307   void verify(VerifyOption vo) override;
1308 
1309   // WhiteBox testing support.
1310   bool supports_concurrent_gc_breakpoints() const override;
1311 
1312   WorkerThreads* safepoint_workers() override { return _workers; }
1313 
1314   bool is_archived_object(oop object) const override;
1315 
1316   // The methods below are here for convenience and dispatch the
1317   // appropriate method depending on value of the given VerifyOption
1318   // parameter. The values for that parameter, and their meanings,
1319   // are the same as those above.
1320 
1321   bool is_obj_dead_cond(const oop obj,
1322                         const HeapRegion* hr,
1323                         const VerifyOption vo) const;
1324 
1325   bool is_obj_dead_cond(const oop obj,
1326                         const VerifyOption vo) const;
1327 
1328   G1HeapSummary create_g1_heap_summary();
1329   G1EvacSummary create_g1_evac_summary(G1EvacStats* stats);
1330 
1331   // Printing
1332 private:
1333   void print_heap_regions() const;
1334   void print_regions_on(outputStream* st) const;
1335 
1336 public:
1337   void print_on(outputStream* st) const override;
1338   void print_extended_on(outputStream* st) const override;
1339   void print_on_error(outputStream* st) const override;
1340 
1341   void gc_threads_do(ThreadClosure* tc) const override;
1342 
1343   // Override
1344   void print_tracing_info() const override;
1345 
1346   // The following two methods are helpful for debugging RSet issues.
1347   void print_cset_rsets() PRODUCT_RETURN;
1348   void print_all_rsets() PRODUCT_RETURN;
1349 
1350   // Used to print information about locations in the hs_err file.
1351   bool print_location(outputStream* st, void* addr) const override;
1352 };
1353 
// Scoped object that performs common pre- and post-GC heap printing operations.
1355 class G1HeapPrinterMark : public StackObj {
1356   G1CollectedHeap* _g1h;
1357   G1HeapTransition _heap_transition;
1358 
1359 public:
1360   G1HeapPrinterMark(G1CollectedHeap* g1h);
1361   ~G1HeapPrinterMark();
1362 };
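// Illustrative RAII use (a sketch): constructed on the stack around the GC
// work, so the pre-GC heap state is printed/recorded in the constructor and
// the post-GC state (including the heap transition) in the destructor, e.g.
//
//   {
//     G1HeapPrinterMark hpm(g1h);
//     // ... perform the collection ...
//   }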
1363 
// Scoped object that performs common pre- and post-GC operations related to
1365 // JFR events.
1366 class G1JFRTracerMark : public StackObj {
1367 protected:
1368   STWGCTimer* _timer;
1369   GCTracer* _tracer;
1370 
1371 public:
1372   G1JFRTracerMark(STWGCTimer* timer, GCTracer* tracer);
1373   ~G1JFRTracerMark();
1374 };
1375 
1376 #endif // SHARE_GC_G1_G1COLLECTEDHEAP_HPP