1 /*
   2  * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_GC_G1_G1COLLECTEDHEAP_HPP
  26 #define SHARE_GC_G1_G1COLLECTEDHEAP_HPP
  27 
  28 #include "gc/g1/g1BarrierSet.hpp"
  29 #include "gc/g1/g1BiasedArray.hpp"
  30 #include "gc/g1/g1CardTable.hpp"
  31 #include "gc/g1/g1CardSet.hpp"
  32 #include "gc/g1/g1CollectionSet.hpp"
  33 #include "gc/g1/g1CollectorState.hpp"
  34 #include "gc/g1/g1ConcurrentMark.hpp"
  35 #include "gc/g1/g1EdenRegions.hpp"
  36 #include "gc/g1/g1EvacStats.hpp"
  37 #include "gc/g1/g1GCPauseType.hpp"
  38 #include "gc/g1/g1HeapRegionAttr.hpp"
  39 #include "gc/g1/g1HeapTransition.hpp"
  40 #include "gc/g1/g1HeapVerifier.hpp"
  41 #include "gc/g1/g1HRPrinter.hpp"
  42 #include "gc/g1/g1MonitoringSupport.hpp"
  43 #include "gc/g1/g1MonotonicArenaFreeMemoryTask.hpp"
  44 #include "gc/g1/g1MonotonicArenaFreePool.hpp"
  45 #include "gc/g1/g1NUMA.hpp"
  46 #include "gc/g1/g1SurvivorRegions.hpp"
  47 #include "gc/g1/g1YoungGCEvacFailureInjector.hpp"
  48 #include "gc/g1/heapRegionManager.hpp"
  49 #include "gc/g1/heapRegionSet.hpp"
  50 #include "gc/shared/barrierSet.hpp"
  51 #include "gc/shared/collectedHeap.hpp"
  52 #include "gc/shared/gcHeapSummary.hpp"
  53 #include "gc/shared/plab.hpp"
  54 #include "gc/shared/softRefPolicy.hpp"
  55 #include "gc/shared/taskqueue.hpp"
  56 #include "memory/allocation.hpp"
  57 #include "memory/iterator.hpp"
  58 #include "memory/memRegion.hpp"
  59 #include "runtime/mutexLocker.hpp"
  60 #include "utilities/bitMap.hpp"
  61 
  62 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
  63 // It uses the "Garbage First" heap organization and algorithm, which
  64 // may combine concurrent marking with parallel, incremental compaction of
  65 // heap subsets that will yield large amounts of garbage.
  66 
  67 // Forward declarations
  68 class G1Allocator;
  69 class G1ArchiveAllocator;
  70 class G1BatchedTask;
  71 class G1CardTableEntryClosure;
  72 class G1ConcurrentMark;
  73 class G1ConcurrentMarkThread;
  74 class G1ConcurrentRefine;
  75 class G1GCCounters;
  76 class G1GCPhaseTimes;
  77 class G1HeapSizingPolicy;
  78 class G1HotCardCache;
  79 class G1NewTracer;
  80 class G1RemSet;
  81 class G1ServiceTask;
  82 class G1ServiceThread;
  83 class GCMemoryManager;
  84 class HeapRegion;
  85 class MemoryPool;
  86 class nmethod;
  87 class ReferenceProcessor;
  88 class STWGCTimer;
  89 class WorkerThreads;
  90 
  91 typedef OverflowTaskQueue<ScannerTask, mtGC>           G1ScannerTasksQueue;
  92 typedef GenericTaskQueueSet<G1ScannerTasksQueue, mtGC> G1ScannerTasksQueueSet;
  93 
  94 typedef int RegionIdx_t;   // needs to hold [ 0..max_reserved_regions() )
  95 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
  96 
// The G1 STW is-alive closure.
// An instance is embedded into the G1CH and used as the
// (optional) _is_alive_non_header closure in the STW
// reference processor. It is also used extensively for
// reference processing during STW evacuation pauses.
 102 class G1STWIsAliveClosure : public BoolObjectClosure {
 103   G1CollectedHeap* _g1h;
 104 public:
 105   G1STWIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
 106   bool do_object_b(oop p) override;
 107 };
 108 
 109 class G1STWSubjectToDiscoveryClosure : public BoolObjectClosure {
 110   G1CollectedHeap* _g1h;
 111 public:
 112   G1STWSubjectToDiscoveryClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
 113   bool do_object_b(oop p) override;
 114 };
 115 
 116 class G1RegionMappingChangedListener : public G1MappingChangedListener {
 117  private:
 118   void reset_from_card_cache(uint start_idx, size_t num_regions);
 119  public:
 120   void on_commit(uint start_idx, size_t num_regions, bool zero_filled) override;
 121 };
 122 
 123 class G1CollectedHeap : public CollectedHeap {
 124   friend class VM_G1CollectForAllocation;
 125   friend class VM_G1CollectFull;
 126   friend class VM_G1TryInitiateConcMark;
 127   friend class VMStructs;
 128   friend class MutatorAllocRegion;
 129   friend class G1FullCollector;
 130   friend class G1GCAllocRegion;
 131   friend class G1HeapVerifier;
 132 
 133   friend class G1YoungGCVerifierMark;
 134 
 135   // Closures used in implementation.
 136   friend class G1EvacuateRegionsTask;
 137   friend class G1PLABAllocator;
 138 
 139   // Other related classes.
 140   friend class G1HeapPrinterMark;
 141   friend class HeapRegionClaimer;
 142 
 143   // Testing classes.
 144   friend class G1CheckRegionAttrTableClosure;
 145 
 146 private:
 147   G1ServiceThread* _service_thread;
 148   G1ServiceTask* _periodic_gc_task;
 149   G1MonotonicArenaFreeMemoryTask* _free_arena_memory_task;
 150 
 151   WorkerThreads* _workers;
 152   G1CardTable* _card_table;
 153 
 154   Ticks _collection_pause_end;
 155 
 156   SoftRefPolicy      _soft_ref_policy;
 157 
 158   static size_t _humongous_object_threshold_in_words;
 159 
 160   // These sets keep track of old, archive and humongous regions respectively.
 161   HeapRegionSet _old_set;
 162   HeapRegionSet _archive_set;
 163   HeapRegionSet _humongous_set;
 164 
 165   // Young gen memory statistics before GC.
 166   G1MonotonicArenaMemoryStats _young_gen_card_set_stats;
 167   // Collection set candidates memory statistics after GC.
 168   G1MonotonicArenaMemoryStats _collection_set_candidates_card_set_stats;
 169 
 170   // The block offset table for the G1 heap.
 171   G1BlockOffsetTable* _bot;
 172 
 173 public:
 174   void rebuild_free_region_list();
 175   // Start a new incremental collection set for the next pause.
 176   void start_new_collection_set();
 177 
 178   void prepare_region_for_full_compaction(HeapRegion* hr);
 179 
 180 private:
 181   // Rebuilds the region sets / lists so that they are repopulated to
 182   // reflect the contents of the heap. The only exception is the
 183   // humongous set which was not torn down in the first place. If
 184   // free_list_only is true, it will only rebuild the free list.
 185   void rebuild_region_sets(bool free_list_only);
 186 
 187   // Callback for region mapping changed events.
 188   G1RegionMappingChangedListener _listener;
 189 
 190   // Handle G1 NUMA support.
 191   G1NUMA* _numa;
 192 
 193   // The sequence of all heap regions in the heap.
 194   HeapRegionManager _hrm;
 195 
 196   // Manages all allocations with regions except humongous object allocations.
 197   G1Allocator* _allocator;
 198 
 199   G1YoungGCEvacFailureInjector _evac_failure_injector;
 200 
 201   // Manages all heap verification.
 202   G1HeapVerifier* _verifier;
 203 
 204   // Outside of GC pauses, the number of bytes used in all regions other
 205   // than the current allocation region(s).
 206   volatile size_t _summary_bytes_used;
 207 
 208   void increase_used(size_t bytes);
 209   void decrease_used(size_t bytes);
 210 
 211   void set_used(size_t bytes);
 212 
 213   // Number of bytes used in all regions during GC. Typically changed when
 214   // retiring a GC alloc region.
 215   size_t _bytes_used_during_gc;
 216 
 217 public:
 218   size_t bytes_used_during_gc() const { return _bytes_used_during_gc; }
 219 
 220 private:
 221   // Class that handles archive allocation ranges.
 222   G1ArchiveAllocator* _archive_allocator;
 223 
 224   // GC allocation statistics policy for survivors.
 225   G1EvacStats _survivor_evac_stats;
 226 
 227   // GC allocation statistics policy for tenured objects.
 228   G1EvacStats _old_evac_stats;
 229 
 230   // Helper for monitoring and management support.
 231   G1MonitoringSupport* _monitoring_support;
 232 
  uint _num_humongous_objects; // Current number of (all) humongous objects found in the heap.
 234   uint _num_humongous_reclaim_candidates; // Number of humongous object eager reclaim candidates.
 235 public:
 236   uint num_humongous_objects() const { return _num_humongous_objects; }
 237   uint num_humongous_reclaim_candidates() const { return _num_humongous_reclaim_candidates; }
 238   bool has_humongous_reclaim_candidates() const { return _num_humongous_reclaim_candidates > 0; }
 239 
 240   void set_humongous_stats(uint num_humongous_total, uint num_humongous_candidates);
 241 
 242   bool should_sample_collection_set_candidates() const;
 243   void set_collection_set_candidates_stats(G1MonotonicArenaMemoryStats& stats);
 244   void set_young_gen_card_set_stats(const G1MonotonicArenaMemoryStats& stats);
 245 
 246 private:
 247 
 248   G1HRPrinter _hr_printer;
 249 
 250   // Return true if an explicit GC should start a concurrent cycle instead
 251   // of doing a STW full GC. A concurrent cycle should be started if:
 252   // (a) cause == _g1_humongous_allocation,
 253   // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent,
 254   // (c) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent,
 255   // (d) cause == _wb_conc_mark or _wb_breakpoint,
 256   // (e) cause == _g1_periodic_collection and +G1PeriodicGCInvokesConcurrent.
 257   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 258 
 259   // Attempt to start a concurrent cycle with the indicated cause.
 260   // precondition: should_do_concurrent_full_gc(cause)
 261   bool try_collect_concurrently(GCCause::Cause cause,
 262                                 uint gc_counter,
 263                                 uint old_marking_started_before);
 264 
 265   // indicates whether we are in young or mixed GC mode
 266   G1CollectorState _collector_state;
 267 
 268   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
 269   // concurrent cycles) we have started.
 270   volatile uint _old_marking_cycles_started;
 271 
 272   // Keeps track of how many "old marking cycles" (i.e., Full GCs or
 273   // concurrent cycles) we have completed.
 274   volatile uint _old_marking_cycles_completed;
 275 
 276   // This is a non-product method that is helpful for testing. It is
 277   // called at the end of a GC and artificially expands the heap by
 278   // allocating a number of dead regions. This way we can induce very
 279   // frequent marking cycles and stress the cleanup / concurrent
 280   // cleanup code more (as all the regions that will be allocated by
 281   // this method will be found dead by the marking cycle).
 282   void allocate_dummy_regions() PRODUCT_RETURN;
 283 
 284   // Create a memory mapper for auxiliary data structures of the given size and
 285   // translation factor.
 286   static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
 287                                                          size_t size,
 288                                                          size_t translation_factor);
 289 
 290   void trace_heap(GCWhen::Type when, const GCTracer* tracer) override;
 291 
 292   // These are macros so that, if the assert fires, we get the correct
 293   // line number, file, etc.
 294 
 295 #define heap_locking_asserts_params(_extra_message_)                          \
 296   "%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",            \
 297   (_extra_message_),                                                          \
 298   BOOL_TO_STR(Heap_lock->owned_by_self()),                                    \
 299   BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),                       \
 300   BOOL_TO_STR(Thread::current()->is_VM_thread())
 301 
 302 #define assert_heap_locked()                                                  \
 303   do {                                                                        \
 304     assert(Heap_lock->owned_by_self(),                                        \
 305            heap_locking_asserts_params("should be holding the Heap_lock"));   \
 306   } while (0)
 307 
 308 #define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_)             \
 309   do {                                                                        \
 310     assert(Heap_lock->owned_by_self() ||                                      \
 311            (SafepointSynchronize::is_at_safepoint() &&                        \
 312              ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \
 313            heap_locking_asserts_params("should be holding the Heap_lock or "  \
 314                                         "should be at a safepoint"));         \
 315   } while (0)
 316 
 317 #define assert_heap_locked_and_not_at_safepoint()                             \
 318   do {                                                                        \
 319     assert(Heap_lock->owned_by_self() &&                                      \
 320                                     !SafepointSynchronize::is_at_safepoint(), \
 321           heap_locking_asserts_params("should be holding the Heap_lock and "  \
 322                                        "should not be at a safepoint"));      \
 323   } while (0)
 324 
 325 #define assert_heap_not_locked()                                              \
 326   do {                                                                        \
 327     assert(!Heap_lock->owned_by_self(),                                       \
 328         heap_locking_asserts_params("should not be holding the Heap_lock"));  \
 329   } while (0)
 330 
 331 #define assert_heap_not_locked_and_not_at_safepoint()                         \
 332   do {                                                                        \
 333     assert(!Heap_lock->owned_by_self() &&                                     \
 334                                     !SafepointSynchronize::is_at_safepoint(), \
 335       heap_locking_asserts_params("should not be holding the Heap_lock and "  \
 336                                    "should not be at a safepoint"));          \
 337   } while (0)
 338 
 339 #define assert_at_safepoint_on_vm_thread()                                    \
 340   do {                                                                        \
 341     assert_at_safepoint();                                                    \
 342     assert(Thread::current_or_null() != NULL, "no current thread");           \
 343     assert(Thread::current()->is_VM_thread(), "current thread is not VM thread"); \
 344   } while (0)
 345 
 346 #ifdef ASSERT
 347 #define assert_used_and_recalculate_used_equal(g1h)                           \
 348   do {                                                                        \
 349     size_t cur_used_bytes = g1h->used();                                      \
 350     size_t recal_used_bytes = g1h->recalculate_used();                        \
 351     assert(cur_used_bytes == recal_used_bytes, "Used(" SIZE_FORMAT ") is not" \
 352            " same as recalculated used(" SIZE_FORMAT ").",                    \
 353            cur_used_bytes, recal_used_bytes);                                 \
 354   } while (0)
 355 #else
 356 #define assert_used_and_recalculate_used_equal(g1h) do {} while(0)
 357 #endif
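
  // Illustrative usage sketch for the locking assert macros above (hypothetical
  // callers, not part of this interface):
  //
  //   HeapWord* attempt_allocation_locked(size_t word_size) {   // hypothetical
  //     assert_heap_locked_and_not_at_safepoint();
  //     ...
  //   }
  //
  //   void do_pause_time_work() {                                // hypothetical
  //     assert_at_safepoint_on_vm_thread();
  //     ...
  //   }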
 358 
 359   // The young region list.
 360   G1EdenRegions _eden;
 361   G1SurvivorRegions _survivor;
 362 
 363   STWGCTimer* _gc_timer_stw;
 364 
 365   G1NewTracer* _gc_tracer_stw;
 366 
 367   // The current policy object for the collector.
 368   G1Policy* _policy;
 369   G1HeapSizingPolicy* _heap_sizing_policy;
 370 
 371   G1CollectionSet _collection_set;
 372 
 373   // Try to allocate a single non-humongous HeapRegion sufficient for
 374   // an allocation of the given word_size. If do_expand is true,
 375   // attempt to expand the heap if necessary to satisfy the allocation
  // request. 'type' specifies the type of region to be allocated. (Use constants
  // Old, Eden, Humongous, Survivor defined in HeapRegionType.)
 378   HeapRegion* new_region(size_t word_size,
 379                          HeapRegionType type,
 380                          bool do_expand,
 381                          uint node_index = G1NUMA::AnyNodeIndex);
 382 
 383   // Initialize a contiguous set of free regions of length num_regions
 384   // and starting at index first so that they appear as a single
 385   // humongous region.
 386   HeapWord* humongous_obj_allocate_initialize_regions(HeapRegion* first_hr,
 387                                                       uint num_regions,
 388                                                       size_t word_size);
 389 
 390   // Attempt to allocate a humongous object of the given size. Return
 391   // NULL if unsuccessful.
 392   HeapWord* humongous_obj_allocate(size_t word_size);
 393 
 394   // The following two methods, allocate_new_tlab() and
 395   // mem_allocate(), are the two main entry points from the runtime
 396   // into the G1's allocation routines. They have the following
 397   // assumptions:
 398   //
 399   // * They should both be called outside safepoints.
 400   //
 401   // * They should both be called without holding the Heap_lock.
 402   //
 403   // * All allocation requests for new TLABs should go to
 404   //   allocate_new_tlab().
 405   //
 406   // * All non-TLAB allocation requests should go to mem_allocate().
 407   //
 408   // * If either call cannot satisfy the allocation request using the
 409   //   current allocating region, they will try to get a new one. If
 410   //   this fails, they will attempt to do an evacuation pause and
 411   //   retry the allocation.
 412   //
 413   // * If all allocation attempts fail, even after trying to schedule
 414   //   an evacuation pause, allocate_new_tlab() will return NULL,
 415   //   whereas mem_allocate() will attempt a heap expansion and/or
 416   //   schedule a Full GC.
 417   //
 418   // * We do not allow humongous-sized TLABs. So, allocate_new_tlab
 419   //   should never be called with word_size being humongous. All
 420   //   humongous allocation requests should go to mem_allocate() which
 421   //   will satisfy them with a special path.
 422 
 423   HeapWord* allocate_new_tlab(size_t min_size,
 424                               size_t requested_size,
 425                               size_t* actual_size) override;
 426 
 427   HeapWord* mem_allocate(size_t word_size,
 428                          bool*  gc_overhead_limit_was_exceeded) override;
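
  // Illustrative sketch of the division of labor described above (hypothetical
  // call sites; both methods are invoked through the shared CollectedHeap
  // interface rather than directly):
  //
  //   // TLAB refill: never humongous-sized, may return NULL after a failed pause.
  //   size_t actual_size = 0;
  //   HeapWord* tlab = heap->allocate_new_tlab(min_size, requested_size, &actual_size);
  //
  //   // Everything else, including humongous requests, goes through mem_allocate().
  //   bool gc_overhead_limit_was_exceeded = false;
  //   HeapWord* obj = heap->mem_allocate(word_size, &gc_overhead_limit_was_exceeded);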
 429 
 430   // First-level mutator allocation attempt: try to allocate out of
 431   // the mutator alloc region without taking the Heap_lock. This
 432   // should only be used for non-humongous allocations.
 433   inline HeapWord* attempt_allocation(size_t min_word_size,
 434                                       size_t desired_word_size,
 435                                       size_t* actual_word_size);
 436 
 437   // Second-level mutator allocation attempt: take the Heap_lock and
 438   // retry the allocation attempt, potentially scheduling a GC
 439   // pause. This should only be used for non-humongous allocations.
 440   HeapWord* attempt_allocation_slow(size_t word_size);
 441 
 442   // Takes the Heap_lock and attempts a humongous allocation. It can
 443   // potentially schedule a GC pause.
 444   HeapWord* attempt_allocation_humongous(size_t word_size);
 445 
 446   // Allocation attempt that should be called during safepoints (e.g.,
 447   // at the end of a successful GC). expect_null_mutator_alloc_region
 448   // specifies whether the mutator alloc region is expected to be NULL
 449   // or not.
 450   HeapWord* attempt_allocation_at_safepoint(size_t word_size,
 451                                             bool expect_null_mutator_alloc_region);
 452 
 453   // These methods are the "callbacks" from the G1AllocRegion class.
 454 
 455   // For mutator alloc regions.
 456   HeapRegion* new_mutator_alloc_region(size_t word_size, bool force, uint node_index);
 457   void retire_mutator_alloc_region(HeapRegion* alloc_region,
 458                                    size_t allocated_bytes);
 459 
 460   // For GC alloc regions.
 461   bool has_more_regions(G1HeapRegionAttr dest);
 462   HeapRegion* new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest, uint node_index);
 463   void retire_gc_alloc_region(HeapRegion* alloc_region,
 464                               size_t allocated_bytes, G1HeapRegionAttr dest);
 465 
 466   // - if explicit_gc is true, the GC is for a System.gc() etc,
 467   //   otherwise it's for a failed allocation.
 468   // - if clear_all_soft_refs is true, all soft references should be
 469   //   cleared during the GC.
 470   // - if do_maximal_compaction is true, full gc will do a maximally
 471   //   compacting collection, leaving no dead wood.
 472   // - it returns false if it is unable to do the collection due to the
 473   //   GC locker being active, true otherwise.
 474   bool do_full_collection(bool explicit_gc,
 475                           bool clear_all_soft_refs,
 476                           bool do_maximal_compaction);
 477 
 478   // Callback from VM_G1CollectFull operation, or collect_as_vm_thread.
 479   void do_full_collection(bool clear_all_soft_refs) override;
 480 
 481   // Helper to do a full collection that clears soft references.
 482   bool upgrade_to_full_collection();
 483 
 484   // Callback from VM_G1CollectForAllocation operation.
 485   // This function does everything necessary/possible to satisfy a
 486   // failed allocation request (including collection, expansion, etc.)
 487   HeapWord* satisfy_failed_allocation(size_t word_size,
 488                                       bool* succeeded);
 489   // Internal helpers used during full GC to split it up to
 490   // increase readability.
 491   bool abort_concurrent_cycle();
 492   void verify_before_full_collection(bool explicit_gc);
 493   void prepare_heap_for_full_collection();
 494   void prepare_heap_for_mutators();
 495   void abort_refinement();
 496   void verify_after_full_collection();
 497   void print_heap_after_full_collection();
 498 
 499   // Helper method for satisfy_failed_allocation()
 500   HeapWord* satisfy_failed_allocation_helper(size_t word_size,
 501                                              bool do_gc,
 502                                              bool maximal_compaction,
 503                                              bool expect_null_mutator_alloc_region,
 504                                              bool* gc_succeeded);
 505 
  // Attempt to expand the heap sufficiently to support an allocation
  // of the given "word_size". If successful, perform the allocation
  // and return the address of the allocated block, or else "NULL".
 510   HeapWord* expand_and_allocate(size_t word_size);
 511 
 512   void verify_numa_regions(const char* desc);
 513 
 514 public:
  // During a concurrent start pause we may install a pending list head that is
  // not otherwise reachable; ensure that it is marked in the bitmap for
  // concurrent marking to discover.
 518   void make_pending_list_reachable();
 519 
 520   G1ServiceThread* service_thread() const { return _service_thread; }
 521 
 522   WorkerThreads* workers() const { return _workers; }
 523 
 524   // Run the given batch task using the workers.
 525   void run_batch_task(G1BatchedTask* cl);
 526 
  // Return the "optimal" number of chunks per region to use for claiming areas
  // within a region.
  // The returned value is a trade-off between granularity of work distribution and
  // the memory usage and maintenance costs of the claim table.
  // Testing showed that 64 chunks for 1M/2M regions, 128 for 4M/8M regions,
  // 256 for 16M/32M regions, and so on is a good trade-off.
 533   static uint get_chunks_per_region();
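
  // Worked example of the trade-off above: with 8M regions and 128 chunks per
  // region, each chunk covers 8M / 128 = 64K, i.e. work is handed out in 64K
  // claims while the per-region claim table stays at 128 entries.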
 534 
 535   G1Allocator* allocator() {
 536     return _allocator;
 537   }
 538 
 539   G1YoungGCEvacFailureInjector* evac_failure_injector() { return &_evac_failure_injector; }
 540 
 541   G1HeapVerifier* verifier() {
 542     return _verifier;
 543   }
 544 
 545   G1MonitoringSupport* monitoring_support() {
 546     assert(_monitoring_support != nullptr, "should have been initialized");
 547     return _monitoring_support;
 548   }
 549 
 550   void resize_heap_if_necessary();
 551 
 552   // Check if there is memory to uncommit and if so schedule a task to do it.
 553   void uncommit_regions_if_necessary();
 554   // Immediately uncommit uncommittable regions.
 555   uint uncommit_regions(uint region_limit);
 556   bool has_uncommittable_regions();
 557 
 558   G1NUMA* numa() const { return _numa; }
 559 
 560   // Expand the garbage-first heap by at least the given size (in bytes!).
 561   // Returns true if the heap was expanded by the requested amount;
 562   // false otherwise.
 563   // (Rounds up to a HeapRegion boundary.)
 564   bool expand(size_t expand_bytes, WorkerThreads* pretouch_workers = NULL, double* expand_time_ms = NULL);
 565   bool expand_single_region(uint node_index);
 566 
 567   // Returns the PLAB statistics for a given destination.
 568   inline G1EvacStats* alloc_buffer_stats(G1HeapRegionAttr dest);
 569 
 570   // Determines PLAB size for a given destination.
 571   inline size_t desired_plab_sz(G1HeapRegionAttr dest);
  // Clamp the given PLAB word size to allowed values. Prevents humongous PLAB sizes
  // for two reasons:
  // * PLABs are allocated using paths similar to those for oops, but should
  //   never be in a humongous region
  // * Allowing humongous PLABs needlessly churns the region free lists
 577   inline size_t clamp_plab_size(size_t value) const;
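
  // Illustrative sketch (hypothetical caller): a PLAB refill path would first
  // compute the desired size for the destination and then clamp it, e.g.
  //
  //   size_t plab_word_size = clamp_plab_size(desired_plab_sz(dest));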
 578 
 579   // Do anything common to GC's.
 580   void gc_prologue(bool full);
 581   void gc_epilogue(bool full);
 582 
 583   // Does the given region fulfill remembered set based eager reclaim candidate requirements?
 584   bool is_potential_eager_reclaim_candidate(HeapRegion* r) const;
 585 
 586   inline bool is_humongous_reclaim_candidate(uint region);
 587 
 588   // Remove from the reclaim candidate set.  Also remove from the
 589   // collection set so that later encounters avoid the slow path.
 590   inline void set_humongous_is_live(oop obj);
 591 
 592   // Register the given region to be part of the collection set.
 593   inline void register_humongous_candidate_region_with_region_attr(uint index);
 594 
 595   // We register a region with the fast "in collection set" test. We
 596   // simply set to true the array slot corresponding to this region.
 597   void register_young_region_with_region_attr(HeapRegion* r) {
 598     _region_attr.set_in_young(r->hrm_index());
 599   }
 600   inline void register_new_survivor_region_with_region_attr(HeapRegion* r);
 601   inline void register_region_with_region_attr(HeapRegion* r);
 602   inline void register_old_region_with_region_attr(HeapRegion* r);
 603   inline void register_optional_region_with_region_attr(HeapRegion* r);
 604 
 605   void clear_region_attr(const HeapRegion* hr) {
 606     _region_attr.clear(hr);
 607   }
 608 
 609   void clear_region_attr() {
 610     _region_attr.clear();
 611   }
 612 
 613   // Verify that the G1RegionAttr remset tracking corresponds to actual remset tracking
 614   // for all regions.
 615   void verify_region_attr_remset_is_tracked() PRODUCT_RETURN;
 616 
 617   void clear_bitmap_for_region(HeapRegion* hr);
 618 
 619   bool is_user_requested_concurrent_full_gc(GCCause::Cause cause);
 620 
 621   // This is called at the start of either a concurrent cycle or a Full
 622   // GC to update the number of old marking cycles started.
 623   void increment_old_marking_cycles_started();
 624 
 625   // This is called at the end of either a concurrent cycle or a Full
 626   // GC to update the number of old marking cycles completed. Those two
 627   // can happen in a nested fashion, i.e., we start a concurrent
 628   // cycle, a Full GC happens half-way through it which ends first,
 629   // and then the cycle notices that a Full GC happened and ends
 630   // too. The concurrent parameter is a boolean to help us do a bit
 631   // tighter consistency checking in the method. If concurrent is
 632   // false, the caller is the inner caller in the nesting (i.e., the
 633   // Full GC). If concurrent is true, the caller is the outer caller
 634   // in this nesting (i.e., the concurrent cycle). Further nesting is
 635   // not currently supported. The end of this call also notifies
 636   // the G1OldGCCount_lock in case a Java thread is waiting for a full
 637   // GC to happen (e.g., it called System.gc() with
 638   // +ExplicitGCInvokesConcurrent).
 639   // whole_heap_examined should indicate that during that old marking
 640   // cycle the whole heap has been examined for live objects (as opposed
 641   // to only parts, or aborted before completion).
 642   void increment_old_marking_cycles_completed(bool concurrent, bool whole_heap_examined);
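
  // Illustrative timeline of the nesting described above (the argument values
  // shown are only an example):
  //   concurrent cycle starts           -> increment_old_marking_cycles_started()
  //   Full GC interrupts and ends first -> increment_old_marking_cycles_completed(false /* concurrent */, true)
  //   concurrent cycle notices it, ends -> increment_old_marking_cycles_completed(true /* concurrent */, false)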
 643 
 644   uint old_marking_cycles_started() const {
 645     return _old_marking_cycles_started;
 646   }
 647 
 648   uint old_marking_cycles_completed() const {
 649     return _old_marking_cycles_completed;
 650   }
 651 
 652   G1HRPrinter* hr_printer() { return &_hr_printer; }
 653 
 654   // Allocates a new heap region instance.
 655   HeapRegion* new_heap_region(uint hrs_index, MemRegion mr);
 656 
 657   // Allocate the highest free region in the reserved heap. This will commit
 658   // regions as necessary.
 659   HeapRegion* alloc_highest_free_region();
 660 
 661   // Frees a region by resetting its metadata and adding it to the free list
 662   // passed as a parameter (this is usually a local list which will be appended
 663   // to the master free list later or NULL if free list management is handled
 664   // in another way).
 665   // Callers must ensure they are the only one calling free on the given region
 666   // at the same time.
 667   void free_region(HeapRegion* hr, FreeRegionList* free_list);
 668 
 669   // It dirties the cards that cover the block so that the post
 670   // write barrier never queues anything when updating objects on this
 671   // block. It is assumed (and in fact we assert) that the block
 672   // belongs to a young region.
 673   inline void dirty_young_block(HeapWord* start, size_t word_size);
 674 
 675   // Frees a humongous region by collapsing it into individual regions
 676   // and calling free_region() for each of them. The freed regions
 677   // will be added to the free list that's passed as a parameter (this
 678   // is usually a local list which will be appended to the master free
 679   // list later).
 680   // The method assumes that only a single thread is ever calling
 681   // this for a particular region at once.
 682   void free_humongous_region(HeapRegion* hr,
 683                              FreeRegionList* free_list);
 684 
 685   // Facility for allocating in 'archive' regions in high heap memory and
 686   // recording the allocated ranges. These should all be called from the
 687   // VM thread at safepoints, without the heap lock held. They can be used
 688   // to create and archive a set of heap regions which can be mapped at the
 689   // same fixed addresses in a subsequent JVM invocation.
 690   void begin_archive_alloc_range(bool open = false);
 691 
 692   // Check if the requested size would be too large for an archive allocation.
 693   bool is_archive_alloc_too_large(size_t word_size);
 694 
 695   // Allocate memory of the requested size from the archive region. This will
 696   // return NULL if the size is too large or if no memory is available. It
 697   // does not trigger a garbage collection.
 698   HeapWord* archive_mem_allocate(size_t word_size);
 699 
 700   // Optionally aligns the end address and returns the allocated ranges in
 701   // an array of MemRegions in order of ascending addresses.
 702   void end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
 703                                size_t end_alignment_in_bytes = 0);
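
  // Illustrative sketch of the archive allocation protocol above (hypothetical
  // dumping code, run on the VM thread at a safepoint, Heap_lock not held):
  //
  //   g1h->begin_archive_alloc_range();
  //   HeapWord* mem = g1h->archive_mem_allocate(word_size);   // NULL if too large or no space
  //   ...
  //   GrowableArray<MemRegion> ranges;
  //   g1h->end_archive_alloc_range(&ranges);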
 704 
 705   // Facility for allocating a fixed range within the heap and marking
 706   // the containing regions as 'archive'. For use at JVM init time, when the
 707   // caller may mmap archived heap data at the specified range(s).
 708   // Verify that the MemRegions specified in the argument array are within the
 709   // reserved heap.
 710   bool check_archive_addresses(MemRegion* range, size_t count);
 711 
 712   // Commit the appropriate G1 regions containing the specified MemRegions
 713   // and mark them as 'archive' regions. The regions in the array must be
 714   // non-overlapping and in order of ascending address.
 715   bool alloc_archive_regions(MemRegion* range, size_t count, bool open);
 716 
 717   // Insert any required filler objects in the G1 regions around the specified
 718   // ranges to make the regions parseable. This must be called after
 719   // alloc_archive_regions, and after class loading has occurred.
 720   void fill_archive_regions(MemRegion* range, size_t count);
 721 
 722   // Populate the G1BlockOffsetTablePart for archived regions with the given
 723   // memory ranges.
 724   void populate_archive_regions_bot_part(MemRegion* range, size_t count);
 725 
 726   // For each of the specified MemRegions, uncommit the containing G1 regions
 727   // which had been allocated by alloc_archive_regions. This should be called
 728   // rather than fill_archive_regions at JVM init time if the archive file
 729   // mapping failed, with the same non-overlapping and sorted MemRegion array.
 730   void dealloc_archive_regions(MemRegion* range, size_t count);
 731 
 732 private:
 733 
 734   // Shrink the garbage-first heap by at most the given size (in bytes!).
 735   // (Rounds down to a HeapRegion boundary.)
 736   void shrink(size_t shrink_bytes);
 737   void shrink_helper(size_t expand_bytes);
 738 
 739   // Schedule the VM operation that will do an evacuation pause to
 740   // satisfy an allocation request of word_size. *succeeded will
 741   // return whether the VM operation was successful (it did do an
 742   // evacuation pause) or not (another thread beat us to it or the GC
 743   // locker was active). Given that we should not be holding the
 744   // Heap_lock when we enter this method, we will pass the
 745   // gc_count_before (i.e., total_collections()) as a parameter since
 746   // it has to be read while holding the Heap_lock. Currently, both
 747   // methods that call do_collection_pause() release the Heap_lock
 748   // before the call, so it's easy to read gc_count_before just before.
 749   HeapWord* do_collection_pause(size_t         word_size,
 750                                 uint           gc_count_before,
 751                                 bool*          succeeded,
 752                                 GCCause::Cause gc_cause);
 753 
 754   // Perform an incremental collection at a safepoint, possibly
 755   // followed by a by-policy upgrade to a full collection.  Returns
 756   // false if unable to do the collection due to the GC locker being
 757   // active, true otherwise.
 758   // precondition: at safepoint on VM thread
 759   // precondition: !is_gc_active()
 760   bool do_collection_pause_at_safepoint();
 761 
 762   // Helper for do_collection_pause_at_safepoint, containing the guts
 763   // of the incremental collection pause, executed by the vm thread.
 764   void do_collection_pause_at_safepoint_helper();
 765 
 766   G1HeapVerifier::G1VerifyType young_collection_verify_type() const;
 767   void verify_before_young_collection(G1HeapVerifier::G1VerifyType type);
 768   void verify_after_young_collection(G1HeapVerifier::G1VerifyType type);
 769 
 770 public:
 771   // Start a concurrent cycle.
 772   void start_concurrent_cycle(bool concurrent_operation_is_full_mark);
 773 
 774   void prepare_tlabs_for_mutator();
 775 
 776   void retire_tlabs();
 777 
 778   void expand_heap_after_young_collection();
 779   // Update object copying statistics.
 780   void record_obj_copy_mem_stats();
 781 
 782 private:
 783   // The hot card cache for remembered set insertion optimization.
 784   G1HotCardCache* _hot_card_cache;
 785 
 786   // The g1 remembered set of the heap.
 787   G1RemSet* _rem_set;
 788   // Global card set configuration
 789   G1CardSetConfiguration _card_set_config;
 790 
 791   G1MonotonicArenaFreePool _card_set_freelist_pool;
 792 
 793 public:
 794   // After a collection pause, reset eden and the collection set.
 795   void clear_eden();
 796   void clear_collection_set();
 797 
 798   // Abandon the current collection set without recording policy
 799   // statistics or updating free lists.
 800   void abandon_collection_set(G1CollectionSet* collection_set);
 801 
 802   // The concurrent marker (and the thread it runs in.)
 803   G1ConcurrentMark* _cm;
 804   G1ConcurrentMarkThread* _cm_thread;
 805 
 806   // The concurrent refiner.
 807   G1ConcurrentRefine* _cr;
 808 
 809   // The parallel task queues
 810   G1ScannerTasksQueueSet *_task_queues;
 811 
 812   // ("Weak") Reference processing support.
 813   //
 814   // G1 has 2 instances of the reference processor class.
 815   //
 816   // One (_ref_processor_cm) handles reference object discovery and subsequent
 817   // processing during concurrent marking cycles. Discovery is enabled/disabled
 818   // at the start/end of a concurrent marking cycle.
 819   //
 820   // The other (_ref_processor_stw) handles reference object discovery and
 821   // processing during incremental evacuation pauses and full GC pauses.
 822   //
 823   // ## Incremental evacuation pauses
 824   //
 825   // STW ref processor discovery is enabled/disabled at the start/end of an
 826   // incremental evacuation pause. No particular handling of the CM ref
 827   // processor is needed, apart from treating the discovered references as
 828   // roots; CM discovery does not need to be temporarily disabled as all
 829   // marking threads are paused during incremental evacuation pauses.
 830   //
 831   // ## Full GC pauses
 832   //
 833   // We abort any ongoing concurrent marking cycle, disable CM discovery, and
 834   // temporarily substitute a new closure for the STW ref processor's
 835   // _is_alive_non_header field (old value is restored after the full GC). Then
 836   // STW ref processor discovery is enabled, and marking & compaction
 837   // commences.
 838 
 839   // The (stw) reference processor...
 840   ReferenceProcessor* _ref_processor_stw;
 841 
 842   // During reference object discovery, the _is_alive_non_header
 843   // closure (if non-null) is applied to the referent object to
 844   // determine whether the referent is live. If so then the
 845   // reference object does not need to be 'discovered' and can
 846   // be treated as a regular oop. This has the benefit of reducing
 847   // the number of 'discovered' reference objects that need to
 848   // be processed.
 849   //
 850   // Instance of the is_alive closure for embedding into the
 851   // STW reference processor as the _is_alive_non_header field.
 852   // Supplying a value for the _is_alive_non_header field is
 853   // optional but doing so prevents unnecessary additions to
 854   // the discovered lists during reference discovery.
 855   G1STWIsAliveClosure _is_alive_closure_stw;
 856 
 857   G1STWSubjectToDiscoveryClosure _is_subject_to_discovery_stw;
 858 
 859   // The (concurrent marking) reference processor...
 860   ReferenceProcessor* _ref_processor_cm;
 861 
 862   // Instance of the concurrent mark is_alive closure for embedding
 863   // into the Concurrent Marking reference processor as the
 864   // _is_alive_non_header field. Supplying a value for the
 865   // _is_alive_non_header field is optional but doing so prevents
 866   // unnecessary additions to the discovered lists during reference
 867   // discovery.
 868   G1CMIsAliveClosure _is_alive_closure_cm;
 869 
 870   G1CMSubjectToDiscoveryClosure _is_subject_to_discovery_cm;
 871 public:
 872 
 873   G1ScannerTasksQueueSet* task_queues() const;
 874   G1ScannerTasksQueue* task_queue(uint i) const;
 875 
 876   // Create a G1CollectedHeap.
 877   // Must call the initialize method afterwards.
 878   // May not return if something goes wrong.
 879   G1CollectedHeap();
 880 
 881 private:
 882   jint initialize_concurrent_refinement();
 883   jint initialize_service_thread();
 884 public:
 885   // Initialize the G1CollectedHeap to have the initial and
 886   // maximum sizes and remembered and barrier sets
 887   // specified by the policy object.
 888   jint initialize() override;
 889 
 890   // Returns whether concurrent mark threads (and the VM) are about to terminate.
 891   bool concurrent_mark_is_terminating() const;
 892 
 893   void stop() override;
 894   void safepoint_synchronize_begin() override;
 895   void safepoint_synchronize_end() override;
 896 
 897   // Does operations required after initialization has been done.
 898   void post_initialize() override;
 899 
 900   // Initialize weak reference processing.
 901   void ref_processing_init();
 902 
 903   Name kind() const override {
 904     return CollectedHeap::G1;
 905   }
 906 
 907   const char* name() const override {
 908     return "G1";
 909   }
 910 
 911   const G1CollectorState* collector_state() const { return &_collector_state; }
 912   G1CollectorState* collector_state() { return &_collector_state; }
 913 
 914   // The current policy object for the collector.
 915   G1Policy* policy() const { return _policy; }
 916   // The remembered set.
 917   G1RemSet* rem_set() const { return _rem_set; }
 918 
 919   const G1MonotonicArenaFreePool* card_set_freelist_pool() const { return &_card_set_freelist_pool; }
 920   G1MonotonicArenaFreePool* card_set_freelist_pool() { return &_card_set_freelist_pool; }
 921 
 922   inline G1GCPhaseTimes* phase_times() const;
 923 
 924   const G1CollectionSet* collection_set() const { return &_collection_set; }
 925   G1CollectionSet* collection_set() { return &_collection_set; }
 926 
 927   SoftRefPolicy* soft_ref_policy() override;
 928 
 929   void initialize_serviceability() override;
 930   MemoryUsage memory_usage() override;
 931   GrowableArray<GCMemoryManager*> memory_managers() override;
 932   GrowableArray<MemoryPool*> memory_pools() override;
 933 
 934   void fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) override;
 935 
 936   static void start_codecache_marking_cycle_if_inactive();
 937 
 938   // Apply the given closure on all cards in the Hot Card Cache, emptying it.
 939   void iterate_hcc_closure(G1CardTableEntryClosure* cl, uint worker_id);
 940 
 941   // The shared block offset table array.
 942   G1BlockOffsetTable* bot() const { return _bot; }
 943 
 944   // Reference Processing accessors
 945 
 946   // The STW reference processor....
 947   ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
 948 
 949   G1NewTracer* gc_tracer_stw() const { return _gc_tracer_stw; }
 950   STWGCTimer* gc_timer_stw() const { return _gc_timer_stw; }
 951 
 952   // The Concurrent Marking reference processor...
 953   ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
 954 
 955   size_t unused_committed_regions_in_bytes() const;
 956 
 957   size_t capacity() const override;
 958   size_t used() const override;
 959   // This should be called when we're not holding the heap lock. The
 960   // result might be a bit inaccurate.
 961   size_t used_unlocked() const;
 962   size_t recalculate_used() const;
 963 
 964   // These virtual functions do the actual allocation.
 965   // Some heaps may offer a contiguous region for shared non-blocking
 966   // allocation, via inlined code (by exporting the address of the top and
 967   // end fields defining the extent of the contiguous allocation region.)
 968   // But G1CollectedHeap doesn't yet support this.
 969 
 970   bool is_maximal_no_gc() const override {
 971     return _hrm.available() == 0;
 972   }
 973 
  // Returns true if an incremental GC should be upgraded to a full GC. This
  // is done when there are no free regions and the heap can't be expanded.
 976   bool should_upgrade_to_full_gc() const {
 977     return is_maximal_no_gc() && num_free_regions() == 0;
 978   }
 979 
 980   // The current number of regions in the heap.
 981   uint num_regions() const { return _hrm.length(); }
 982 
  // The max number of regions reserved for the heap. Except for static array
  // sizing purposes you probably want to use max_regions() instead.
 985   uint max_reserved_regions() const { return _hrm.reserved_length(); }
 986 
 987   // Max number of regions that can be committed.
 988   uint max_regions() const { return _hrm.max_length(); }
 989 
 990   // The number of regions that are completely free.
 991   uint num_free_regions() const { return _hrm.num_free_regions(); }
 992 
 993   // The number of regions that can be allocated into.
 994   uint num_free_or_available_regions() const { return num_free_regions() + _hrm.available(); }
 995 
 996   MemoryUsage get_auxiliary_data_memory_usage() const {
 997     return _hrm.get_auxiliary_data_memory_usage();
 998   }
 999 
1000   // The number of regions that are not completely free.
1001   uint num_used_regions() const { return num_regions() - num_free_regions(); }
1002 
1003 #ifdef ASSERT
1004   bool is_on_master_free_list(HeapRegion* hr) {
1005     return _hrm.is_free(hr);
1006   }
1007 #endif // ASSERT
1008 
1009   inline void old_set_add(HeapRegion* hr);
1010   inline void old_set_remove(HeapRegion* hr);
1011 
1012   inline void archive_set_add(HeapRegion* hr);
1013 
1014   size_t non_young_capacity_bytes() {
1015     return (old_regions_count() + _archive_set.length() + humongous_regions_count()) * HeapRegion::GrainBytes;
1016   }
1017 
1018   // Determine whether the given region is one that we are using as an
1019   // old GC alloc region.
1020   bool is_old_gc_alloc_region(HeapRegion* hr);
1021 
1022   // Perform a collection of the heap; intended for use in implementing
1023   // "System.gc".  This probably implies as full a collection as the
1024   // "CollectedHeap" supports.
1025   void collect(GCCause::Cause cause) override;
1026 
1027   // Perform a collection of the heap with the given cause.
1028   // Returns whether this collection actually executed.
1029   bool try_collect(GCCause::Cause cause, const G1GCCounters& counters_before);
1030 
1031   void start_concurrent_gc_for_metadata_allocation(GCCause::Cause gc_cause);
1032 
1033   void remove_from_old_gen_sets(const uint old_regions_removed,
1034                                 const uint archive_regions_removed,
1035                                 const uint humongous_regions_removed);
1036   void prepend_to_freelist(FreeRegionList* list);
1037   void decrement_summary_bytes(size_t bytes);
1038 
1039   bool is_in(const void* p) const override;
1040 
1041   // Return "TRUE" iff the given object address is within the collection
1042   // set. Assumes that the reference points into the heap.
1043   inline bool is_in_cset(const HeapRegion *hr) const;
1044   inline bool is_in_cset(oop obj) const;
1045   inline bool is_in_cset(HeapWord* addr) const;
1046 
1047   inline bool is_in_cset_or_humongous_candidate(const oop obj);
1048 
1049  private:
1050   // This array is used for a quick test on whether a reference points into
1051   // the collection set or not. Each of the array's elements denotes whether the
1052   // corresponding region is in the collection set or not.
1053   G1HeapRegionAttrBiasedMappedArray _region_attr;
1054 
1055  public:
1056 
1057   inline G1HeapRegionAttr region_attr(const void* obj) const;
1058   inline G1HeapRegionAttr region_attr(uint idx) const;
1059 
1060   MemRegion reserved() const {
1061     return _hrm.reserved();
1062   }
1063 
1064   bool is_in_reserved(const void* addr) const {
1065     return reserved().contains(addr);
1066   }
1067 
1068   G1HotCardCache* hot_card_cache() const { return _hot_card_cache; }
1069 
1070   G1CardTable* card_table() const {
1071     return _card_table;
1072   }
1073 
1074   // Iteration functions.
1075 
1076   void object_iterate_parallel(ObjectClosure* cl, uint worker_id, HeapRegionClaimer* claimer);
1077 
1078   // Iterate over all objects, calling "cl.do_object" on each.
1079   void object_iterate(ObjectClosure* cl) override;
1080 
1081   ParallelObjectIteratorImpl* parallel_object_iterator(uint thread_num) override;
1082 
1083   // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1084   void keep_alive(oop obj) override;
1085 
1086   // Iterate over heap regions, in address order, terminating the
1087   // iteration early if the "do_heap_region" method returns "true".
1088   void heap_region_iterate(HeapRegionClosure* blk) const;
1089   void heap_region_iterate(HeapRegionIndexClosure* blk) const;
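
  // Illustrative sketch (hypothetical closure): count regions by visiting each
  // one; returning true from do_heap_region would terminate the iteration
  // early, as described above.
  //
  //   class CountRegionsClosure : public HeapRegionClosure {      // hypothetical
  //     uint _count = 0;
  //   public:
  //     bool do_heap_region(HeapRegion* hr) override { _count++; return false; }
  //     uint count() const { return _count; }
  //   };
  //   CountRegionsClosure cl;
  //   g1h->heap_region_iterate(&cl);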
1090 
1091   // Return the region with the given index. It assumes the index is valid.
1092   inline HeapRegion* region_at(uint index) const;
1093   inline HeapRegion* region_at_or_null(uint index) const;
1094 
1095   // Return the next region (by index) that is part of the same
1096   // humongous object that hr is part of.
1097   inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;
1098 
1099   // Calculate the region index of the given address. Given address must be
1100   // within the heap.
1101   inline uint addr_to_region(const void* addr) const;
1102 
1103   inline HeapWord* bottom_addr_for_region(uint index) const;
1104 
1105   // Two functions to iterate over the heap regions in parallel. Threads
1106   // compete using the HeapRegionClaimer to claim the regions before
1107   // applying the closure on them.
1108   // The _from_worker_offset version uses the HeapRegionClaimer and
  // the worker id to calculate a start offset to prevent all workers from
  // starting at the same point.
1111   void heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
1112                                                   HeapRegionClaimer* hrclaimer,
1113                                                   uint worker_id) const;
1114 
1115   void heap_region_par_iterate_from_start(HeapRegionClosure* cl,
1116                                           HeapRegionClaimer* hrclaimer) const;
1117 
1118   // Iterate over all regions in the collection set in parallel.
1119   void collection_set_par_iterate_all(HeapRegionClosure* cl,
1120                                       HeapRegionClaimer* hr_claimer,
1121                                       uint worker_id);
1122 
1123   // Iterate over all regions currently in the current collection set.
1124   void collection_set_iterate_all(HeapRegionClosure* blk);
1125 
1126   // Iterate over the regions in the current increment of the collection set.
1127   // Starts the iteration so that the start regions of a given worker id over the
1128   // set active_workers are evenly spread across the set of collection set regions
1129   // to be iterated.
1130   // The variant with the HeapRegionClaimer guarantees that the closure will be
1131   // applied to a particular region exactly once.
1132   void collection_set_iterate_increment_from(HeapRegionClosure *blk, uint worker_id) {
1133     collection_set_iterate_increment_from(blk, NULL, worker_id);
1134   }
1135   void collection_set_iterate_increment_from(HeapRegionClosure *blk, HeapRegionClaimer* hr_claimer, uint worker_id);
1136   // Iterate over the array of region indexes, uint regions[length], applying
1137   // the given HeapRegionClosure on each region. The worker_id will determine where
1138   // to start the iteration to allow for more efficient parallel iteration.
1139   void par_iterate_regions_array(HeapRegionClosure* cl,
1140                                  HeapRegionClaimer* hr_claimer,
1141                                  const uint regions[],
1142                                  size_t length,
1143                                  uint worker_id) const;
1144 
1145   // Returns the HeapRegion that contains addr. addr must not be nullptr.
1146   inline HeapRegion* heap_region_containing(const void* addr) const;
1147 
1148   // Returns the HeapRegion that contains addr, or nullptr if that is an uncommitted
1149   // region. addr must not be nullptr.
1150   inline HeapRegion* heap_region_containing_or_null(const void* addr) const;
1151 
1152   // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
1153   // each address in the (reserved) heap is a member of exactly
1154   // one block.  The defining characteristic of a block is that it is
1155   // possible to find its size, and thus to progress forward to the next
1156   // block.  (Blocks may be of different sizes.)  Thus, blocks may
1157   // represent Java objects, or they might be free blocks in a
1158   // free-list-based heap (or subheap), as long as the two kinds are
1159   // distinguishable and the size of each is determinable.
1160 
  // Returns the address of the start of the "block" that contains the
  // address "addr".  We say "block" instead of "object" since some heaps
  // may not pack objects densely; a block may be either an object or a
  // non-object.
1165   HeapWord* block_start(const void* addr) const;
1166 
1167   // Requires "addr" to be the start of a block, and returns "TRUE" iff
1168   // the block is an object.
1169   bool block_is_obj(const HeapWord* addr) const;
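
  // Illustrative sketch of the block abstraction above (hypothetical walker):
  //
  //   HeapWord* start = g1h->block_start(addr);   // start of the block containing addr
  //   if (g1h->block_is_obj(start)) {
  //     oop obj = cast_to_oop(start);             // the block is a Java object
  //   }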
1170 
1171   // Section on thread-local allocation buffers (TLABs)
1172   // See CollectedHeap for semantics.
1173 
1174   size_t tlab_capacity(Thread* ignored) const override;
1175   size_t tlab_used(Thread* ignored) const override;
1176   size_t max_tlab_size() const override;
1177   size_t unsafe_max_tlab_alloc(Thread* ignored) const override;
1178 
1179   inline bool is_in_young(const oop obj) const;
1180   inline bool requires_barriers(stackChunkOop obj) const override;
1181 
1182   // Returns "true" iff the given word_size is "very large".
1183   static bool is_humongous(size_t word_size) {
1184     // Note this has to be strictly greater-than as the TLABs
1185     // are capped at the humongous threshold and we want to
1186     // ensure that we don't try to allocate a TLAB as
1187     // humongous and that we don't allocate a humongous
1188     // object in a TLAB.
1189     return word_size > _humongous_object_threshold_in_words;
1190   }
1191 
1192   // Returns the humongous threshold for a specific region size
1193   static size_t humongous_threshold_for(size_t region_size) {
1194     return (region_size / 2);
1195   }
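
  // Worked example: with a 4M region size and 8-byte words, the threshold is
  // 4M / 2 = 2M, i.e. 262144 words; per is_humongous() above, any allocation
  // strictly larger than that is treated as humongous.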
1196 
1197   // Returns the number of regions the humongous object of the given word size
1198   // requires.
1199   static size_t humongous_obj_size_in_regions(size_t word_size);
1200 
1201   // Print the maximum heap capacity.
1202   size_t max_capacity() const override;
1203 
1204   Tickspan time_since_last_collection() const { return Ticks::now() - _collection_pause_end; }
1205 
1206   // Convenience function to be used in situations where the heap type can be
1207   // asserted to be this type.
1208   static G1CollectedHeap* heap() {
1209     return named_heap<G1CollectedHeap>(CollectedHeap::G1);
1210   }
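
  // Typical use where the heap is known to be G1 (illustrative only):
  //
  //   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  //   g1h->collect(GCCause::_java_lang_system_gc);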
1211 
1212   void set_region_short_lived_locked(HeapRegion* hr);
1213   // add appropriate methods for any other surv rate groups
1214 
1215   G1SurvivorRegions* survivor() { return &_survivor; }
1216 
1217   uint eden_regions_count() const { return _eden.length(); }
1218   uint eden_regions_count(uint node_index) const { return _eden.regions_on_node(node_index); }
1219   uint survivor_regions_count() const { return _survivor.length(); }
1220   uint survivor_regions_count(uint node_index) const { return _survivor.regions_on_node(node_index); }
1221   size_t eden_regions_used_bytes() const { return _eden.used_bytes(); }
1222   size_t survivor_regions_used_bytes() const { return _survivor.used_bytes(); }
1223   uint young_regions_count() const { return _eden.length() + _survivor.length(); }
1224   uint old_regions_count() const { return _old_set.length(); }
1225   uint archive_regions_count() const { return _archive_set.length(); }
1226   uint humongous_regions_count() const { return _humongous_set.length(); }
1227 
1228 #ifdef ASSERT
1229   bool check_young_list_empty();
1230 #endif
1231 
1232   bool is_marked(oop obj) const;
1233 
1234   inline static bool is_obj_filler(const oop obj);
1235   // Determine if an object is dead, given the object and also
1236   // the region to which the object belongs.
1237   inline bool is_obj_dead(const oop obj, const HeapRegion* hr) const;
1238 
1239   // Determine if an object is dead, given only the object itself.
1240   // This will find the region to which the object belongs and
1241   // then call the region version of the same function.
1242   // If obj is NULL it is not dead.
1243   inline bool is_obj_dead(const oop obj) const;
1244 
1245   inline bool is_obj_dead_full(const oop obj, const HeapRegion* hr) const;
1246   inline bool is_obj_dead_full(const oop obj) const;
1247 
1248   // Mark the live object that failed evacuation in the bitmap.
1249   void mark_evac_failure_object(uint worker_id, oop obj, size_t obj_size) const;
1250 
1251   G1ConcurrentMark* concurrent_mark() const { return _cm; }
1252 
1253   // Refinement
1254 
1255   G1ConcurrentRefine* concurrent_refine() const { return _cr; }
1256 
1257   // Optimized nmethod scanning support routines
1258 
1259   // Register the given nmethod with the G1 heap.
1260   void register_nmethod(nmethod* nm) override;
1261 
1262   // Unregister the given nmethod from the G1 heap.
1263   void unregister_nmethod(nmethod* nm) override;
1264 
1265   // No nmethod verification implemented.
1266   void verify_nmethod(nmethod* nm) override {}
1267 
1268   // Recalculate amount of used memory after GC. Must be called after all allocation
1269   // has finished.
1270   void update_used_after_gc(bool evacuation_failed);
1271   // Reset and re-enable the hot card cache.
1272   // Note the counts for the cards in the regions in the
1273   // collection set are reset when the collection set is freed.
1274   void reset_hot_card_cache();
1275   // Free up superfluous code root memory.
1276   void purge_code_root_memory();
1277 
1278   // Rebuild the code root lists for each region
1279   // after a full GC.
1280   void rebuild_code_roots();
1281 
1282   // Performs cleaning of data structures after class unloading.
1283   void complete_cleaning(bool class_unloading_occurred);
1284 
1285   // Verification
1286 
1287   // Perform any cleanup actions necessary before allowing a verification.
1288   void prepare_for_verify() override;
1289 
1290   // Perform verification.
1291   void verify(VerifyOption vo) override;
1292 
1293   // WhiteBox testing support.
1294   bool supports_concurrent_gc_breakpoints() const override;
1295 
1296   WorkerThreads* safepoint_workers() override { return _workers; }
1297 
1298   bool is_archived_object(oop object) const override;
1299 
  // The methods below are here for convenience and dispatch the
  // appropriate method depending on the value of the given VerifyOption
  // parameter. The values for that parameter, and their meanings,
  // are the same as those above.
1304 
1305   bool is_obj_dead_cond(const oop obj,
1306                         const HeapRegion* hr,
1307                         const VerifyOption vo) const;
1308 
1309   bool is_obj_dead_cond(const oop obj,
1310                         const VerifyOption vo) const;
1311 
1312   G1HeapSummary create_g1_heap_summary();
1313   G1EvacSummary create_g1_evac_summary(G1EvacStats* stats);
1314 
1315   // Printing
1316 private:
1317   void print_heap_regions() const;
1318   void print_regions_on(outputStream* st) const;
1319 
1320 public:
1321   void print_on(outputStream* st) const override;
1322   void print_extended_on(outputStream* st) const override;
1323   void print_on_error(outputStream* st) const override;
1324 
1325   void gc_threads_do(ThreadClosure* tc) const override;
1326 
1327   // Override
1328   void print_tracing_info() const override;
1329 
1330   // The following two methods are helpful for debugging RSet issues.
1331   void print_cset_rsets() PRODUCT_RETURN;
1332   void print_all_rsets() PRODUCT_RETURN;
1333 
1334   // Used to print information about locations in the hs_err file.
1335   bool print_location(outputStream* st, void* addr) const override;
1336 };
1337 
1338 // Scoped object that performs common pre- and post-gc heap printing operations.
1339 class G1HeapPrinterMark : public StackObj {
1340   G1CollectedHeap* _g1h;
1341   G1HeapTransition _heap_transition;
1342 
1343 public:
1344   G1HeapPrinterMark(G1CollectedHeap* g1h);
1345   ~G1HeapPrinterMark();
1346 };
1347 
1348 // Scoped object that performs common pre- and post-gc operations related to
1349 // JFR events.
1350 class G1JFRTracerMark : public StackObj {
1351 protected:
1352   STWGCTimer* _timer;
1353   GCTracer* _tracer;
1354 
1355 public:
1356   G1JFRTracerMark(STWGCTimer* timer, GCTracer* tracer);
1357   ~G1JFRTracerMark();
1358 };
1359 
1360 #endif // SHARE_GC_G1_G1COLLECTEDHEAP_HPP