/*
 * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1COLLECTEDHEAP_HPP
#define SHARE_GC_G1_G1COLLECTEDHEAP_HPP

#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BiasedArray.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1EdenRegions.hpp"
#include "gc/g1/g1EvacFailure.hpp"
#include "gc/g1/g1EvacStats.hpp"
#include "gc/g1/g1EvacuationInfo.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1GCPauseType.hpp"
#include "gc/g1/g1HeapTransition.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1HRPrinter.hpp"
#include "gc/g1/g1HeapRegionAttr.hpp"
#include "gc/g1/g1MonitoringSupport.hpp"
#include "gc/g1/g1NUMA.hpp"
#include "gc/g1/g1RedirtyCardsQueue.hpp"
#include "gc/g1/g1SurvivorRegions.hpp"
#include "gc/g1/heapRegionManager.hpp"
#include "gc/g1/heapRegionSet.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/plab.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "gc/shared/taskqueue.hpp"
#include "memory/memRegion.hpp"
#include "utilities/stack.hpp"

// A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
// It uses the "Garbage First" heap organization and algorithm, which
// may combine concurrent marking with parallel, incremental compaction of
// heap subsets that will yield large amounts of garbage.

// Forward declarations
class HeapRegion;
class GenerationSpec;
class G1ParScanThreadState;
class G1ParScanThreadStateSet;
class MemoryPool;
class MemoryManager;
class ObjectClosure;
class SpaceClosure;
class CompactibleSpaceClosure;
class Space;
class G1BatchedGangTask;
class G1CardTableEntryClosure;
class G1CollectionSet;
class G1Policy;
class G1HotCardCache;
class G1RemSet;
class G1ServiceTask;
class G1ServiceThread;
class G1ConcurrentMark;
class G1ConcurrentMarkThread;
class G1ConcurrentRefine;
class GenerationCounters;
class STWGCTimer;
class SlidingForwarding;
class G1NewTracer;
class EvacuationFailedInfo;
class nmethod;
class WorkGang;
class G1Allocator;
class G1ArchiveAllocator;
class G1FullGCScope;
class G1HeapVerifier;
class G1HeapSizingPolicy;
class G1HeapSummary;
class G1EvacSummary;

typedef OverflowTaskQueue<ScannerTask, mtGC>           G1ScannerTasksQueue;
typedef GenericTaskQueueSet<G1ScannerTasksQueue, mtGC> G1ScannerTasksQueueSet;

typedef int RegionIdx_t;   // needs to hold [ 0..max_reserved_regions() )
typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )

// The G1 STW is alive closure.
// An instance is embedded into the G1CH and used as the
// (optional) _is_alive_non_header closure in the STW
// reference processor. It is also extensively used during
// reference processing during STW evacuation pauses.
class G1STWIsAliveClosure : public BoolObjectClosure {
  G1CollectedHeap* _g1h;
public:
  G1STWIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
  bool do_object_b(oop p);
};

class G1STWSubjectToDiscoveryClosure : public BoolObjectClosure {
  G1CollectedHeap* _g1h;
public:
  G1STWSubjectToDiscoveryClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
  bool do_object_b(oop p);
};

class G1RegionMappingChangedListener : public G1MappingChangedListener {
 private:
  void reset_from_card_cache(uint start_idx, size_t num_regions);
 public:
  virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
};

class G1CollectedHeap : public CollectedHeap {
  friend class VM_CollectForMetadataAllocation;
  friend class VM_G1CollectForAllocation;
  friend class VM_G1CollectFull;
  friend class VM_G1TryInitiateConcMark;
  friend class VMStructs;
  friend class MutatorAllocRegion;
  friend class G1FullCollector;
  friend class G1GCAllocRegion;
  friend class G1HeapVerifier;

  // Closures used in implementation.
  friend class G1ParScanThreadState;
  friend class G1ParScanThreadStateSet;
  friend class G1EvacuateRegionsTask;
  friend class G1PLABAllocator;

  // Other related classes.
  friend class HeapRegionClaimer;

  // Testing classes.
  friend class G1CheckRegionAttrTableClosure;

private:
  G1ServiceThread* _service_thread;
  G1ServiceTask* _periodic_gc_task;

  WorkGang* _workers;
  G1CardTable* _card_table;

  Ticks _collection_pause_end;

  SoftRefPolicy      _soft_ref_policy;

  static size_t _humongous_object_threshold_in_words;

  // These sets keep track of old, archive and humongous regions respectively.
  HeapRegionSet _old_set;
  HeapRegionSet _archive_set;
  HeapRegionSet _humongous_set;

  void rebuild_free_region_list();
  // Start a new incremental collection set for the next pause.
  void start_new_collection_set();

  // The block offset table for the G1 heap.
  G1BlockOffsetTable* _bot;

public:
  void prepare_region_for_full_compaction(HeapRegion* hr);

private:
  // Rebuilds the region sets / lists so that they are repopulated to
  // reflect the contents of the heap. The only exception is the
  // humongous set which was not torn down in the first place. If
  // free_list_only is true, it will only rebuild the free list.
  void rebuild_region_sets(bool free_list_only);

  // Callback for region mapping changed events.
  G1RegionMappingChangedListener _listener;

  // Handle G1 NUMA support.
  G1NUMA* _numa;

  // The sequence of all heap regions in the heap.
  HeapRegionManager _hrm;

  // Manages all allocations within regions, except humongous object allocations.
  G1Allocator* _allocator;

  // Manages all heap verification.
  G1HeapVerifier* _verifier;

  // Outside of GC pauses, the number of bytes used in all regions other
  // than the current allocation region(s).
  volatile size_t _summary_bytes_used;

  void increase_used(size_t bytes);
  void decrease_used(size_t bytes);

  void set_used(size_t bytes);

  // Number of bytes used in all regions during GC. Typically changed when
  // retiring a GC alloc region.
  size_t _bytes_used_during_gc;

  // Class that handles archive allocation ranges.
  G1ArchiveAllocator* _archive_allocator;

  // GC allocation statistics policy for survivors.
  G1EvacStats _survivor_evac_stats;

  // GC allocation statistics policy for tenured objects.
  G1EvacStats _old_evac_stats;

  // It specifies whether we should attempt to expand the heap after a
  // region allocation failure. If heap expansion fails we set this to
  // false so that we don't re-attempt the heap expansion (it's likely
  // that subsequent expansion attempts will also fail if one fails).
  // Currently, it is only consulted during GC and it's reset at the
  // start of each GC.
  bool _expand_heap_after_alloc_failure;

  // Helper for monitoring and management support.
  G1MonitoringSupport* _g1mm;

  SlidingForwarding* _forwarding;

  // Records whether the region at the given index is (still) a
  // candidate for eager reclaim.  Only valid for humongous start
  // regions; other regions have unspecified values.  Humongous start
  // regions are initialized at start of collection pause, with
  // candidates removed from the set as they are found reachable from
  // roots or the young generation.
  class HumongousReclaimCandidates : public G1BiasedMappedArray<bool> {
  protected:
    bool default_value() const { return false; }
  public:
    void clear() { G1BiasedMappedArray<bool>::clear(); }
    void set_candidate(uint region, bool value) {
      set_by_index(region, value);
    }
    bool is_candidate(uint region) {
      return get_by_index(region);
    }
  };

  HumongousReclaimCandidates _humongous_reclaim_candidates;
  uint _num_humongous_objects; // Current amount of (all) humongous objects found in the heap.
  uint _num_humongous_reclaim_candidates; // Number of humongous object eager reclaim candidates.
public:
  uint num_humongous_objects() const { return _num_humongous_objects; }
  uint num_humongous_reclaim_candidates() const { return _num_humongous_reclaim_candidates; }
  bool has_humongous_reclaim_candidates() const { return _num_humongous_reclaim_candidates > 0; }

  bool should_do_eager_reclaim() const;

  SlidingForwarding* forwarding() const {
    return _forwarding;
  }

private:

  G1HRPrinter _hr_printer;

  // Return true if an explicit GC should start a concurrent cycle instead
  // of doing a STW full GC. A concurrent cycle should be started if:
  // (a) cause == _g1_humongous_allocation,
  // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent,
  // (c) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent,
  // (d) cause == _wb_conc_mark or _wb_breakpoint,
  // (e) cause == _g1_periodic_collection and +G1PeriodicGCInvokesConcurrent.
  bool should_do_concurrent_full_gc(GCCause::Cause cause);

  // Attempt to start a concurrent cycle with the indicated cause.
  // precondition: should_do_concurrent_full_gc(cause)
  bool try_collect_concurrently(GCCause::Cause cause,
                                uint gc_counter,
                                uint old_marking_started_before);

  // indicates whether we are in young or mixed GC mode
  G1CollectorState _collector_state;

  // Keeps track of how many "old marking cycles" (i.e., Full GCs or
  // concurrent cycles) we have started.
  volatile uint _old_marking_cycles_started;

  // Keeps track of how many "old marking cycles" (i.e., Full GCs or
  // concurrent cycles) we have completed.
  volatile uint _old_marking_cycles_completed;

  // This is a non-product method that is helpful for testing. It is
  // called at the end of a GC and artificially expands the heap by
  // allocating a number of dead regions. This way we can induce very
  // frequent marking cycles and stress the cleanup / concurrent
  // cleanup code more (as all the regions that will be allocated by
  // this method will be found dead by the marking cycle).
  void allocate_dummy_regions() PRODUCT_RETURN;

  // If the HR printer is active, dump the state of the regions in the
  // heap after a compaction.
  void print_hrm_post_compaction();

  // Create a memory mapper for auxiliary data structures of the given size and
  // translation factor.
  static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
                                                         size_t size,
                                                         size_t translation_factor);

  void trace_heap(GCWhen::Type when, const GCTracer* tracer);

  // These are macros so that, if the assert fires, we get the correct
  // line number, file, etc.

#define heap_locking_asserts_params(_extra_message_)                          \
  "%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",            \
  (_extra_message_),                                                          \
  BOOL_TO_STR(Heap_lock->owned_by_self()),                                    \
  BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),                       \
  BOOL_TO_STR(Thread::current()->is_VM_thread())

#define assert_heap_locked()                                                  \
  do {                                                                        \
    assert(Heap_lock->owned_by_self(),                                        \
           heap_locking_asserts_params("should be holding the Heap_lock"));   \
  } while (0)

#define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_)             \
  do {                                                                        \
    assert(Heap_lock->owned_by_self() ||                                      \
           (SafepointSynchronize::is_at_safepoint() &&                        \
             ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \
           heap_locking_asserts_params("should be holding the Heap_lock or "  \
                                        "should be at a safepoint"));         \
  } while (0)

#define assert_heap_locked_and_not_at_safepoint()                             \
  do {                                                                        \
    assert(Heap_lock->owned_by_self() &&                                      \
                                    !SafepointSynchronize::is_at_safepoint(), \
          heap_locking_asserts_params("should be holding the Heap_lock and "  \
                                       "should not be at a safepoint"));      \
  } while (0)

#define assert_heap_not_locked()                                              \
  do {                                                                        \
    assert(!Heap_lock->owned_by_self(),                                       \
        heap_locking_asserts_params("should not be holding the Heap_lock"));  \
  } while (0)

#define assert_heap_not_locked_and_not_at_safepoint()                         \
  do {                                                                        \
    assert(!Heap_lock->owned_by_self() &&                                     \
                                    !SafepointSynchronize::is_at_safepoint(), \
      heap_locking_asserts_params("should not be holding the Heap_lock and "  \
                                   "should not be at a safepoint"));          \
  } while (0)

#define assert_at_safepoint_on_vm_thread()                                    \
  do {                                                                        \
    assert_at_safepoint();                                                    \
    assert(Thread::current_or_null() != NULL, "no current thread");           \
    assert(Thread::current()->is_VM_thread(), "current thread is not VM thread"); \
  } while (0)

#ifdef ASSERT
#define assert_used_and_recalculate_used_equal(g1h)                           \
  do {                                                                        \
    size_t cur_used_bytes = g1h->used();                                      \
    size_t recal_used_bytes = g1h->recalculate_used();                        \
    assert(cur_used_bytes == recal_used_bytes, "Used(" SIZE_FORMAT ") is not" \
           " same as recalculated used(" SIZE_FORMAT ").",                    \
           cur_used_bytes, recal_used_bytes);                                 \
  } while (0)
#else
#define assert_used_and_recalculate_used_equal(g1h) do {} while(0)
#endif
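
  // Illustrative usage of the locking asserts above (a sketch; the member
  // function shown is hypothetical and not part of this class):
  //
  //   void G1CollectedHeap::some_mutator_path(size_t word_size) {
  //     // Mutator allocation entry points run outside safepoints and without
  //     // the Heap_lock (see allocate_new_tlab() / mem_allocate() below).
  //     assert_heap_not_locked_and_not_at_safepoint();
  //     ...
  //   }
  //
  // Code that must run while holding the Heap_lock would instead begin with
  // assert_heap_locked(), and VM-operation code with
  // assert_at_safepoint_on_vm_thread().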

  static const uint MaxYoungGCNameLength = 128;
  // Sets given young_gc_name to the canonical young gc pause string. Young_gc_name
  // must be at least of length MaxYoungGCNameLength.
  void set_young_gc_name(char* young_gc_name);

  // The young region list.
  G1EdenRegions _eden;
  G1SurvivorRegions _survivor;

  STWGCTimer* _gc_timer_stw;

  G1NewTracer* _gc_tracer_stw;

  void gc_tracer_report_gc_start();
  void gc_tracer_report_gc_end(bool concurrent_operation_is_full_mark, G1EvacuationInfo& evacuation_info);

  // The current policy object for the collector.
  G1Policy* _policy;
  G1HeapSizingPolicy* _heap_sizing_policy;

  G1CollectionSet _collection_set;

  // Try to allocate a single non-humongous HeapRegion sufficient for
  // an allocation of the given word_size. If do_expand is true,
  // attempt to expand the heap if necessary to satisfy the allocation
  // request. 'type' takes the type of region to be allocated. (Use constants
  // Old, Eden, Humongous, Survivor defined in HeapRegionType.)
  HeapRegion* new_region(size_t word_size,
                         HeapRegionType type,
                         bool do_expand,
                         uint node_index = G1NUMA::AnyNodeIndex);

  // Initialize a contiguous set of free regions of length num_regions
  // and starting at index first so that they appear as a single
  // humongous region.
  HeapWord* humongous_obj_allocate_initialize_regions(HeapRegion* first_hr,
                                                      uint num_regions,
                                                      size_t word_size);

  // Attempt to allocate a humongous object of the given size. Return
  // NULL if unsuccessful.
  HeapWord* humongous_obj_allocate(size_t word_size);

  // The following two methods, allocate_new_tlab() and
  // mem_allocate(), are the two main entry points from the runtime
  // into the G1's allocation routines. They have the following
  // assumptions:
  //
  // * They should both be called outside safepoints.
  //
  // * They should both be called without holding the Heap_lock.
  //
  // * All allocation requests for new TLABs should go to
  //   allocate_new_tlab().
  //
  // * All non-TLAB allocation requests should go to mem_allocate().
  //
  // * If either call cannot satisfy the allocation request using the
  //   current allocating region, they will try to get a new one. If
  //   this fails, they will attempt to do an evacuation pause and
  //   retry the allocation.
  //
  // * If all allocation attempts fail, even after trying to schedule
  //   an evacuation pause, allocate_new_tlab() will return NULL,
  //   whereas mem_allocate() will attempt a heap expansion and/or
  //   schedule a Full GC.
  //
  // * We do not allow humongous-sized TLABs. So, allocate_new_tlab
  //   should never be called with word_size being humongous. All
  //   humongous allocation requests should go to mem_allocate() which
  //   will satisfy them with a special path.

  virtual HeapWord* allocate_new_tlab(size_t min_size,
                                      size_t requested_size,
                                      size_t* actual_size);

  virtual HeapWord* mem_allocate(size_t word_size,
                                 bool*  gc_overhead_limit_was_exceeded);
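
  // Illustrative sketch of the routing described above (the caller shown is
  // hypothetical; the runtime reaches these through the CollectedHeap
  // interface): TLAB refills go through allocate_new_tlab(), everything else
  // through mem_allocate(), e.g.
  //
  //   size_t actual_size = 0;
  //   HeapWord* tlab = heap->allocate_new_tlab(min_size, requested_size, &actual_size);
  //
  //   bool gc_overhead_limit_was_exceeded = false;
  //   HeapWord* obj = heap->mem_allocate(word_size, &gc_overhead_limit_was_exceeded);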

  // First-level mutator allocation attempt: try to allocate out of
  // the mutator alloc region without taking the Heap_lock. This
  // should only be used for non-humongous allocations.
  inline HeapWord* attempt_allocation(size_t min_word_size,
                                      size_t desired_word_size,
                                      size_t* actual_word_size);

  // Second-level mutator allocation attempt: take the Heap_lock and
  // retry the allocation attempt, potentially scheduling a GC
  // pause. This should only be used for non-humongous allocations.
  HeapWord* attempt_allocation_slow(size_t word_size);

  // Takes the Heap_lock and attempts a humongous allocation. It can
  // potentially schedule a GC pause.
  HeapWord* attempt_allocation_humongous(size_t word_size);
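
  // Illustrative ordering of the two mutator allocation levels above (a
  // sketch, not the actual implementation):
  //
  //   size_t actual_word_size = 0;
  //   HeapWord* result = attempt_allocation(min_word_size, desired_word_size, &actual_word_size);
  //   if (result == NULL) {
  //     // Fast path failed; take the Heap_lock and possibly schedule a GC pause.
  //     result = attempt_allocation_slow(desired_word_size);
  //   }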

  // Allocation attempt that should be called during safepoints (e.g.,
  // at the end of a successful GC). expect_null_mutator_alloc_region
  // specifies whether the mutator alloc region is expected to be NULL
  // or not.
  HeapWord* attempt_allocation_at_safepoint(size_t word_size,
                                            bool expect_null_mutator_alloc_region);

  // These methods are the "callbacks" from the G1AllocRegion class.

  // For mutator alloc regions.
  HeapRegion* new_mutator_alloc_region(size_t word_size, bool force, uint node_index);
  void retire_mutator_alloc_region(HeapRegion* alloc_region,
                                   size_t allocated_bytes);

  // For GC alloc regions.
  bool has_more_regions(G1HeapRegionAttr dest);
  HeapRegion* new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest, uint node_index);
  void retire_gc_alloc_region(HeapRegion* alloc_region,
                              size_t allocated_bytes, G1HeapRegionAttr dest);

  // - if explicit_gc is true, the GC is for a System.gc() etc,
  //   otherwise it's for a failed allocation.
  // - if clear_all_soft_refs is true, all soft references should be
  //   cleared during the GC.
  // - if do_maximum_compaction is true, full gc will do a maximally
  //   compacting collection, leaving no dead wood.
  // - it returns false if it is unable to do the collection due to the
  //   GC locker being active, true otherwise.
  bool do_full_collection(bool explicit_gc,
                          bool clear_all_soft_refs,
                          bool do_maximum_compaction);

  // Callback from VM_G1CollectFull operation, or collect_as_vm_thread.
  virtual void do_full_collection(bool clear_all_soft_refs);

  // Helper to do a full collection that clears soft references.
  bool upgrade_to_full_collection();

  // Callback from VM_G1CollectForAllocation operation.
  // This function does everything necessary/possible to satisfy a
  // failed allocation request (including collection, expansion, etc.)
  HeapWord* satisfy_failed_allocation(size_t word_size,
                                      bool* succeeded);
  // Internal helpers used during full GC to split it up to
  // increase readability.
  void abort_concurrent_cycle();
  void verify_before_full_collection(bool explicit_gc);
  void prepare_heap_for_full_collection();
  void prepare_heap_for_mutators();
  void abort_refinement();
  void verify_after_full_collection();
  void print_heap_after_full_collection(G1HeapTransition* heap_transition);

  // Helper method for satisfy_failed_allocation()
  HeapWord* satisfy_failed_allocation_helper(size_t word_size,
                                             bool do_gc,
                                             bool maximum_compaction,
                                             bool expect_null_mutator_alloc_region,
                                             bool* gc_succeeded);

  // Attempt to expand the heap sufficiently to support an allocation of the
  // given "word_size". If successful, perform the allocation and return the
  // address of the allocated block, or else "NULL".
  HeapWord* expand_and_allocate(size_t word_size);

  // Process any reference objects discovered.
  void process_discovered_references(G1ParScanThreadStateSet* per_thread_states);

  // During a concurrent start pause we may install a pending list head which is not
  // otherwise reachable; ensure that it is marked in the bitmap for concurrent marking
  // to discover.
  void make_pending_list_reachable();

  void verify_numa_regions(const char* desc);

public:
  G1ServiceThread* service_thread() const { return _service_thread; }

  WorkGang* workers() const { return _workers; }

  // Runs the given AbstractGangTask with the current active workers,
  // returning the total time taken.
  Tickspan run_task_timed(AbstractGangTask* task);
  // Run the given batch task using the work gang.
  void run_batch_task(G1BatchedGangTask* cl);

  G1Allocator* allocator() {
    return _allocator;
  }

  G1HeapVerifier* verifier() {
    return _verifier;
  }

  G1MonitoringSupport* g1mm() {
    assert(_g1mm != NULL, "should have been initialized");
    return _g1mm;
  }

  void resize_heap_if_necessary();

  // Check if there is memory to uncommit and if so schedule a task to do it.
  void uncommit_regions_if_necessary();
  // Immediately uncommit uncommittable regions.
  uint uncommit_regions(uint region_limit);
  bool has_uncommittable_regions();

  G1NUMA* numa() const { return _numa; }

  // Expand the garbage-first heap by at least the given size (in bytes!).
  // Returns true if the heap was expanded by the requested amount;
  // false otherwise.
  // (Rounds up to a HeapRegion boundary.)
  bool expand(size_t expand_bytes, WorkGang* pretouch_workers = NULL, double* expand_time_ms = NULL);
  bool expand_single_region(uint node_index);

  // Returns the PLAB statistics for a given destination.
  inline G1EvacStats* alloc_buffer_stats(G1HeapRegionAttr dest);

  // Determines PLAB size for a given destination.
  inline size_t desired_plab_sz(G1HeapRegionAttr dest);

  // Do anything common to GC's.
  void gc_prologue(bool full);
  void gc_epilogue(bool full);

  // Does the given region fulfill remembered set based eager reclaim candidate requirements?
  bool is_potential_eager_reclaim_candidate(HeapRegion* r) const;

  // Modify the reclaim candidate set and test for presence.
  // These are only valid for starts_humongous regions.
  inline void set_humongous_reclaim_candidate(uint region, bool value);
  inline bool is_humongous_reclaim_candidate(uint region);

  // Remove from the reclaim candidate set.  Also remove from the
  // collection set so that later encounters avoid the slow path.
  inline void set_humongous_is_live(oop obj);

  // Register the given region to be part of the collection set.
  inline void register_humongous_region_with_region_attr(uint index);

  // We register a region with the fast "in collection set" test. We
  // simply set to true the array slot corresponding to this region.
  void register_young_region_with_region_attr(HeapRegion* r) {
    _region_attr.set_in_young(r->hrm_index());
  }
  inline void register_region_with_region_attr(HeapRegion* r);
  inline void register_old_region_with_region_attr(HeapRegion* r);
  inline void register_optional_region_with_region_attr(HeapRegion* r);

  void clear_region_attr(const HeapRegion* hr) {
    _region_attr.clear(hr);
  }

  void clear_region_attr() {
    _region_attr.clear();
  }

  // Verify that the G1RegionAttr remset tracking corresponds to actual remset tracking
  // for all regions.
  void verify_region_attr_remset_update() PRODUCT_RETURN;

  bool is_user_requested_concurrent_full_gc(GCCause::Cause cause);

  // This is called at the start of either a concurrent cycle or a Full
  // GC to update the number of old marking cycles started.
  void increment_old_marking_cycles_started();

  // This is called at the end of either a concurrent cycle or a Full
  // GC to update the number of old marking cycles completed. Those two
  // can happen in a nested fashion, i.e., we start a concurrent
  // cycle, a Full GC happens half-way through it which ends first,
  // and then the cycle notices that a Full GC happened and ends
  // too. The concurrent parameter is a boolean to help us do a bit
  // tighter consistency checking in the method. If concurrent is
  // false, the caller is the inner caller in the nesting (i.e., the
  // Full GC). If concurrent is true, the caller is the outer caller
  // in this nesting (i.e., the concurrent cycle). Further nesting is
  // not currently supported. The end of this call also notifies
  // the G1OldGCCount_lock in case a Java thread is waiting for a full
  // GC to happen (e.g., it called System.gc() with
  // +ExplicitGCInvokesConcurrent).
  // whole_heap_examined should indicate that during that old marking
  // cycle the whole heap has been examined for live objects (as opposed
  // to only parts, or aborted before completion).
  void increment_old_marking_cycles_completed(bool concurrent, bool whole_heap_examined);

  uint old_marking_cycles_completed() {
    return _old_marking_cycles_completed;
  }

  G1HRPrinter* hr_printer() { return &_hr_printer; }

  // Allocates a new heap region instance.
  HeapRegion* new_heap_region(uint hrs_index, MemRegion mr);

  // Allocate the highest free region in the reserved heap. This will commit
  // regions as necessary.
  HeapRegion* alloc_highest_free_region();

  // Frees a region by resetting its metadata and adding it to the free list
  // passed as a parameter (this is usually a local list which will be appended
  // to the master free list later or NULL if free list management is handled
  // in another way).
  // Callers must ensure they are the only one calling free on the given region
  // at the same time.
  void free_region(HeapRegion* hr, FreeRegionList* free_list);

  // It dirties the cards that cover the block so that the post
  // write barrier never queues anything when updating objects on this
  // block. It is assumed (and in fact we assert) that the block
  // belongs to a young region.
  inline void dirty_young_block(HeapWord* start, size_t word_size);

  // Frees a humongous region by collapsing it into individual regions
  // and calling free_region() for each of them. The freed regions
  // will be added to the free list that's passed as a parameter (this
  // is usually a local list which will be appended to the master free
  // list later).
  // The method assumes that only a single thread is ever calling
  // this for a particular region at once.
  void free_humongous_region(HeapRegion* hr,
                             FreeRegionList* free_list);
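
  // Illustrative sketch of the local free-list protocol described above (the
  // caller and the list name are hypothetical): regions are freed into a local
  // list which is later appended to the master free list in one operation, e.g.
  //
  //   FreeRegionList local_free_list("Local Free List");
  //   free_region(hr, &local_free_list);        // or free_humongous_region(hr, &local_free_list)
  //   ...
  //   prepend_to_freelist(&local_free_list);    // declared further below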

  // Facility for allocating in 'archive' regions in high heap memory and
  // recording the allocated ranges. These should all be called from the
  // VM thread at safepoints, without the heap lock held. They can be used
  // to create and archive a set of heap regions which can be mapped at the
  // same fixed addresses in a subsequent JVM invocation.
  void begin_archive_alloc_range(bool open = false);

  // Check if the requested size would be too large for an archive allocation.
  bool is_archive_alloc_too_large(size_t word_size);

  // Allocate memory of the requested size from the archive region. This will
  // return NULL if the size is too large or if no memory is available. It
  // does not trigger a garbage collection.
  HeapWord* archive_mem_allocate(size_t word_size);

  // Optionally aligns the end address and returns the allocated ranges in
  // an array of MemRegions in order of ascending addresses.
  void end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
                               size_t end_alignment_in_bytes = 0);
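
  // Illustrative sketch of the archive allocation protocol above ('ranges' is
  // a caller-supplied GrowableArray<MemRegion>* and the sizes are
  // hypothetical):
  //
  //   begin_archive_alloc_range();
  //   HeapWord* mem = archive_mem_allocate(word_size);   // NULL if it cannot be satisfied
  //   ... more allocations ...
  //   end_archive_alloc_range(ranges, 0 /* end_alignment_in_bytes */);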

  // Facility for allocating a fixed range within the heap and marking
  // the containing regions as 'archive'. For use at JVM init time, when the
  // caller may mmap archived heap data at the specified range(s).
  // Verify that the MemRegions specified in the argument array are within the
  // reserved heap.
  bool check_archive_addresses(MemRegion* range, size_t count);

  // Commit the appropriate G1 regions containing the specified MemRegions
  // and mark them as 'archive' regions. The regions in the array must be
  // non-overlapping and in order of ascending address.
  bool alloc_archive_regions(MemRegion* range, size_t count, bool open);

  // Insert any required filler objects in the G1 regions around the specified
  // ranges to make the regions parseable. This must be called after
  // alloc_archive_regions, and after class loading has occurred.
  void fill_archive_regions(MemRegion* range, size_t count);

  // Populate the G1BlockOffsetTablePart for archived regions with the given
  // memory ranges.
  void populate_archive_regions_bot_part(MemRegion* range, size_t count);

  // For each of the specified MemRegions, uncommit the containing G1 regions
  // which had been allocated by alloc_archive_regions. This should be called
  // rather than fill_archive_regions at JVM init time if the archive file
  // mapping failed, with the same non-overlapping and sorted MemRegion array.
  void dealloc_archive_regions(MemRegion* range, size_t count);
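
  // Illustrative init-time sequence for the facility above (a sketch; error
  // handling elided, and 'mapping_succeeded' is a hypothetical flag set by the
  // caller's mapping step):
  //
  //   if (check_archive_addresses(ranges, count) &&
  //       alloc_archive_regions(ranges, count, false /* open */)) {
  //     // ... attempt to map the archive file into the allocated ranges ...
  //     if (mapping_succeeded) {
  //       fill_archive_regions(ranges, count);
  //     } else {
  //       dealloc_archive_regions(ranges, count);
  //     }
  //   }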

private:

  // Shrink the garbage-first heap by at most the given size (in bytes!).
  // (Rounds down to a HeapRegion boundary.)
  void shrink(size_t shrink_bytes);
  void shrink_helper(size_t expand_bytes);

  #if TASKQUEUE_STATS
  static void print_taskqueue_stats_hdr(outputStream* const st);
  void print_taskqueue_stats() const;
  void reset_taskqueue_stats();
  #endif // TASKQUEUE_STATS

  // Start a concurrent cycle.
  void start_concurrent_cycle(bool concurrent_operation_is_full_mark);

  // Schedule the VM operation that will do an evacuation pause to
  // satisfy an allocation request of word_size. *succeeded will
  // return whether the VM operation was successful (it did do an
  // evacuation pause) or not (another thread beat us to it or the GC
  // locker was active). Given that we should not be holding the
  // Heap_lock when we enter this method, we will pass the
  // gc_count_before (i.e., total_collections()) as a parameter since
  // it has to be read while holding the Heap_lock. Currently, both
  // methods that call do_collection_pause() release the Heap_lock
  // before the call, so it's easy to read gc_count_before just before.
  HeapWord* do_collection_pause(size_t         word_size,
                                uint           gc_count_before,
                                bool*          succeeded,
                                GCCause::Cause gc_cause);
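
  // Illustrative caller-side protocol for do_collection_pause() (a sketch):
  // read the collection count while holding the Heap_lock, release the lock,
  // then request the pause, e.g.
  //
  //   uint gc_count_before;
  //   {
  //     MutexLocker ml(Heap_lock);
  //     gc_count_before = total_collections();
  //   }
  //   bool succeeded;
  //   HeapWord* result = do_collection_pause(word_size, gc_count_before,
  //                                          &succeeded, GCCause::_g1_inc_collection_pause);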

  void wait_for_root_region_scanning();

  // Perform an incremental collection at a safepoint, possibly
  // followed by a by-policy upgrade to a full collection.  Returns
  // false if unable to do the collection due to the GC locker being
  // active, true otherwise.
  // precondition: at safepoint on VM thread
  // precondition: !is_gc_active()
  bool do_collection_pause_at_safepoint(double target_pause_time_ms);

  // Helper for do_collection_pause_at_safepoint, containing the guts
  // of the incremental collection pause, executed by the vm thread.
  void do_collection_pause_at_safepoint_helper(double target_pause_time_ms);

  G1HeapVerifier::G1VerifyType young_collection_verify_type() const;
  void verify_before_young_collection(G1HeapVerifier::G1VerifyType type);
  void verify_after_young_collection(G1HeapVerifier::G1VerifyType type);

  void calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms);

  // Actually do the work of evacuating the parts of the collection set.
  // The has_optional_evacuation_work flag for the initial collection set
  // evacuation indicates whether one or more optional evacuation steps may
  // follow.
  // If not set, G1 can avoid clearing the card tables of regions that we scan
  // for roots from the heap: when scanning the card table for dirty cards after
  // all remembered sets have been dumped onto it, for optional evacuation we
  // mark these cards as "Scanned" to know that we do not need to re-scan them
  // in the additional optional evacuation passes. This means that in the "Clear
  // Card Table" phase we need to clear those marks. However, if there is no
  // optional evacuation, g1 can immediately clean the dirty cards it encounters
  // as nobody else will be looking at them again, saving the clear card table
  // work later.
  // This case is very common (young only collections and most mixed gcs), so
  // depending on the ratio between scanned and evacuated regions (which g1 always
  // needs to clear), this is a big win.
  void evacuate_initial_collection_set(G1ParScanThreadStateSet* per_thread_states,
                                       bool has_optional_evacuation_work);
  void evacuate_optional_collection_set(G1ParScanThreadStateSet* per_thread_states);
private:
  // Evacuate the next set of optional regions.
  void evacuate_next_optional_regions(G1ParScanThreadStateSet* per_thread_states);

public:
  void pre_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
  void post_evacuate_collection_set(G1EvacuationInfo& evacuation_info,
                                    G1RedirtyCardsQueueSet* rdcqs,
                                    G1ParScanThreadStateSet* pss);

  void expand_heap_after_young_collection();
  // Update object copying statistics.
  void record_obj_copy_mem_stats();

  // The hot card cache for remembered set insertion optimization.
  G1HotCardCache* _hot_card_cache;

  // The g1 remembered set of the heap.
  G1RemSet* _rem_set;

  void post_evacuate_cleanup_1(G1ParScanThreadStateSet* per_thread_states,
                               G1RedirtyCardsQueueSet* rdcqs);
  void post_evacuate_cleanup_2(PreservedMarksSet* preserved_marks,
                               G1RedirtyCardsQueueSet* rdcqs,
                               G1EvacuationInfo* evacuation_info,
                               const size_t* surviving_young_words);

  // After a collection pause, reset eden and the collection set.
  void clear_eden();
  void clear_collection_set();

  // Abandon the current collection set without recording policy
  // statistics or updating free lists.
  void abandon_collection_set(G1CollectionSet* collection_set);

  // The concurrent marker (and the thread it runs in.)
  G1ConcurrentMark* _cm;
  G1ConcurrentMarkThread* _cm_thread;

  // The concurrent refiner.
  G1ConcurrentRefine* _cr;

  // The parallel task queues
  G1ScannerTasksQueueSet *_task_queues;

  // Number of regions evacuation failed in the current collection.
  volatile uint _num_regions_failed_evacuation;
  // Records for every region on the heap whether evacuation failed for it.
  volatile bool* _regions_failed_evacuation;

  EvacuationFailedInfo* _evacuation_failed_info_array;

  PreservedMarksSet _preserved_marks_set;

  // Preserve the mark of "obj", if necessary, in preparation for its mark
  // word being overwritten with a self-forwarding-pointer.
  void preserve_mark_during_evac_failure(uint worker_id, oop obj, markWord m);

#ifndef PRODUCT
  // Support for forcing evacuation failures. Analogous to
  // PromotionFailureALot for the other collectors.

  // Records whether G1EvacuationFailureALot should be in effect
  // for the current GC
  bool _evacuation_failure_alot_for_current_gc;

  // Used to record the GC number for interval checking when
  // determining whether G1EvacuationFailureALot is in effect
  // for the current GC.
  size_t _evacuation_failure_alot_gc_number;

  // Count of the number of evacuations between failures.
  volatile size_t _evacuation_failure_alot_count;

  // Set whether G1EvacuationFailureALot should be in effect
  // for the current GC (based upon the type of GC and which
  // command line flags are set);
  inline bool evacuation_failure_alot_for_gc_type(bool for_young_gc,
                                                  bool during_concurrent_start,
                                                  bool mark_or_rebuild_in_progress);

  inline void set_evacuation_failure_alot_for_current_gc();

  // Return true if it's time to cause an evacuation failure.
  inline bool evacuation_should_fail();

  // Reset the G1EvacuationFailureALot counters.  Should be called at
  // the end of an evacuation pause in which an evacuation failure occurred.
  inline void reset_evacuation_should_fail();
#endif // !PRODUCT

  // ("Weak") Reference processing support.
  //
  // G1 has 2 instances of the reference processor class. One
  // (_ref_processor_cm) handles reference object discovery
  // and subsequent processing during concurrent marking cycles.
  //
  // The other (_ref_processor_stw) handles reference object
  // discovery and processing during full GCs and incremental
  // evacuation pauses.
  //
  // During an incremental pause, reference discovery will be
  // temporarily disabled for _ref_processor_cm and will be
  // enabled for _ref_processor_stw. At the end of the evacuation
  // pause references discovered by _ref_processor_stw will be
  // processed and discovery will be disabled. The previous
  // setting for reference object discovery for _ref_processor_cm
  // will be re-instated.
  //
  // At the start of marking:
  //  * Discovery by the CM ref processor is verified to be inactive
  //    and its discovered lists are empty.
  //  * Discovery by the CM ref processor is then enabled.
  //
  // At the end of marking:
  //  * Any references on the CM ref processor's discovered
  //    lists are processed (possibly MT).
  //
  // At the start of full GC we:
  //  * Disable discovery by the CM ref processor and
  //    empty CM ref processor's discovered lists
  //    (without processing any entries).
  //  * Verify that the STW ref processor is inactive and its
  //    discovered lists are empty.
  //  * Temporarily set STW ref processor discovery as single threaded.
  //  * Temporarily clear the STW ref processor's _is_alive_non_header
  //    field.
  //  * Finally enable discovery by the STW ref processor.
  //
  // The STW ref processor is used to record any discovered
  // references during the full GC.
  //
  // At the end of a full GC we:
  //  * Enqueue any reference objects discovered by the STW ref processor
  //    that have non-live referents. This has the side-effect of
  //    making the STW ref processor inactive by disabling discovery.
  //  * Verify that the CM ref processor is still inactive
  //    and no references have been placed on its discovered
  //    lists (also checked as a precondition during concurrent start).

  // The (stw) reference processor...
  ReferenceProcessor* _ref_processor_stw;

  // During reference object discovery, the _is_alive_non_header
  // closure (if non-null) is applied to the referent object to
  // determine whether the referent is live. If so then the
  // reference object does not need to be 'discovered' and can
  // be treated as a regular oop. This has the benefit of reducing
  // the number of 'discovered' reference objects that need to
  // be processed.
  //
  // Instance of the is_alive closure for embedding into the
  // STW reference processor as the _is_alive_non_header field.
  // Supplying a value for the _is_alive_non_header field is
  // optional but doing so prevents unnecessary additions to
  // the discovered lists during reference discovery.
  G1STWIsAliveClosure _is_alive_closure_stw;

  G1STWSubjectToDiscoveryClosure _is_subject_to_discovery_stw;

  // The (concurrent marking) reference processor...
  ReferenceProcessor* _ref_processor_cm;

  // Instance of the concurrent mark is_alive closure for embedding
  // into the Concurrent Marking reference processor as the
  // _is_alive_non_header field. Supplying a value for the
  // _is_alive_non_header field is optional but doing so prevents
  // unnecessary additions to the discovered lists during reference
  // discovery.
  G1CMIsAliveClosure _is_alive_closure_cm;

  G1CMSubjectToDiscoveryClosure _is_subject_to_discovery_cm;
public:

  G1ScannerTasksQueue* task_queue(uint i) const;

  uint num_task_queues() const;

  // Create a G1CollectedHeap.
  // Must call the initialize method afterwards.
  // May not return if something goes wrong.
  G1CollectedHeap();

private:
  jint initialize_concurrent_refinement();
  jint initialize_service_thread();
public:
  // Initialize the G1CollectedHeap to have the initial and
  // maximum sizes and remembered and barrier sets
  // specified by the policy object.
  jint initialize();

  virtual void stop();
  virtual void safepoint_synchronize_begin();
  virtual void safepoint_synchronize_end();

  // Does operations required after initialization has been done.
  void post_initialize();

  // Initialize weak reference processing.
  void ref_processing_init();

  virtual Name kind() const {
    return CollectedHeap::G1;
  }

  virtual const char* name() const {
    return "G1";
  }

  const G1CollectorState* collector_state() const { return &_collector_state; }
  G1CollectorState* collector_state() { return &_collector_state; }

  // The current policy object for the collector.
  G1Policy* policy() const { return _policy; }
  // The remembered set.
  G1RemSet* rem_set() const { return _rem_set; }

  inline G1GCPhaseTimes* phase_times() const;

  const G1CollectionSet* collection_set() const { return &_collection_set; }
  G1CollectionSet* collection_set() { return &_collection_set; }

  virtual SoftRefPolicy* soft_ref_policy();

  virtual void initialize_serviceability();
  virtual MemoryUsage memory_usage();
  virtual GrowableArray<GCMemoryManager*> memory_managers();
  virtual GrowableArray<MemoryPool*> memory_pools();

  // Try to minimize the remembered set.
  void scrub_rem_set();

  // Apply the given closure on all cards in the Hot Card Cache, emptying it.
  void iterate_hcc_closure(G1CardTableEntryClosure* cl, uint worker_id);

  // The shared block offset table array.
  G1BlockOffsetTable* bot() const { return _bot; }

  // Reference Processing accessors

  // The STW reference processor....
  ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }

  G1NewTracer* gc_tracer_stw() const { return _gc_tracer_stw; }

  // The Concurrent Marking reference processor...
  ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }

  size_t unused_committed_regions_in_bytes() const;

  virtual size_t capacity() const;
  virtual size_t used() const;
  // This should be called when we're not holding the heap lock. The
  // result might be a bit inaccurate.
  size_t used_unlocked() const;
  size_t recalculate_used() const;

  // These virtual functions do the actual allocation.
  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region.)
  // But G1CollectedHeap doesn't yet support this.

  virtual bool is_maximal_no_gc() const {
    return _hrm.available() == 0;
  }

  // Returns true if an incremental GC should be upgraded to a full GC. This
  // is done when there are no free regions and the heap can't be expanded.
  bool should_upgrade_to_full_gc() const {
    return is_maximal_no_gc() && num_free_regions() == 0;
  }

  // The current number of regions in the heap.
  uint num_regions() const { return _hrm.length(); }

  // The max number of regions reserved for the heap. Except for static array
  // sizing purposes, you probably want to use max_regions() instead.
  uint max_reserved_regions() const { return _hrm.reserved_length(); }

  // Max number of regions that can be committed.
  uint max_regions() const { return _hrm.max_length(); }

  // The number of regions that are completely free.
  uint num_free_regions() const { return _hrm.num_free_regions(); }

  // The number of regions that can be allocated into.
  uint num_free_or_available_regions() const { return num_free_regions() + _hrm.available(); }

  MemoryUsage get_auxiliary_data_memory_usage() const {
    return _hrm.get_auxiliary_data_memory_usage();
  }

  // The number of regions that are not completely free.
  uint num_used_regions() const { return num_regions() - num_free_regions(); }

#ifdef ASSERT
  bool is_on_master_free_list(HeapRegion* hr) {
    return _hrm.is_free(hr);
  }
#endif // ASSERT

  inline void old_set_add(HeapRegion* hr);
  inline void old_set_remove(HeapRegion* hr);

  inline void archive_set_add(HeapRegion* hr);

  size_t non_young_capacity_bytes() {
    return (old_regions_count() + _archive_set.length() + humongous_regions_count()) * HeapRegion::GrainBytes;
  }

  // Determine whether the given region is one that we are using as an
  // old GC alloc region.
  bool is_old_gc_alloc_region(HeapRegion* hr);

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc".  This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause);

  // Perform a collection of the heap with the given cause.
  // Returns whether this collection actually executed.
  bool try_collect(GCCause::Cause cause);

  // True iff an evacuation has failed in the most-recent collection.
  inline bool evacuation_failed() const;
  // True iff the given region encountered an evacuation failure in the most-recent
  // collection.
  inline bool evacuation_failed(uint region_idx) const;

  inline uint num_regions_failed_evacuation() const;
  // Notify that the garbage collection encountered an evacuation failure in the
  // given region. Returns whether this has been the first occurrence of an evacuation
  // failure in that region.
  inline bool notify_region_failed_evacuation(uint const region_idx);

  void remove_from_old_gen_sets(const uint old_regions_removed,
                                const uint archive_regions_removed,
                                const uint humongous_regions_removed);
  void prepend_to_freelist(FreeRegionList* list);
  void decrement_summary_bytes(size_t bytes);

  virtual bool is_in(const void* p) const;

  // Return "TRUE" iff the given object address is within the collection
  // set. Assumes that the reference points into the heap.
  inline bool is_in_cset(const HeapRegion *hr);
  inline bool is_in_cset(oop obj);
  inline bool is_in_cset(HeapWord* addr);

  inline bool is_in_cset_or_humongous(const oop obj);

 private:
  // This array is used for a quick test on whether a reference points into
  // the collection set or not. Each of the array's elements denotes whether the
  // corresponding region is in the collection set or not.
  G1HeapRegionAttrBiasedMappedArray _region_attr;

 public:

  inline G1HeapRegionAttr region_attr(const void* obj) const;
  inline G1HeapRegionAttr region_attr(uint idx) const;

  MemRegion reserved() const {
    return _hrm.reserved();
  }

  bool is_in_reserved(const void* addr) const {
    return reserved().contains(addr);
  }

  G1HotCardCache* hot_card_cache() const { return _hot_card_cache; }

  G1CardTable* card_table() const {
    return _card_table;
  }

  // Iteration functions.

  void object_iterate_parallel(ObjectClosure* cl, uint worker_id, HeapRegionClaimer* claimer);

  // Iterate over all objects, calling "cl.do_object" on each.
  virtual void object_iterate(ObjectClosure* cl);

  virtual ParallelObjectIterator* parallel_object_iterator(uint thread_num);

  // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
  virtual void keep_alive(oop obj);

  // Iterate over heap regions, in address order, terminating the
  // iteration early if the "do_heap_region" method returns "true".
  void heap_region_iterate(HeapRegionClosure* blk) const;

  // Return the region with the given index. It assumes the index is valid.
  inline HeapRegion* region_at(uint index) const;
  inline HeapRegion* region_at_or_null(uint index) const;

  // Return the next region (by index) that is part of the same
  // humongous object that hr is part of.
  inline HeapRegion* next_region_in_humongous(HeapRegion* hr) const;

  // Calculate the region index of the given address. Given address must be
  // within the heap.
  inline uint addr_to_region(HeapWord* addr) const;

  inline HeapWord* bottom_addr_for_region(uint index) const;
1237 
1238   // Two functions to iterate over the heap regions in parallel. Threads
1239   // compete using the HeapRegionClaimer to claim the regions before
1240   // applying the closure on them.
1241   // The _from_worker_offset version uses the HeapRegionClaimer and
1242   // the worker id to calculate a start offset so that not all workers
1243   // start from the same region.
1244   void heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
1245                                                   HeapRegionClaimer* hrclaimer,
1246                                                   uint worker_id) const;
1247 
1248   void heap_region_par_iterate_from_start(HeapRegionClosure* cl,
1249                                           HeapRegionClaimer* hrclaimer) const;
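       // Illustrative sketch ("MyRegionClosure" is hypothetical), assuming the
       // claimer is sized to the number of active workers and shared by them:
       //
       //   HeapRegionClaimer claimer(workers()->active_workers());
       //   // In each worker's work(uint worker_id):
       //   MyRegionClosure cl;
       //   G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&cl, &claimer, worker_id);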
1250 
1251   // Iterate over all regions in the collection set in parallel.
1252   void collection_set_par_iterate_all(HeapRegionClosure* cl,
1253                                       HeapRegionClaimer* hr_claimer,
1254                                       uint worker_id);
1255 
1256   // Iterate over all regions currently in the current collection set.
1257   void collection_set_iterate_all(HeapRegionClosure* blk);
1258 
1259   // Iterate over the regions in the current increment of the collection set.
1260   // Starts the iteration so that the starting regions of the individual worker ids,
1261   // over the set of active_workers, are evenly spread across the collection set
1262   // regions to be iterated.
1263   // The variant with the HeapRegionClaimer guarantees that the closure will be
1264   // applied to a particular region exactly once.
1265   void collection_set_iterate_increment_from(HeapRegionClosure *blk, uint worker_id) {
1266     collection_set_iterate_increment_from(blk, NULL, worker_id);
1267   }
1268   void collection_set_iterate_increment_from(HeapRegionClosure *blk, HeapRegionClaimer* hr_claimer, uint worker_id);
1269 
1270   // Returns the HeapRegion that contains addr. addr must not be NULL.
1271   template <class T>
1272   inline HeapRegion* heap_region_containing(const T addr) const;
1273 
1274   // Returns the HeapRegion that contains addr, or NULL if that is an uncommitted
1275   // region. addr must not be NULL.
1276   template <class T>
1277   inline HeapRegion* heap_region_containing_or_null(const T addr) const;
1278 
1279   // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
1280   // each address in the (reserved) heap is a member of exactly
1281   // one block.  The defining characteristic of a block is that it is
1282   // possible to find its size, and thus to progress forward to the next
1283   // block.  (Blocks may be of different sizes.)  Thus, blocks may
1284   // represent Java objects, or they might be free blocks in a
1285   // free-list-based heap (or subheap), as long as the two kinds are
1286   // distinguishable and the size of each is determinable.
1287 
1288   // Returns the address of the start of the "block" that contains the
1289   // address "addr".  We say "blocks" instead of "objects" since some heaps
1290   // may not pack objects densely; a block may either be an object or a
1291   // non-object.
1292   HeapWord* block_start(const void* addr) const;
1293 
1294   // Requires "addr" to be the start of a block, and returns "TRUE" iff
1295   // the block is an object.
1296   bool block_is_obj(const HeapWord* addr) const;
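       // Illustrative sketch: resolving an arbitrary in-heap address to the
       // object (if any) covering it; assumes "addr" points into the committed
       // heap.
       //
       //   HeapWord* start = G1CollectedHeap::heap()->block_start(addr);
       //   if (G1CollectedHeap::heap()->block_is_obj(start)) {
       //     oop obj = cast_to_oop(start);   // The block is a Java object.
       //   }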
1297 
1298   // Section on thread-local allocation buffers (TLABs)
1299   // See CollectedHeap for semantics.
1300 
1301   size_t tlab_capacity(Thread* ignored) const;
1302   size_t tlab_used(Thread* ignored) const;
1303   size_t max_tlab_size() const;
1304   size_t unsafe_max_tlab_alloc(Thread* ignored) const;
1305 
1306   inline bool is_in_young(const oop obj);
1307 
1308   // Returns "true" iff the given word_size is "very large".
1309   static bool is_humongous(size_t word_size) {
1310     // Note this has to be strictly greater-than as the TLABs
1311     // are capped at the humongous threshold and we want to
1312     // ensure that we don't try to allocate a TLAB as
1313     // humongous and that we don't allocate a humongous
1314     // object in a TLAB.
1315     return word_size > _humongous_object_threshold_in_words;
1316   }
1317 
1318   // Returns the humongous threshold for a specific region size
1319   static size_t humongous_threshold_for(size_t region_size) {
1320     return (region_size / 2);
1321   }
1322 
1323   // Returns the number of regions the humongous object of the given word size
1324   // requires.
1325   static size_t humongous_obj_size_in_regions(size_t word_size);
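       // Worked example (illustrative numbers): with a 4 MB region and 8-byte
       // heap words, a region holds 512K words, so the humongous threshold is
       // 256K words (2 MB). A 5 MB (640K-word) allocation is therefore
       // humongous, and humongous_obj_size_in_regions returns
       // ceil(640K / 512K) = 2 regions.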
1326 
1327   // Print the maximum heap capacity.
1328   virtual size_t max_capacity() const;
1329 
1330   Tickspan time_since_last_collection() const { return Ticks::now() - _collection_pause_end; }
1331 
1332   // Convenience function to be used in situations where the heap type can be
1333   // asserted to be this type.
1334   static G1CollectedHeap* heap() {
1335     return named_heap<G1CollectedHeap>(CollectedHeap::G1);
1336   }
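       // For example, G1-specific code typically obtains the typed heap via
       // this accessor instead of casting Universe::heap():
       //
       //   G1CollectedHeap* g1h = G1CollectedHeap::heap();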
1337 
1338   void set_region_short_lived_locked(HeapRegion* hr);
1339   // Add appropriate methods for any other survivor rate groups.
1340 
1341   const G1SurvivorRegions* survivor() const { return &_survivor; }
1342 
1343   uint eden_regions_count() const { return _eden.length(); }
1344   uint eden_regions_count(uint node_index) const { return _eden.regions_on_node(node_index); }
1345   uint survivor_regions_count() const { return _survivor.length(); }
1346   uint survivor_regions_count(uint node_index) const { return _survivor.regions_on_node(node_index); }
1347   size_t eden_regions_used_bytes() const { return _eden.used_bytes(); }
1348   size_t survivor_regions_used_bytes() const { return _survivor.used_bytes(); }
1349   uint young_regions_count() const { return _eden.length() + _survivor.length(); }
1350   uint old_regions_count() const { return _old_set.length(); }
1351   uint archive_regions_count() const { return _archive_set.length(); }
1352   uint humongous_regions_count() const { return _humongous_set.length(); }
1353 
1354 #ifdef ASSERT
1355   bool check_young_list_empty();
1356 #endif
1357 
1358   bool is_marked_next(oop obj) const;
1359 
1360   // Determine if an object is dead, given the object and also
1361   // the region to which the object belongs.
1362   bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
1363     return hr->is_obj_dead(obj, _cm->prev_mark_bitmap());
1364   }
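       // Illustrative sketch (the surrounding iteration is hypothetical):
       // skipping objects that are dead with respect to the previous marking.
       //
       //   HeapRegion* hr = g1h->heap_region_containing(obj);
       //   if (!g1h->is_obj_dead(obj, hr)) {
       //     // Process the live object.
       //   }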
1365 
1366   // This function returns true when an object was allocated before the
1367   // current marking started, has not yet been marked during this marking,
1368   // and is not in a closed archive region.
1369   bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
1370     return
1371       !hr->obj_allocated_since_next_marking(obj) &&
1372       !is_marked_next(obj) &&
1373       !hr->is_closed_archive();
1374   }
1375 
1376   // Determine if an object is dead, given only the object itself.
1377   // This will find the region to which the object belongs and
1378   // then call the region version of the same function.
1379 
1380   // Note: if the object is NULL it is not considered dead.
1381 
1382   inline bool is_obj_dead(const oop obj) const;
1383 
1384   inline bool is_obj_ill(const oop obj) const;
1385 
1386   inline bool is_obj_dead_full(const oop obj, const HeapRegion* hr) const;
1387   inline bool is_obj_dead_full(const oop obj) const;
1388 
1389   G1ConcurrentMark* concurrent_mark() const { return _cm; }
1390 
1391   // Refinement
1392 
1393   G1ConcurrentRefine* concurrent_refine() const { return _cr; }
1394 
1395   // Optimized nmethod scanning support routines
1396 
1397   // Register the given nmethod with the G1 heap.
1398   virtual void register_nmethod(nmethod* nm);
1399 
1400   // Unregister the given nmethod from the G1 heap.
1401   virtual void unregister_nmethod(nmethod* nm);
1402 
1403   // No nmethod flushing needed.
1404   virtual void flush_nmethod(nmethod* nm) {}
1405 
1406   // No nmethod verification implemented.
1407   virtual void verify_nmethod(nmethod* nm) {}
1408 
1409   // Recalculate amount of used memory after GC. Must be called after all allocation
1410   // has finished.
1411   void update_used_after_gc();
1412   // Reset and re-enable the hot card cache.
1413   // Note that the counts for the cards in the collection set regions
1414   // are reset when the collection set is freed.
1415   void reset_hot_card_cache();
1416   // Free up superfluous code root memory.
1417   void purge_code_root_memory();
1418 
1419   // Rebuild the strong code root lists for each region
1420   // after a full GC.
1421   void rebuild_strong_code_roots();
1422 
1423   // Performs cleaning of data structures after class unloading.
1424   void complete_cleaning(BoolObjectClosure* is_alive, bool class_unloading_occurred);
1425 
1426   // Verification
1427 
1428   // Perform any cleanup actions necessary before allowing a verification.
1429   virtual void prepare_for_verify();
1430 
1431   // Perform verification.
1432 
1433   // vo == UsePrevMarking -> use "prev" marking information,
1434   // vo == UseNextMarking -> use "next" marking information,
1435   // vo == UseFullMarking -> use "next" marking bitmap but no TAMS.
1436   //
1437   // NOTE: Only the "prev" marking information is guaranteed to be
1438   // consistent most of the time, so most calls to this should use
1439   // vo == UsePrevMarking.
1440   // Currently, there is only one case where this is called with
1441   // vo == UseNextMarking, which is to verify the "next" marking
1442   // information at the end of remark.
1443   // Currently there is only one place where this is called with
1444   // vo == UseFullMarking, which is to verify the marking during a
1445   // full GC.
1446   void verify(VerifyOption vo);
1447 
1448   // WhiteBox testing support.
1449   virtual bool supports_concurrent_gc_breakpoints() const;
1450 
1451   virtual WorkGang* safepoint_workers() { return _workers; }
1452 
1453   virtual bool is_archived_object(oop object) const;
1454 
1455   // The methods below are here for convenience and dispatch the
1456   // appropriate method depending on the value of the given VerifyOption
1457   // parameter. The values for that parameter, and their meanings,
1458   // are the same as those above.
1459 
1460   bool is_obj_dead_cond(const oop obj,
1461                         const HeapRegion* hr,
1462                         const VerifyOption vo) const;
1463 
1464   bool is_obj_dead_cond(const oop obj,
1465                         const VerifyOption vo) const;
1466 
1467   G1HeapSummary create_g1_heap_summary();
1468   G1EvacSummary create_g1_evac_summary(G1EvacStats* stats);
1469 
1470   // Printing
1471 private:
1472   void print_heap_regions() const;
1473   void print_regions_on(outputStream* st) const;
1474 
1475 public:
1476   virtual void print_on(outputStream* st) const;
1477   virtual void print_extended_on(outputStream* st) const;
1478   virtual void print_on_error(outputStream* st) const;
1479 
1480   virtual void gc_threads_do(ThreadClosure* tc) const;
1481 
1482   // Override
1483   void print_tracing_info() const;
1484 
1485   // The following two methods are helpful for debugging RSet issues.
1486   void print_cset_rsets() PRODUCT_RETURN;
1487   void print_all_rsets() PRODUCT_RETURN;
1488 
1489   // Used to print information about locations in the hs_err file.
1490   virtual bool print_location(outputStream* st, void* addr) const;
1491 };
1492 
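     // Closure run by the GC worker threads after root scanning: do_void()
     // drains the per-thread scanner task queues, evacuating objects reachable
     // from already-copied ("follower") objects, and repeatedly offers
     // termination via the TaskTerminator once the local queues are empty.
     // The time spent in termination and the number of termination attempts
     // are recorded for the given GC phase.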
1493 class G1ParEvacuateFollowersClosure : public VoidClosure {
1494 private:
1495   double _start_term;
1496   double _term_time;
1497   size_t _term_attempts;
1498 
1499   void start_term_time() { _term_attempts++; _start_term = os::elapsedTime(); }
1500   void end_term_time() { _term_time += (os::elapsedTime() - _start_term); }
1501 protected:
1502   G1CollectedHeap*              _g1h;
1503   G1ParScanThreadState*         _par_scan_state;
1504   G1ScannerTasksQueueSet*       _queues;
1505   TaskTerminator*               _terminator;
1506   G1GCPhaseTimes::GCParPhases   _phase;
1507 
1508   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
1509   G1ScannerTasksQueueSet* queues()         { return _queues; }
1510   TaskTerminator*         terminator()     { return _terminator; }
1511 
1512 public:
1513   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
1514                                 G1ParScanThreadState* par_scan_state,
1515                                 G1ScannerTasksQueueSet* queues,
1516                                 TaskTerminator* terminator,
1517                                 G1GCPhaseTimes::GCParPhases phase)
1518     : _start_term(0.0), _term_time(0.0), _term_attempts(0),
1519       _g1h(g1h), _par_scan_state(par_scan_state),
1520       _queues(queues), _terminator(terminator), _phase(phase) {}
1521 
1522   void do_void();
1523 
1524   double term_time() const { return _term_time; }
1525   size_t term_attempts() const { return _term_attempts; }
1526 
1527 private:
1528   inline bool offer_termination();
1529 };
1530 
1531 #endif // SHARE_GC_G1_G1COLLECTEDHEAP_HPP