/*
 * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1COLLECTIONSET_HPP
#define SHARE_GC_G1_G1COLLECTIONSET_HPP

#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

class G1CollectedHeap;
class G1CollectionSetCandidates;
class G1CollectorState;
class G1GCPhaseTimes;
class G1ParScanThreadStateSet;
class G1Policy;
class G1SurvivorRegions;
class HeapRegion;
class HeapRegionClosure;

class G1CollectionSet {
  G1CollectedHeap* _g1h;
  G1Policy* _policy;

  // All old gen collection set candidate regions for the current mixed gc phase.
  G1CollectionSetCandidates* _candidates;

  uint _eden_region_length;
  uint _survivor_region_length;
  uint _old_region_length;

  // The actual collection set as a set of region indices.
  // All entries in _collection_set_regions below _collection_set_cur_length are
  // assumed to be valid entries.
  // We assume that at any time there is at most one writer and (one or more)
  // concurrent readers. This means a storestore barrier on the writer side and a
  // loadload barrier on the reader side are sufficient to keep readers consistent.
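  //
  // A minimal sketch of the intended publication protocol (illustrative only; the
  // actual code lives in g1CollectionSet.cpp and may differ in detail):
  //
  //   Writer (single thread):
  //     _collection_set_regions[_collection_set_cur_length] = hr->hrm_index();
  //     OrderAccess::storestore();      // publish the entry before the new length
  //     _collection_set_cur_length++;
  //
  //   Reader (possibly concurrent):
  //     size_t len = _collection_set_cur_length;
  //     OrderAccess::loadload();        // read the length before any entries
  //     // entries [0, len) are now safe to read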
  uint* _collection_set_regions;
  volatile size_t _collection_set_cur_length;
  size_t _collection_set_max_length;

  // When doing mixed collections we can add old regions to the collection set, which
  // may be collected if there is enough time. We call these optional regions; the
  // pointers to these regions are stored in the array below.
  HeapRegion** _optional_regions;
  uint _optional_region_length;
  uint _optional_region_max_length;

  // The number of bytes in the collection set before the pause. Set from
  // the incrementally built collection set at the start of an evacuation
  // pause, and incremented in finalize_old_part() when adding old regions
  // (if any) to the collection set.
  size_t _bytes_used_before;

  size_t _recorded_rs_lengths;

  // The associated information that is maintained while the incremental
  // collection set is being built with young regions. Used to populate
  // the recorded info for the evacuation pause.

  enum CSetBuildType {
    Active,             // We are actively building the collection set
    Inactive            // We are not actively building the collection set
  };

  CSetBuildType _inc_build_state;

  // The number of bytes in the incrementally built collection set.
  // Used to set _bytes_used_before at the start of an evacuation pause.
  size_t _inc_bytes_used_before;

  // The RSet lengths recorded for regions in the CSet. It is updated
  // by the thread that adds a new region to the CSet. We assume that
  // only one thread can be allocating a new CSet region (currently,
  // it does so after taking the Heap_lock) hence no need to
  // synchronize updates to this field.
  size_t _inc_recorded_rs_lengths;

  // A concurrent refinement thread periodically samples the young
  // region RSets and needs to update _inc_recorded_rs_lengths as
  // the RSets grow. Instead of having to synchronize updates to that
  // field we accumulate them in this field and fold it into
  // _inc_recorded_rs_lengths at the start of a GC.
  ssize_t _inc_recorded_rs_lengths_diffs;
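  //
  // A hedged sketch of how a refinement sample is accumulated (illustrative only;
  // the actual update lives in update_young_region_prediction() in
  // g1CollectionSet.cpp), using hypothetical locals old_rs_length and new_rs_length:
  //
  //   ssize_t diff = (ssize_t)new_rs_length - (ssize_t)old_rs_length;
  //   _inc_recorded_rs_lengths_diffs += diff;
  //
  // At the start of a GC, finalize_incremental_building() is expected to fold the
  // accumulated diffs into _inc_recorded_rs_lengths and reset this field to zero.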

  // The predicted elapsed time it will take to collect the regions in
  // the CSet. This is updated by the thread that adds a new region to
  // the CSet. See the comment for _inc_recorded_rs_lengths about
  // MT-safety assumptions.
  double _inc_predicted_elapsed_time_ms;

  // See the comment for _inc_recorded_rs_lengths_diffs.
  double _inc_predicted_elapsed_time_ms_diffs;

  G1CollectorState* collector_state();
  G1GCPhaseTimes* phase_times();

  void verify_young_cset_indices() const NOT_DEBUG_RETURN;
  void add_as_optional(HeapRegion* hr);
  void add_as_old(HeapRegion* hr);
  bool optional_is_full();

public:
  G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy);
  ~G1CollectionSet();

  // Initializes the collection set, given the maximum possible length of the collection set.
  void initialize(uint max_region_length);
  void initialize_optional(uint max_length);
  void free_optional_regions();

  void clear_candidates();

  void set_candidates(G1CollectionSetCandidates* candidates) {
    assert(_candidates == NULL, "Trying to replace collection set candidates.");
    _candidates = candidates;
  }
  G1CollectionSetCandidates* candidates() { return _candidates; }

  void init_region_lengths(uint eden_cset_region_length,
                           uint survivor_cset_region_length);

  void set_recorded_rs_lengths(size_t rs_lengths);

  uint region_length() const       { return young_region_length() +
                                            old_region_length(); }
  uint young_region_length() const { return eden_region_length() +
                                            survivor_region_length(); }

  uint eden_region_length() const     { return _eden_region_length;     }
  uint survivor_region_length() const { return _survivor_region_length; }
  uint old_region_length() const      { return _old_region_length;      }
  uint optional_region_length() const { return _optional_region_length; }

  // Incremental collection set support

  // Initialize incremental collection set info.
  void start_incremental_building();

  // Perform any final calculations on the incremental collection set fields
  // before we can use them.
  void finalize_incremental_building();
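  //
  // Hedged lifecycle sketch, inferred from the declarations in this header (the
  // exact call sites and ordering live elsewhere and may differ):
  //
  //   start_incremental_building();      // begin building while the mutator runs
  //   add_eden_region(hr); ...           // regions are added incrementally
  //   finalize_incremental_building();   // fold in pending updates at pause start
  //   // ... finalize_young_part() / finalize_old_part() then select the final set,
  //   // stop_incremental_building() ends the incremental phase, clear() resets it.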

  // Reset the contents of the collection set.
  void clear();

  // Iterate over the collection set, applying the given HeapRegionClosure to all regions.
  // Iteration may be aborted using the return value of the called closure method.
  void iterate(HeapRegionClosure* cl) const;

  // Iterate over the collection set, applying the given HeapRegionClosure to all regions,
  // trying to optimally spread out the starting positions of the total_workers workers
  // given the caller's worker_id.
  void iterate_from(HeapRegionClosure* cl, uint worker_id, uint total_workers) const;
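  //
  // A hedged usage sketch from a GC worker thread (MyRegionClosure and num_workers
  // are hypothetical; actual callers typically go through G1CollectedHeap):
  //
  //   MyRegionClosure cl;
  //   collection_set->iterate_from(&cl, worker_id, num_workers);
  //
  // Each worker starts at a different offset into the region array so that the
  // workers first claim disjoint parts of the collection set.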

  // Stop adding regions to the incremental collection set.
  void stop_incremental_building() { _inc_build_state = Inactive; }

  size_t recorded_rs_lengths() { return _recorded_rs_lengths; }

  size_t bytes_used_before() const {
    return _bytes_used_before;
  }

  void reset_bytes_used_before() {
    _bytes_used_before = 0;
  }

  // Choose a new collection set.  Marks the chosen regions as being
  // "in_collection_set".
  double finalize_young_part(double target_pause_time_ms, G1SurvivorRegions* survivors);
  void finalize_old_part(double time_remaining_ms);

  // Add old region "hr" to the collection set.
  void add_old_region(HeapRegion* hr);

  // Add old region "hr" to the optional collection set.
  void add_optional_region(HeapRegion* hr);

  // Update the aggregated information for the incrementally built collection set
  // with the new data for region hr.
  void update_young_region_prediction(HeapRegion* hr, size_t new_rs_length);

  // Add eden region to the collection set.
  void add_eden_region(HeapRegion* hr);

  // Add survivor region to the collection set.
  void add_survivor_regions(HeapRegion* hr);

#ifndef PRODUCT
  bool verify_young_ages();

  void print(outputStream* st);
#endif // !PRODUCT

  double predict_region_elapsed_time_ms(HeapRegion* hr);

  void clear_optional_region(const HeapRegion* hr);

  HeapRegion* optional_region_at(uint i) const {
    assert(_optional_regions != NULL, "Not yet initialized");
    assert(i < _optional_region_length, "index %u out of bounds (%u)", i, _optional_region_length);
    return _optional_regions[i];
  }

  HeapRegion* remove_last_optional_region() {
    assert(_optional_regions != NULL, "Not yet initialized");
    assert(_optional_region_length != 0, "No region to remove");
    _optional_region_length--;
    HeapRegion* removed = _optional_regions[_optional_region_length];
    _optional_regions[_optional_region_length] = NULL;
    return removed;
  }

private:
  // Update the incremental collection set information when adding a region.
  void add_young_region_common(HeapRegion* hr);
};

// Helper class to manage the optional regions in a Mixed collection.
class G1OptionalCSet : public StackObj {
private:
  G1CollectionSet* _cset;
  G1ParScanThreadStateSet* _pset;
  uint _current_index;
  uint _current_limit;
  bool _prepare_failed;
  bool _evacuation_failed;

  void prepare_to_evacuate_optional_region(HeapRegion* hr);

public:
  static const uint InvalidCSetIndex = UINT_MAX;

  G1OptionalCSet(G1CollectionSet* cset, G1ParScanThreadStateSet* pset) :
    _cset(cset),
    _pset(pset),
    _current_index(0),
    _current_limit(0),
    _prepare_failed(false),
    _evacuation_failed(false) { }
  // The destructor returns regions to the collection set candidates set and
  // frees the optional structure in the collection set.
  ~G1OptionalCSet();

  uint current_index() { return _current_index; }
  uint current_limit() { return _current_limit; }

  uint size();
  bool is_empty();

  HeapRegion* region_at(uint index);

  // Prepare a set of regions for optional evacuation.
  void prepare_evacuation(double time_left_ms);
  bool prepare_failed();

  // Complete the evacuation of the previously prepared
  // regions by updating their state and checking for failures.
  void complete_evacuation();
  bool evacuation_failed();
};
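
// A hedged usage sketch of the prepare/evacuate/complete cycle (illustrative only;
// evacuate_optional_regions is a hypothetical helper and the real driver lives in
// G1CollectedHeap):
//
//   G1OptionalCSet ocset(collection_set, per_thread_states);
//   while (!ocset.is_empty() && time_left_ms > 0.0) {
//     ocset.prepare_evacuation(time_left_ms);
//     if (ocset.prepare_failed()) break;      // not enough time for more regions
//     evacuate_optional_regions(&ocset);      // hypothetical parallel evacuation
//     ocset.complete_evacuation();
//     if (ocset.evacuation_failed()) break;   // stop on evacuation failure
//     time_left_ms = ...;                     // recompute remaining pause time
//   }
//
// The destructor then returns any remaining optional regions to the candidate set.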

#endif // SHARE_GC_G1_G1COLLECTIONSET_HPP