/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHOLDGENERATION_HPP
#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHOLDGENERATION_HPP

#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahSharedVariables.hpp"

class ShenandoahHeapRegion;
class ShenandoahHeapRegionClosure;

class ShenandoahOldGeneration : public ShenandoahGeneration {
private:
  ShenandoahHeapRegion** _coalesce_and_fill_region_array;
  ShenandoahOldHeuristics* _old_heuristics;

  // After determining the desired size of the old generation (see compute_old_generation_balance), this
  // quantity represents the number of regions above (surplus) or below (deficit) that size.
  // This value is computed prior to the actual exchange of any regions. A positive value represents
  // a surplus of old regions which will be transferred from old _to_ young. A negative value represents
  // a deficit of regions that will be replenished by a transfer _from_ young to old.
  ssize_t _region_balance;

  // Set when evacuation in the old generation fails. When this is set, the control thread will initiate a
  // full GC instead of a futile degenerated cycle.
  ShenandoahSharedFlag _failed_evacuation;

  // Bytes reserved within old-gen to hold the results of promotion. This is separate from
  // and in addition to the evacuation reserve for intra-generation evacuations (ShenandoahGeneration::_evacuation_reserve).
  size_t _promoted_reserve;

  // Bytes of old-gen memory expended on promotions. This may be modified concurrently
  // by mutators and GC workers when promotion LABs are retired during evacuation. It
  // is therefore always accessed through atomic operations. This is increased when a
  // PLAB is allocated for promotions. The value is decreased by the amount of memory
  // remaining in a PLAB when it is retired.
  size_t _promoted_expended;
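
  // A minimal sketch (for illustration, not the actual implementation) of how
  // expend_promoted() and unexpend_promoted() might maintain this counter using
  // HotSpot's Atomic API; the relaxed memory order is an assumption:
  //
  //   size_t ShenandoahOldGeneration::expend_promoted(size_t increment) {
  //     return Atomic::add(&_promoted_expended, increment, memory_order_relaxed);
  //   }
  //
  //   size_t ShenandoahOldGeneration::unexpend_promoted(size_t decrement) {
  //     return Atomic::sub(&_promoted_expended, decrement, memory_order_relaxed);
  //   }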

  // Represents the quantity of live bytes we expect to promote in place during the next
  // evacuation cycle. This value is used by the young heuristic to trigger mixed collections.
  // It is also used when computing the optimum size for the old generation.
  size_t _promotion_potential;

  // When a region is selected to be promoted in place, the remaining free memory is filled
  // in to prevent additional allocations (preventing premature promotion of newly allocated
  // objects). This field records the total amount of padding used for such regions.
  size_t _pad_for_promote_in_place;

  // During construction of the collection set, we keep track of regions that are eligible
  // for promotion in place. These fields track the count of those humongous and regular regions.
  // This data is used to force the evacuation phase even when the collection set is otherwise
  // empty.
  size_t _promotable_humongous_regions;
  size_t _promotable_regular_regions;

  // True if old regions may be safely traversed by the remembered set scan.
  bool _is_parseable;

  bool coalesce_and_fill();

public:
  ShenandoahOldGeneration(uint max_queues, size_t max_capacity, size_t soft_max_capacity);

  virtual ShenandoahHeuristics* initialize_heuristics(ShenandoahMode* gc_mode) override;

  const char* name() const override {
    return "OLD";
  }

  ShenandoahOldHeuristics* heuristics() const override {
    return _old_heuristics;
  }

  // See description in field declaration
  void set_promoted_reserve(size_t new_val);
  size_t get_promoted_reserve() const;

  // The promotion reserve is increased when rebuilding the free set transfers a region to the old generation
  void augment_promoted_reserve(size_t increment);

  // This zeros out the expended promotion count after the promotion reserve is computed
  void reset_promoted_expended();

  // This is incremented when allocations are made to copy promotions into the old generation
  size_t expend_promoted(size_t increment);

  // This is used to return unused memory from a retired promotion LAB
  size_t unexpend_promoted(size_t decrement);

  // This is used on the allocation path to gate promotions that would exceed the reserve
  size_t get_promoted_expended();

  // See description in field declaration
  void set_region_balance(ssize_t balance) { _region_balance = balance; }
  ssize_t get_region_balance() const { return _region_balance; }

  // See description in field declaration
  void set_promotion_potential(size_t val) { _promotion_potential = val; }
  size_t get_promotion_potential() const { return _promotion_potential; }

  // See description in field declaration
  void set_pad_for_promote_in_place(size_t pad) { _pad_for_promote_in_place = pad; }
  size_t get_pad_for_promote_in_place() const { return _pad_for_promote_in_place; }

  // See description in field declaration
  void set_expected_humongous_region_promotions(size_t region_count) { _promotable_humongous_regions = region_count; }
  void set_expected_regular_region_promotions(size_t region_count) { _promotable_regular_regions = region_count; }
  bool has_in_place_promotions() const { return (_promotable_humongous_regions + _promotable_regular_regions) > 0; }

  // Class unloading may render the card table offsets unusable, if they refer to unmarked objects
  bool is_parseable() const { return _is_parseable; }
  void set_parseable(bool parseable);

  // This will signal the heuristic to trigger an old generation collection
  void handle_failed_transfer();

  // This will signal the control thread to run a full GC instead of a futile degenerated GC
  void handle_failed_evacuation();
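
  // A minimal sketch (an illustration, not the actual control-thread code) of how
  // the failed-evacuation flag might be consumed to upgrade a futile degenerated
  // cycle to a full GC; the service_* method names follow ShenandoahControlThread:
  //
  //   ShenandoahOldGeneration* old_gen = heap->old_generation();
  //   if (old_gen->clear_failed_evacuation()) {
  //     // Old-gen evacuation failed, so a degenerated cycle would be futile.
  //     service_stw_full_cycle(cause);
  //   } else {
  //     service_stw_degenerated_cycle(cause, point);
  //   }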

  // This logs that a promotion (an evacuation into the old generation) has failed
  void handle_failed_promotion(Thread* thread, size_t size);

  // A successful evacuation re-dirties the cards and registers the object with the remembered set
  void handle_evacuation(HeapWord* obj, size_t words, bool promotion);

  // Clear the flag after it is consumed by the control thread
  bool clear_failed_evacuation() {
    return _failed_evacuation.try_unset();
  }

  // Transition to the next state after mixed evacuations have completed
  void complete_mixed_evacuations();

  // Abandon any future mixed collections. This is invoked when all old regions eligible for
  // inclusion in a mixed evacuation are pinned. This should be rare.
  void abandon_mixed_evacuations();

  void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* cl) override;

  void parallel_region_iterate_free(ShenandoahHeapRegionClosure* cl) override;

  void heap_region_iterate(ShenandoahHeapRegionClosure* cl) override;

  bool contains(ShenandoahHeapRegion* region) const override;
  bool contains(oop obj) const override;

  void set_concurrent_mark_in_progress(bool in_progress) override;
  bool is_concurrent_mark_in_progress() override;

  bool entry_coalesce_and_fill();
  void prepare_for_mixed_collections_after_global_gc();
  void prepare_gc() override;
  void prepare_regions_and_collection_set(bool concurrent) override;
  void record_success_concurrent(bool abbreviated) override;
  void cancel_marking() override;

  // We leave the SATB barrier on for the entirety of the old generation
  // marking phase. In some cases, this can cause a write to a perfectly
  // reachable oop to enqueue a pointer that later becomes garbage (because
  // it points at an object that is later chosen for the collection set). There are
  // also cases where the referent of a weak reference ends up in the SATB
  // buffer and is later collected. In these cases the oop in the SATB buffer becomes
  // invalid and the _next_ cycle will crash during its marking phase. To
  // avoid this problem, we "purge" the SATB buffers during the final update
  // references phase if (and only if) an old generation mark is in progress.
  // At this stage we can safely determine if any of the oops in the SATB
  // buffer belong to trashed regions (before they are recycled). As it
  // happens, flushing a SATB queue also filters out oops which have already
  // been marked - which is the case for anything that is being evacuated
  // from the collection set.
  //
  // Alternatively, we could inspect the state of the heap and the age of the
  // object at the barrier, but we reject this approach because it is likely
  // the performance impact would be too severe.
  void transfer_pointers_from_satb();
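
  // A minimal sketch (a hypothetical call site, for illustration only) of the purge
  // described above, performed at final update references while old marking is active:
  //
  //   void ShenandoahHeap::final_update_refs_purge_satb() {  // hypothetical helper
  //     if (old_generation()->is_concurrent_mark_in_progress()) {
  //       // Drain the SATB buffers now, while oops into trashed regions can still
  //       // be identified, so no stale oop survives into the next marking cycle.
  //       old_generation()->transfer_pointers_from_satb();
  //     }
  //   }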

  // True if there are old regions waiting to be selected for a mixed collection
  bool has_unprocessed_collection_candidates();

  bool is_doing_mixed_evacuations() const {
    return state() == EVACUATING || state() == EVACUATING_AFTER_GLOBAL;
  }

  bool is_preparing_for_mark() const {
    return state() == FILLING;
  }

  bool is_idle() const {
    return state() == WAITING_FOR_BOOTSTRAP;
  }

  bool is_bootstrapping() const {
    return state() == BOOTSTRAPPING;
  }

  // Amount of live memory (bytes) in regions waiting for mixed collections
  size_t unprocessed_collection_candidates_live_memory();

  // Abandon any regions waiting for mixed collections
  void abandon_collection_candidates();

  void maybe_trigger_collection(size_t first_old_region, size_t last_old_region, size_t old_region_count);

public:
  enum State {
    FILLING, WAITING_FOR_BOOTSTRAP, BOOTSTRAPPING, MARKING, EVACUATING, EVACUATING_AFTER_GLOBAL
  };

#ifdef ASSERT
  bool validate_waiting_for_bootstrap();
#endif

private:
  State _state;

  static const size_t FRACTIONAL_DENOMINATOR = 65536;

  // During initialization of the JVM, we search for the correct old-gen size by initially performing old-gen
  // collection when old-gen usage is 50% more (INITIAL_GROWTH_BEFORE_COMPACTION) than the initial old-gen size
  // estimate (INITIAL_LIVE_FRACTION, 6.25% of heap). The next old-gen trigger occurs when old-gen grows 25%
  // larger than its live memory at the end of the first old-gen collection. Then we trigger again when old-gen
  // grows 12.5% more than its live memory at the end of the previous old-gen collection. Thereafter, we trigger
  // each time old-gen grows more than 12.5% following the end of its previous old-gen collection.
  static const size_t INITIAL_GROWTH_BEFORE_COMPACTION = FRACTIONAL_DENOMINATOR / 2;  // 50.0%

  // INITIAL_LIVE_FRACTION represents the initial guess of how large old-gen should be. We estimate that old-gen
  // needs to consume 6.25% of the total heap size. And we "pretend" that we start out with this amount of live
  // old-gen memory. The first old-collection trigger will occur when old-gen occupies 50% more than this initial
  // approximation of the old-gen memory requirement, in other words when old-gen usage is 150% of 6.25%, which
  // is 9.375% of the total heap size.
  static const uint16_t INITIAL_LIVE_FRACTION = FRACTIONAL_DENOMINATOR / 16;  // 6.25%

  size_t _live_bytes_after_last_mark;

  // How much growth in usage before we trigger old collection, per FRACTIONAL_DENOMINATOR (65,536)
  size_t _growth_before_compaction;
  const size_t _min_growth_before_compaction; // Default is 12.5%

  void validate_transition(State new_state) NOT_DEBUG_RETURN;

public:
  State state() const {
    return _state;
  }

  const char* state_name() const {
    return state_name(_state);
  }

  void transition_to(State new_state);

  size_t get_live_bytes_after_last_mark() const;
  void set_live_bytes_after_last_mark(size_t new_live);

  size_t usage_trigger_threshold() const;

  bool can_start_gc() {
    return _state == WAITING_FOR_BOOTSTRAP;
  }

  static const char* state_name(State state);
};


#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHOLDGENERATION_HPP
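
// A worked example (illustrative arithmetic only) of the fixed-point trigger math above,
// assuming usage_trigger_threshold() scales the last-mark live bytes by the current
// growth factor:
//
//   threshold = live_bytes_after_last_mark
//             * (FRACTIONAL_DENOMINATOR + growth_before_compaction) / FRACTIONAL_DENOMINATOR
//
// With a 1024 MB heap: the initial "live" estimate is 1024 MB / 16 = 64 MB (6.25%), and the
// first trigger fires at 64 MB * (65536 + 32768) / 65536 = 96 MB, i.e. 9.375% of the heap.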