/*
 * Copyright (c) 2016, 2019, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHFREESET_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHFREESET_HPP

#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"

enum ShenandoahFreeMemoryType : uint8_t {
  NotFree,
  Mutator,
  Collector,
  OldCollector,
  NumFreeSets
};

class ShenandoahSetsOfFree {

private:
  size_t _max;                  // The maximum number of heap regions
  ShenandoahFreeSet* _free_set;
  size_t _region_size_bytes;
  ShenandoahFreeMemoryType* _membership;
  size_t _leftmosts[NumFreeSets];
  size_t _rightmosts[NumFreeSets];
  size_t _leftmosts_empty[NumFreeSets];
  size_t _rightmosts_empty[NumFreeSets];
  size_t _capacity_of[NumFreeSets];
  size_t _used_by[NumFreeSets];
  bool _left_to_right_bias[NumFreeSets];
  size_t _region_counts[NumFreeSets];

  inline void shrink_bounds_if_touched(ShenandoahFreeMemoryType set, size_t idx);
  inline void expand_bounds_maybe(ShenandoahFreeMemoryType set, size_t idx, size_t capacity);

  // Restore all state variables to initial default state.
  void clear_internal();

public:
  ShenandoahSetsOfFree(size_t max_regions, ShenandoahFreeSet* free_set);
  ~ShenandoahSetsOfFree();

  // Make all regions NotFree and reset all bounds
  void clear_all();

  // Remove or retire region idx from all free sets.  Requires that idx is in a free set.  This does not affect capacity.
  void remove_from_free_sets(size_t idx);

  // Place region idx into free set which_set.  Requires that idx is currently NotFree.
  void make_free(size_t idx, ShenandoahFreeMemoryType which_set, size_t region_capacity);

  // Place region idx into free set new_set.  Requires that idx is currently not NotFree.
  void move_to_set(size_t idx, ShenandoahFreeMemoryType new_set, size_t region_capacity);
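
  // For illustration only (a sketch, not an actual call site): a region idx might move through the
  // methods above roughly as follows, where region_capacity stands for the region's allocatable bytes:
  //
  //   make_free(idx, Mutator, region_capacity);        // region enters the Mutator free set
  //   move_to_set(idx, Collector, region_capacity);    // later reassigned to the Collector free set
  //   remove_from_free_sets(idx);                      // retired once it has no remaining alloc capacity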

  // Returns the ShenandoahFreeMemoryType affiliation of region idx, or NotFree if this region is not currently free.  This does
  // not enforce that free_set membership implies allocation capacity.
  inline ShenandoahFreeMemoryType membership(size_t idx) const;

  // Returns true iff region idx is in the free set which_set.  Before returning true, asserts that the free
  // set is not empty.  Requires that which_set is neither NotFree nor NumFreeSets.
  inline bool in_free_set(size_t idx, ShenandoahFreeMemoryType which_set) const;

  // The following four methods return the left-most and right-most bounds on ranges of regions representing
  // the requested set.  The _empty variants represent bounds on the range that holds completely empty
  // regions, which are required for humongous allocations and desired for "very large" allocations.  A
  // return value of _max from leftmost() or leftmost_empty() denotes that the corresponding set is empty.
  // In other words:
  //   if the requested which_set is empty:
  //     leftmost() and leftmost_empty() return _max, rightmost() and rightmost_empty() return 0
  //   otherwise, and provided the set holds at least one empty region, expect the following:
  //     0 <= leftmost <= leftmost_empty <= rightmost_empty <= rightmost < _max
  inline size_t leftmost(ShenandoahFreeMemoryType which_set) const;
  inline size_t rightmost(ShenandoahFreeMemoryType which_set) const;
  size_t leftmost_empty(ShenandoahFreeMemoryType which_set);
  size_t rightmost_empty(ShenandoahFreeMemoryType which_set);
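
  // For illustration only (a sketch, not an actual call site): a caller scanning the Mutator range
  // still needs to test membership, since not every index within the bounds belongs to the set:
  //
  //   for (size_t idx = leftmost(Mutator); idx <= rightmost(Mutator); idx++) {
  //     if (in_free_set(idx, Mutator)) {
  //       // consider region idx for allocation
  //     }
  //   }
  //
  // When the set is empty, leftmost(Mutator) returns _max and rightmost(Mutator) returns 0, so the
  // loop body is never entered.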

  inline void increase_used(ShenandoahFreeMemoryType which_set, size_t bytes);

  inline size_t capacity_of(ShenandoahFreeMemoryType which_set) const {
    assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
    return _capacity_of[which_set];
  }

  inline size_t used_by(ShenandoahFreeMemoryType which_set) const {
    assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
    return _used_by[which_set];
  }

  inline size_t max() const { return _max; }

  inline size_t count(ShenandoahFreeMemoryType which_set) const { return _region_counts[which_set]; }

  // Returns true iff allocation from this set should be performed left to right.  Otherwise, allocate
  // from right to left.
  inline bool alloc_from_left_bias(ShenandoahFreeMemoryType which_set);

  // Determine whether we prefer to allocate from left to right or from right to left for this free-set.
  void establish_alloc_bias(ShenandoahFreeMemoryType which_set);
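
  // For illustration only (a sketch, not an actual call site): the bias merely selects a scan
  // direction over the same bounds, e.g.
  //
  //   if (alloc_from_left_bias(which_set)) {
  //     for (size_t idx = leftmost(which_set); idx <= rightmost(which_set); idx++) { /* try region idx */ }
  //   } else {
  //     for (size_t idx = rightmost(which_set) + 1; idx-- > leftmost(which_set); ) { /* try region idx */ }
  //   }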

  // Assert that the leftmost, rightmost, leftmost_empty, and rightmost_empty bounds are valid for all free sets.
  // Valid bounds honor all of the following (where max is the number of heap regions):
  //   if the set is empty, leftmost equals max and rightmost equals 0
  //   Otherwise (the set is not empty):
  //     0 <= leftmost < max and 0 <= rightmost < max
  //     the region at leftmost is in the set
  //     the region at rightmost is in the set
  //     rightmost >= leftmost
  //     for every idx that is in the set {
  //       idx >= leftmost &&
  //       idx <= rightmost
  //     }
  //   if the set has no empty regions, leftmost_empty equals max and rightmost_empty equals 0
  //   Otherwise (the set has empty regions):
  //     0 <= leftmost_empty < max and 0 <= rightmost_empty < max
  //     rightmost_empty >= leftmost_empty
  //     for every idx that is in the set and is empty {
  //       idx >= leftmost_empty &&
  //       idx <= rightmost_empty
  //     }
  void assert_bounds() NOT_DEBUG_RETURN;
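
  // For illustration only (a sketch, not the actual implementation): for a non-empty set, the bounds
  // invariant above amounts to checks of roughly this shape:
  //
  //   assert(leftmost(set) <= rightmost(set), "bounds must be ordered");
  //   assert(in_free_set(leftmost(set), set), "leftmost bound must be a member");
  //   assert(in_free_set(rightmost(set), set), "rightmost bound must be a member");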
};

class ShenandoahFreeSet : public CHeapObj<mtGC> {
private:
  ShenandoahHeap* const _heap;
  ShenandoahSetsOfFree _free_sets;

  HeapWord* try_allocate_in(ShenandoahHeapRegion* region, ShenandoahAllocRequest& req, bool& in_new_region);

  HeapWord* allocate_aligned_plab(size_t size, ShenandoahAllocRequest& req, ShenandoahHeapRegion* r);

  // Satisfy a young-generation or single-generation collector allocation request req by finding memory that matches
  // the requested affiliation, which either equals req.affiliation or FREE.  We know req.is_young().
  HeapWord* allocate_with_affiliation(ShenandoahAffiliation affiliation, ShenandoahAllocRequest& req, bool& in_new_region);

  // Satisfy an old-generation allocation request req by finding memory that matches the requested affiliation,
  // which either equals req.affiliation or FREE.  We know req.is_old().
  HeapWord* allocate_old_with_affiliation(ShenandoahAffiliation affiliation, ShenandoahAllocRequest& req, bool& in_new_region);

  // While holding the heap lock, allocate memory for a single object which is to be entirely contained
  // within a single HeapRegion as characterized by req.  The req.size() value is known to be less than or
  // equal to ShenandoahHeapRegion::humongous_threshold_words().  The caller of allocate_single is responsible
  // for registering the resulting object and setting the remembered set card values as appropriate.  The
  // most common case is that we are allocating a PLAB, in which case object registration and card dirtying
  // are managed after the PLAB is divided into individual objects.
  HeapWord* allocate_single(ShenandoahAllocRequest& req, bool& in_new_region);
  HeapWord* allocate_contiguous(ShenandoahAllocRequest& req);

  void flip_to_gc(ShenandoahHeapRegion* r);
  void flip_to_old_gc(ShenandoahHeapRegion* r);

  void adjust_bounds_for_additional_old_collector_free_region(size_t idx);

  void recompute_bounds();
  void adjust_bounds();
  bool touches_bounds(size_t num) const;

  // The "used" value of the free set tracks the amount of memory in the Mutator set that has been consumed
  // since the most recent rebuild.
  void increase_used(size_t amount);
  void clear_internal();

  void try_recycle_trashed(ShenandoahHeapRegion *r);

  bool can_allocate_from(ShenandoahHeapRegion *r) const;
  bool can_allocate_from(size_t idx) const;
  bool has_alloc_capacity(size_t idx) const;
  bool has_alloc_capacity(ShenandoahHeapRegion *r) const;
  bool has_no_alloc_capacity(ShenandoahHeapRegion *r) const;

public:
  ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions);

  size_t alloc_capacity(ShenandoahHeapRegion *r) const;
  size_t alloc_capacity(size_t idx) const;

  void clear();
  void prepare_to_rebuild(size_t &young_cset_regions, size_t &old_cset_regions);
  void rebuild(size_t young_cset_regions, size_t old_cset_regions);
  void move_collector_sets_to_mutator(size_t cset_regions);
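
  // For illustration only (a sketch, not an actual call site): judging by the signatures, the counts
  // produced by prepare_to_rebuild() feed directly into the subsequent rebuild(), e.g.
  //
  //   size_t young_cset_regions, old_cset_regions;
  //   prepare_to_rebuild(young_cset_regions, old_cset_regions);
  //   rebuild(young_cset_regions, old_cset_regions);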

  void add_old_collector_free_region(ShenandoahHeapRegion* region);

  void recycle_trash();

  void log_status();

  inline size_t capacity()  const { return _free_sets.capacity_of(Mutator); }
  inline size_t used()      const { return _free_sets.used_by(Mutator);     }
  inline size_t available() const {
    assert(used() <= capacity(), "must not use more than capacity");
    return capacity() - used();
  }
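
  // Worked example with illustrative numbers: if the Mutator set currently has a capacity of 512 MB
  // and 128 MB of it has been consumed since the last rebuild, capacity() returns 512 MB, used()
  // returns 128 MB, and available() returns 384 MB.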

  HeapWord* allocate(ShenandoahAllocRequest& req, bool& in_new_region);
  size_t unsafe_peek_free() const;

  double internal_fragmentation();
  double external_fragmentation();

  void print_on(outputStream* out) const;

  void find_regions_with_alloc_capacity(size_t &young_cset_regions, size_t &old_cset_regions);
  void reserve_regions(size_t young_reserve, size_t old_reserve);
};

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHFREESET_HPP