< prev index next >

src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp

Print this page

 17  * 2 along with this work; if not, write to the Free Software Foundation,
 18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19  *
 20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 21  * or visit www.oracle.com if you need additional information or have any
 22  * questions.
 23  *
 24  */
 25 
 26 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHFREESET_HPP
 27 #define SHARE_GC_SHENANDOAH_SHENANDOAHFREESET_HPP
 28 
 29 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
 30 #include "gc/shenandoah/shenandoahHeap.hpp"
 31 #include "gc/shenandoah/shenandoahSimpleBitMap.hpp"
 32 
 33 // Each ShenandoahHeapRegion is associated with a ShenandoahFreeSetPartitionId.
 34 enum class ShenandoahFreeSetPartitionId : uint8_t {
 35   Mutator,                      // Region is in the Mutator free set: available memory is available to mutators.
 36   Collector,                    // Region is in the Collector free set: available memory is reserved for evacuations.


 37   NotFree                       // Region is in no free set: it has no available memory
 38 };
 39 
 40 // We do not maintain counts, capacity, or used for regions that are not free.  Informally, if a region is NotFree, it is
 41 // in no partition.  NumPartitions represents the size of an array that may be indexed by Mutator or Collector.
 42 #define NumPartitions           (ShenandoahFreeSetPartitionId::NotFree)
 43 #define IntNumPartitions     int(ShenandoahFreeSetPartitionId::NotFree)
 44 #define UIntNumPartitions   uint(ShenandoahFreeSetPartitionId::NotFree)
 45 
 46 // ShenandoahRegionPartitions provides an abstraction to help organize the implementation of ShenandoahFreeSet.  This
 47 // class implements partitioning of regions into distinct sets.  Each ShenandoahHeapRegion is either in the Mutator free set,
 48 // the Collector free set, or in neither free set (NotFree).  When we speak of a "free partition", we mean partitions
 49 // for which the ShenandoahFreeSetPartitionId is not equal to NotFree.
 50 class ShenandoahRegionPartitions {
 51 
 52 private:
 53   const ssize_t _max;           // The maximum number of heap regions
 54   const size_t _region_size_bytes;
 55   const ShenandoahFreeSet* _free_set;
 56   // For each partition, we maintain a bitmap of which regions are affiliated with this partition.

 63   ssize_t _leftmosts[UIntNumPartitions];
 64   ssize_t _rightmosts[UIntNumPartitions];
 65 
 66   // Allocation for humongous objects needs to find regions that are entirely empty.  For each partition p, _leftmosts_empty[p]
 67   // represents the first region belonging to this partition that is completely empty and _rightmosts_empty[p] represents the
 68   // last region that is completely empty.  If there is no completely empty region in this partition, this is represented
 69   // by the canonical [_max, -1].
 70   ssize_t _leftmosts_empty[UIntNumPartitions];
 71   ssize_t _rightmosts_empty[UIntNumPartitions];
 72 
 73   // For each partition p, _capacity[p] represents the total amount of memory within the partition at the time
 74   // of the most recent rebuild, _used[p] represents the total amount of memory that has been allocated within this
 75   // partition (either already allocated as of the rebuild, or allocated since the rebuild).  _capacity[p] and _used[p]
 76   // are denoted in bytes.  Note that some regions that had been assigned to a particular partition at rebuild time
 77   // may have been retired following the rebuild.  The tallies for these regions are still reflected in _capacity[p]
 78   // and _used[p], even though the region may have been removed from the free set.
 79   size_t _capacity[UIntNumPartitions];
 80   size_t _used[UIntNumPartitions];
 81   size_t _region_counts[UIntNumPartitions];
 82 




 83   // Shrink the intervals associated with partition when region idx is removed from this free set
 84   inline void shrink_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, ssize_t idx);
 85 
 86   // Shrink the intervals associated with partition when regions low_idx through high_idx inclusive are removed from this free set
 87   inline void shrink_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId partition,
 88                                                                 ssize_t low_idx, ssize_t high_idx);
 89   inline void expand_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, ssize_t idx, size_t capacity);
 90 





 91 #ifndef PRODUCT
 92   void dump_bitmap_row(ssize_t region_idx) const;
 93   void dump_bitmap_range(ssize_t start_region_idx, ssize_t end_region_idx) const;
 94   void dump_bitmap() const;
 95 #endif
 96 public:
 97   ShenandoahRegionPartitions(size_t max_regions, ShenandoahFreeSet* free_set);
 98   ~ShenandoahRegionPartitions() {}
 99 
100   // Remove all regions from all partitions and reset all bounds
101   void make_all_regions_unavailable();
102 
103   // Set the partition id for a particular region without adjusting interval bounds or usage/capacity tallies
104   inline void raw_assign_membership(size_t idx, ShenandoahFreeSetPartitionId p) {
105     _membership[int(p)].set_bit(idx);
106   }
107 
108   // Set the Mutator intervals, usage, and capacity according to arguments.  Reset the Collector intervals, used, capacity
109   // to represent empty Collector free set.  We use this at the end of rebuild_free_set() to avoid the overhead of making
110   // many redundant incremental adjustments to the mutator intervals as the free set is being rebuilt.
111   void establish_mutator_intervals(ssize_t mutator_leftmost, ssize_t mutator_rightmost,
112                                    ssize_t mutator_leftmost_empty, ssize_t mutator_rightmost_empty,
113                                    size_t mutator_region_count, size_t mutator_used);
114 







115   // Retire region idx from within partition, leaving its capacity and used as part of the original free partition's totals.
116   // Requires that region idx is in the Mutator or Collector partitions.  Hereafter, identifies this region as NotFree.
117   // Any remnant of available memory at the time of retirement is added to the original partition's total of used bytes.
118   void retire_from_partition(ShenandoahFreeSetPartitionId p, ssize_t idx, size_t used_bytes);
119 
120   // Retire all regions between low_idx and high_idx inclusive from within partition.  Requires that each region idx is
121   // in the same Mutator or Collector partition.  Hereafter, identifies each region as NotFree.   Assumes that each region
122   // is now considered fully used, since the region is presumably used to represent a humongous object.
123   void retire_range_from_partition(ShenandoahFreeSetPartitionId partition, ssize_t low_idx, ssize_t high_idx);
124 
125   // Place region idx into free set which_partition.  Requires that idx is currently NotFree.
126   void make_free(ssize_t idx, ShenandoahFreeSetPartitionId which_partition, size_t region_capacity);
127 
128   // Place region idx into free partition new_partition, adjusting used and capacity totals for the original and new partition
129   // given that available bytes can still be allocated within this region.  Requires that idx is currently not NotFree.
130   void move_from_partition_to_partition(ssize_t idx, ShenandoahFreeSetPartitionId orig_partition,
131                                         ShenandoahFreeSetPartitionId new_partition, size_t available);
132 
133   const char* partition_membership_name(ssize_t idx) const;
134 

163   inline size_t max_regions() const { return _max; }
164 
165   inline size_t region_size_bytes() const { return _region_size_bytes; };
166 
167   // The following four methods return the left-most and right-most bounds on ranges of regions representing
168   // the requested set.  The _empty variants represent bounds on the range that holds completely empty
169   // regions, which are required for humongous allocations and desired for "very large" allocations.
170   //   if the requested which_partition is empty:
171   //     leftmost() and leftmost_empty() return _max, rightmost() and rightmost_empty() return 0
172   //   otherwise, expect the following:
173   //     0 <= leftmost <= leftmost_empty <= rightmost_empty <= rightmost < _max
174   inline ssize_t leftmost(ShenandoahFreeSetPartitionId which_partition) const;
175   inline ssize_t rightmost(ShenandoahFreeSetPartitionId which_partition) const;
176   ssize_t leftmost_empty(ShenandoahFreeSetPartitionId which_partition);
177   ssize_t rightmost_empty(ShenandoahFreeSetPartitionId which_partition);
178 
179   inline bool is_empty(ShenandoahFreeSetPartitionId which_partition) const;
180 
181   inline void increase_used(ShenandoahFreeSetPartitionId which_partition, size_t bytes);
182 










183   inline size_t capacity_of(ShenandoahFreeSetPartitionId which_partition) const {
184     assert (which_partition < NumPartitions, "selected free set must be valid");
185     return _capacity[int(which_partition)];
186   }
187 
188   inline size_t used_by(ShenandoahFreeSetPartitionId which_partition) const {
189     assert (which_partition < NumPartitions, "selected free set must be valid");
190     return _used[int(which_partition)];
191   }
192 
193   inline size_t available_in(ShenandoahFreeSetPartitionId which_partition) const {
194     assert (which_partition < NumPartitions, "selected free set must be valid");
195     return _capacity[int(which_partition)] - _used[int(which_partition)];
196   }
197 
198   inline void set_capacity_of(ShenandoahFreeSetPartitionId which_partition, size_t value) {
199     assert (which_partition < NumPartitions, "selected free set must be valid");
200     _capacity[int(which_partition)] = value;
201   }
202 

220   //       idx <= rightmost
221   //     }
222   //   if the set has no empty regions, leftmost_empty equals max and rightmost_empty equals 0
223   //   Otherwise (the set has empty regions):
224   //     0 <= leftmost_empty < max and 0 <= rightmost_empty < max
225   //     rightmost_empty >= leftmost_empty
226   //     for every idx that is in the set and is empty {
227   //       idx >= leftmost_empty &&
228   //       idx <= rightmost_empty
229   //     }
230   void assert_bounds() NOT_DEBUG_RETURN;
231 };
232 
233 // Publicly, ShenandoahFreeSet represents memory that is available to mutator threads.  The public capacity(), used(),
234 // and available() methods represent this public notion of memory that is under control of the mutator.  Separately,
235 // ShenandoahFreeSet also represents memory available to garbage collection activities for compaction purposes.
236 //
237 // The Shenandoah garbage collector evacuates live objects out of specific regions that are identified as members of the
238 // collection set (cset).
239 //
240 // The ShenandoahFreeSet endeavors to congregate survivor objects (objects that have been evacuated at least once) at the
241 // high end of memory.  New mutator allocations are taken from the low end of memory.  Within the mutator's range of regions,
242 // humongous allocations are taken from the lowest addresses, and LAB (local allocation buffers) and regular shared allocations
243 // are taken from the higher address of the mutator's range of regions.  This approach allows longer lasting survivor regions
244 // to congregate at the top of the heap and longer lasting humongous regions to congregate at the bottom of the heap, with
245 // short-lived frequently evacuated regions occupying the middle of the heap.
246 //
247 // Mutator and garbage collection activities tend to scramble the content of regions.  Twice, during each GC pass, we rebuild
248 // the free set in an effort to restore the efficient segregation of Collector and Mutator regions:
249 //
250 //  1. At the start of evacuation, we know exactly how much memory is going to be evacuated, and this guides our
251 //     sizing of the Collector free set.
252 //
253 //  2. At the end of GC, we have reclaimed all of the memory that was spanned by the cset.  We rebuild here to make
254 //     sure there is enough memory reserved at the high end of memory to hold the objects that might need to be evacuated
255 //     during the next GC pass.
256 
257 class ShenandoahFreeSet : public CHeapObj<mtGC> {
258 private:
259   ShenandoahHeap* const _heap;
260   ShenandoahRegionPartitions _partitions;
261   ShenandoahHeapRegion** _trash_regions;
262 
263   // Mutator allocations are biased from left-to-right or from right-to-left based on which end of mutator range
264   // is most likely to hold partially used regions.  In general, we want to finish consuming partially used
265   // regions and retire them in order to reduce the regions that must be searched for each allocation request.
266   bool _right_to_left_bias;


267 
268   // We re-evaluate the left-to-right allocation bias whenever _alloc_bias_weight is less than zero.  Each time
269   // we allocate an object, we decrement the count of this value.  Each time we re-evaluate whether to allocate
270   // from right-to-left or left-to-right, we reset the value of this counter to _InitialAllocBiasWeight.
271   ssize_t _alloc_bias_weight;
272 
273   const ssize_t _InitialAllocBiasWeight = 256;
274 


275   HeapWord* try_allocate_in(ShenandoahHeapRegion* region, ShenandoahAllocRequest& req, bool& in_new_region);
276 
277   // While holding the heap lock, allocate memory for a single object or LAB  which is to be entirely contained
278   // within a single HeapRegion as characterized by req.
279   //
280   // Precondition: !ShenandoahHeapRegion::requires_humongous(req.size())
281   HeapWord* allocate_single(ShenandoahAllocRequest& req, bool& in_new_region);
282 
283   // While holding the heap lock, allocate memory for a humongous object which spans one or more regions that
284   // were previously empty.  Regions that represent humongous objects are entirely dedicated to the humongous
285   // object.  No other objects are packed into these regions.
286   //
287   // Precondition: ShenandoahHeapRegion::requires_humongous(req.size())
288   HeapWord* allocate_contiguous(ShenandoahAllocRequest& req);
289 
290   // Change region r from the Mutator partition to the GC's Collector partition.  This requires that the region is entirely empty.


291   // Typical usage: During evacuation, the GC may find it needs more memory than had been reserved at the start of evacuation to
292   // hold evacuated objects.  If this occurs and memory is still available in the Mutator's free set, we will flip a region from
293   // the Mutator free set into the Collector free set.
294   void flip_to_gc(ShenandoahHeapRegion* r);

























295   void clear_internal();
296   void try_recycle_trashed(ShenandoahHeapRegion *r);
297 
298   // Returns true iff this region is entirely available, either because it is empty() or because it has been found to represent
299   // immediate trash and we'll be able to immediately recycle it.  Note that we cannot recycle immediate trash if
300   // concurrent weak root processing is in progress.
301   inline bool can_allocate_from(ShenandoahHeapRegion *r) const;
302   inline bool can_allocate_from(size_t idx) const;
303 
304   inline bool has_alloc_capacity(ShenandoahHeapRegion *r) const;
305 
306   // This function places all regions that have allocation capacity into the mutator_partition, identifying regions
307   // that have no allocation capacity as NotFree.  Subsequently, we will move some of the mutator regions into the
308   // collector partition with the intent of packing collector memory into the highest (rightmost) addresses of the
309   // heap, with mutator memory consuming the lowest addresses of the heap.
310   void find_regions_with_alloc_capacity(size_t &cset_regions);

311 
312   // Having placed all regions that have allocation capacity into the mutator partition, move some of these regions from
313   // the mutator partition into the collector partition in order to assure that the memory available for allocations within
314   // the collector partition is at least to_reserve.
315   void reserve_regions(size_t to_reserve);
316 
317   // Overwrite arguments to represent the number of regions to be reclaimed from the cset
318   void prepare_to_rebuild(size_t &cset_regions);
319 
320   void finish_rebuild(size_t cset_regions);


321 
322   // log status, assuming lock has already been acquired by the caller.
323   void log_status();
324 
325 public:
326   ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions);
327 
328   // Public because ShenandoahRegionPartitions assertions require access.
329   inline size_t alloc_capacity(ShenandoahHeapRegion *r) const;
330   inline size_t alloc_capacity(size_t idx) const;
331 
332   void clear();
333   void rebuild();




































334 
335   // Move up to cset_regions number of regions from being available to the collector to being available to the mutator.
336   //
337   // Typical usage: At the end of evacuation, when the collector no longer needs the regions that had been reserved
338   // for evacuation, invoke this to make regions available for mutator allocations.
339   //
340   // Note that we plan to replenish the Collector reserve at the end of update refs, at which time all
341   // of the regions recycled from the collection set will be available.  If the very unlikely event that there
342   // are fewer regions in the collection set than remain in the collector set, we limit the transfer in order
343   // to assure that the replenished Collector reserve can be sufficiently large.
344   void move_regions_from_collector_to_mutator(size_t cset_regions);
345 
346   void recycle_trash();

347   // Acquire heap lock and log status, assuming heap lock is not acquired by the caller.
348   void log_status_under_lock();
349 
350   inline size_t capacity()  const { return _partitions.capacity_of(ShenandoahFreeSetPartitionId::Mutator); }
351   inline size_t used()      const { return _partitions.used_by(ShenandoahFreeSetPartitionId::Mutator);     }
352   inline size_t available() const {
353     assert(used() <= capacity(), "must use less than capacity");
354     return capacity() - used();
355   }
356 
357   HeapWord* allocate(ShenandoahAllocRequest& req, bool& in_new_region);
358   size_t unsafe_peek_free() const;
359 
360   /*
361    * Internal fragmentation metric: describes how fragmented the heap regions are.
362    *
363    * It is derived as:
364    *
365    *               sum(used[i]^2, i=0..k)
366    *   IF = 1 - ------------------------------
367    *              C * sum(used[i], i=0..k)
368    *
369    * ...where k is the number of regions in computation, C is the region capacity, and
370    * used[i] is the used space in the region.
371    *
372    * The non-linearity causes IF to be lower for the cases where the same total heap
373    * used is densely packed. For example:
374    *   a) Heap is completely full  => IF = 0
375    *   b) Heap is half full, first 50% regions are completely full => IF = 0
376    *   c) Heap is half full, each region is 50% full => IF = 1/2
377    *   d) Heap is quarter full, first 50% regions are completely full => IF = 0
378    *   e) Heap is quarter full, each region is 25% full => IF = 3/4
379    *   f) Heap has one small object per each region => IF =~ 1
380    */
381   double internal_fragmentation();
382 
383   /*
384    * External fragmentation metric: describes how fragmented the heap is.
385    *
386    * It is derived as:
387    *
388    *   EF = 1 - largest_contiguous_free / total_free
389    *
390    * For example:
391    *   a) Heap is completely empty => EF = 0
392    *   b) Heap is completely full => EF = 0
393    *   c) Heap is first-half full => EF = 1/2
394    *   d) Heap is half full, full and empty regions interleave => EF =~ 1
395    */
396   double external_fragmentation();
397 
398   void print_on(outputStream* out) const;






















399 };
400 
401 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHFREESET_HPP

 17  * 2 along with this work; if not, write to the Free Software Foundation,
 18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19  *
 20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 21  * or visit www.oracle.com if you need additional information or have any
 22  * questions.
 23  *
 24  */
 25 
 26 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHFREESET_HPP
 27 #define SHARE_GC_SHENANDOAH_SHENANDOAHFREESET_HPP
 28 
 29 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
 30 #include "gc/shenandoah/shenandoahHeap.hpp"
 31 #include "gc/shenandoah/shenandoahSimpleBitMap.hpp"
 32 
 33 // Each ShenandoahHeapRegion is associated with a ShenandoahFreeSetPartitionId.
 34 enum class ShenandoahFreeSetPartitionId : uint8_t {
 35   Mutator,                      // Region is in the Mutator free set: available memory is available to mutators.
 36   Collector,                    // Region is in the Collector free set: available memory is reserved for evacuations.
 37   OldCollector,                 // Region is in the Old Collector free set:
 38                                 //    available memory is reserved for old evacuations and for promotions.
 39   NotFree                       // Region is in no free set: it has no available memory
 40 };
 41 
 42 // We do not maintain counts, capacity, or used for regions that are not free.  Informally, if a region is NotFree, it is
 43 // in no partition.  NumPartitions represents the size of an array that may be indexed by Mutator, Collector, or OldCollector.
 44 #define NumPartitions           (ShenandoahFreeSetPartitionId::NotFree)
 45 #define IntNumPartitions     int(ShenandoahFreeSetPartitionId::NotFree)
 46 #define UIntNumPartitions   uint(ShenandoahFreeSetPartitionId::NotFree)
 47 
 48 // ShenandoahRegionPartitions provides an abstraction to help organize the implementation of ShenandoahFreeSet.  This
 49 // class implements partitioning of regions into distinct sets.  Each ShenandoahHeapRegion is either in the Mutator free set,
 50 // the Collector free set, or in neither free set (NotFree).  When we speak of a "free partition", we mean partitions
 51 // for which the ShenandoahFreeSetPartitionId is not equal to NotFree.
 52 class ShenandoahRegionPartitions {
 53 
 54 private:
 55   const ssize_t _max;           // The maximum number of heap regions
 56   const size_t _region_size_bytes;
 57   const ShenandoahFreeSet* _free_set;
 58   // For each partition, we maintain a bitmap of which regions are affiliated with this partition.

 65   ssize_t _leftmosts[UIntNumPartitions];
 66   ssize_t _rightmosts[UIntNumPartitions];
 67 
 68   // Allocation for humongous objects needs to find regions that are entirely empty.  For each partition p, _leftmosts_empty[p]
 69   // represents the first region belonging to this partition that is completely empty and _rightmosts_empty[p] represents the
 70   // last region that is completely empty.  If there is no completely empty region in this partition, this is represented
 71   // by the canonical [_max, -1].
 72   ssize_t _leftmosts_empty[UIntNumPartitions];
 73   ssize_t _rightmosts_empty[UIntNumPartitions];
 74 
 75   // For each partition p, _capacity[p] represents the total amount of memory within the partition at the time
 76   // of the most recent rebuild, _used[p] represents the total amount of memory that has been allocated within this
 77   // partition (either already allocated as of the rebuild, or allocated since the rebuild).  _capacity[p] and _used[p]
 78   // are denoted in bytes.  Note that some regions that had been assigned to a particular partition at rebuild time
 79   // may have been retired following the rebuild.  The tallies for these regions are still reflected in _capacity[p]
 80   // and _used[p], even though the region may have been removed from the free set.
 81   size_t _capacity[UIntNumPartitions];
 82   size_t _used[UIntNumPartitions];
 83   size_t _region_counts[UIntNumPartitions];
 84 
 85   // For each partition p, _left_to_right_bias is true iff allocations are normally made from lower indexed regions
 86   // before higher indexed regions.
 87   bool _left_to_right_bias[UIntNumPartitions];
 88 
 89   // Shrink the intervals associated with partition when region idx is removed from this free set
 90   inline void shrink_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, ssize_t idx);
 91 
 92   // Shrink the intervals associated with partition when regions low_idx through high_idx inclusive are removed from this free set
 93   inline void shrink_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId partition,
 94                                                                 ssize_t low_idx, ssize_t high_idx);
 95   inline void expand_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, ssize_t idx, size_t capacity);
 96 
 97   inline bool is_mutator_partition(ShenandoahFreeSetPartitionId p);
 98   inline bool is_young_collector_partition(ShenandoahFreeSetPartitionId p);
 99   inline bool is_old_collector_partition(ShenandoahFreeSetPartitionId p);
100   inline bool available_implies_empty(size_t available);
101 
102 #ifndef PRODUCT
103   void dump_bitmap_row(ssize_t region_idx) const;
104   void dump_bitmap_range(ssize_t start_region_idx, ssize_t end_region_idx) const;
105   void dump_bitmap() const;
106 #endif
107 public:
108   ShenandoahRegionPartitions(size_t max_regions, ShenandoahFreeSet* free_set);
109   ~ShenandoahRegionPartitions() {}
110 
111   // Remove all regions from all partitions and reset all bounds
112   void make_all_regions_unavailable();
113 
114   // Set the partition id for a particular region without adjusting interval bounds or usage/capacity tallies
115   inline void raw_assign_membership(size_t idx, ShenandoahFreeSetPartitionId p) {
116     _membership[int(p)].set_bit(idx);
117   }
118 
119   // Set the Mutator intervals, usage, and capacity according to arguments.  Reset the Collector intervals, used, capacity
120   // to represent empty Collector free set.  We use this at the end of rebuild_free_set() to avoid the overhead of making
121   // many redundant incremental adjustments to the mutator intervals as the free set is being rebuilt.
122   void establish_mutator_intervals(ssize_t mutator_leftmost, ssize_t mutator_rightmost,
123                                    ssize_t mutator_leftmost_empty, ssize_t mutator_rightmost_empty,
124                                    size_t mutator_region_count, size_t mutator_used);
125 
126   // Set the OldCollector intervals, usage, and capacity according to arguments.  We use this at the end of rebuild_free_set()
127   // to avoid the overhead of making many redundant incremental adjustments to the mutator intervals as the free set is being
128   // rebuilt.
129   void establish_old_collector_intervals(ssize_t old_collector_leftmost, ssize_t old_collector_rightmost,
130                                          ssize_t old_collector_leftmost_empty, ssize_t old_collector_rightmost_empty,
131                                          size_t old_collector_region_count, size_t old_collector_used);
132 
133   // Retire region idx from within partition, leaving its capacity and used as part of the original free partition's totals.
134   // Requires that region idx is in the Mutator or Collector partitions.  Hereafter, identifies this region as NotFree.
135   // Any remnant of available memory at the time of retirement is added to the original partition's total of used bytes.
136   void retire_from_partition(ShenandoahFreeSetPartitionId p, ssize_t idx, size_t used_bytes);
137 
138   // Retire all regions between low_idx and high_idx inclusive from within partition.  Requires that each region idx is
139   // in the same Mutator or Collector partition.  Hereafter, identifies each region as NotFree.   Assumes that each region
140   // is now considered fully used, since the region is presumably used to represent a humongous object.
141   void retire_range_from_partition(ShenandoahFreeSetPartitionId partition, ssize_t low_idx, ssize_t high_idx);
142 
143   // Place region idx into free set which_partition.  Requires that idx is currently NotFree.
144   void make_free(ssize_t idx, ShenandoahFreeSetPartitionId which_partition, size_t region_capacity);
145 
146   // Place region idx into free partition new_partition, adjusting used and capacity totals for the original and new partition
147   // given that available bytes can still be allocated within this region.  Requires that idx is currently not NotFree.
148   void move_from_partition_to_partition(ssize_t idx, ShenandoahFreeSetPartitionId orig_partition,
149                                         ShenandoahFreeSetPartitionId new_partition, size_t available);
150 
151   const char* partition_membership_name(ssize_t idx) const;
152 

181   inline size_t max_regions() const { return _max; }
182 
183   inline size_t region_size_bytes() const { return _region_size_bytes; };
184 
185   // The following four methods return the left-most and right-most bounds on ranges of regions representing
186   // the requested set.  The _empty variants represent bounds on the range that holds completely empty
187   // regions, which are required for humongous allocations and desired for "very large" allocations.
188   //   if the requested which_partition is empty:
189   //     leftmost() and leftmost_empty() return _max, rightmost() and rightmost_empty() return 0
190   //   otherwise, expect the following:
191   //     0 <= leftmost <= leftmost_empty <= rightmost_empty <= rightmost < _max
192   inline ssize_t leftmost(ShenandoahFreeSetPartitionId which_partition) const;
193   inline ssize_t rightmost(ShenandoahFreeSetPartitionId which_partition) const;
194   ssize_t leftmost_empty(ShenandoahFreeSetPartitionId which_partition);
195   ssize_t rightmost_empty(ShenandoahFreeSetPartitionId which_partition);
196 
197   inline bool is_empty(ShenandoahFreeSetPartitionId which_partition) const;
198 
199   inline void increase_used(ShenandoahFreeSetPartitionId which_partition, size_t bytes);
200 
201   inline void set_bias_from_left_to_right(ShenandoahFreeSetPartitionId which_partition, bool value) {
202     assert (which_partition < NumPartitions, "selected free set must be valid");
203     _left_to_right_bias[int(which_partition)] = value;
204   }
205 
206   inline bool alloc_from_left_bias(ShenandoahFreeSetPartitionId which_partition) const {
207     assert (which_partition < NumPartitions, "selected free set must be valid");
208     return _left_to_right_bias[int(which_partition)];
209   }
210 
211   inline size_t capacity_of(ShenandoahFreeSetPartitionId which_partition) const {
212     assert (which_partition < NumPartitions, "selected free set must be valid");
213     return _capacity[int(which_partition)];
214   }
215 
216   inline size_t used_by(ShenandoahFreeSetPartitionId which_partition) const {
217     assert (which_partition < NumPartitions, "selected free set must be valid");
218     return _used[int(which_partition)];
219   }
220 
221   inline size_t available_in(ShenandoahFreeSetPartitionId which_partition) const {
222     assert (which_partition < NumPartitions, "selected free set must be valid");
223     return _capacity[int(which_partition)] - _used[int(which_partition)];
224   }
225 
226   inline void set_capacity_of(ShenandoahFreeSetPartitionId which_partition, size_t value) {
227     assert (which_partition < NumPartitions, "selected free set must be valid");
228     _capacity[int(which_partition)] = value;
229   }
230 

248   //       idx <= rightmost
249   //     }
  //   if the set has no empty regions, leftmost_empty equals max and rightmost_empty equals 0
  //   Otherwise (the set has empty regions):
  //     0 <= leftmost_empty < max and 0 <= rightmost_empty < max
  //     rightmost_empty >= leftmost_empty
  //     for every idx that is in the set and is empty {
  //       idx >= leftmost_empty &&
  //       idx <= rightmost_empty
  //     }
258   void assert_bounds() NOT_DEBUG_RETURN;
259 };
260 
261 // Publicly, ShenandoahFreeSet represents memory that is available to mutator threads.  The public capacity(), used(),
262 // and available() methods represent this public notion of memory that is under control of the mutator.  Separately,
263 // ShenandoahFreeSet also represents memory available to garbage collection activities for compaction purposes.
264 //
265 // The Shenandoah garbage collector evacuates live objects out of specific regions that are identified as members of the
266 // collection set (cset).
267 //
268 // The ShenandoahFreeSet tries to colocate survivor objects (objects that have been evacuated at least once) at the
269 // high end of memory.  New mutator allocations are taken from the low end of memory.  Within the mutator's range of regions,
270 // humongous allocations are taken from the lowest addresses, and LAB (local allocation buffers) and regular shared allocations
271 // are taken from the higher address of the mutator's range of regions.  This approach allows longer lasting survivor regions
272 // to congregate at the top of the heap and longer lasting humongous regions to congregate at the bottom of the heap, with
273 // short-lived frequently evacuated regions occupying the middle of the heap.
274 //
275 // Mutator and garbage collection activities tend to scramble the content of regions.  Twice, during each GC pass, we rebuild
276 // the free set in an effort to restore the efficient segregation of Collector and Mutator regions:
277 //
278 //  1. At the start of evacuation, we know exactly how much memory is going to be evacuated, and this guides our
279 //     sizing of the Collector free set.
280 //
281 //  2. At the end of GC, we have reclaimed all of the memory that was spanned by the cset.  We rebuild here to make
282 //     sure there is enough memory reserved at the high end of memory to hold the objects that might need to be evacuated
283 //     during the next GC pass.
284 
class ShenandoahFreeSet : public CHeapObj<mtGC> {
private:
  ShenandoahHeap* const _heap;            // The heap whose regions this free set manages
  ShenandoahRegionPartitions _partitions; // Assignment of each region to Mutator/Collector/... partition
  ShenandoahHeapRegion** _trash_regions;  // Scratch array of trashed regions awaiting recycling
                                          // (presumably sized from max_regions in the constructor -- confirm)

  // Allocate a PLAB satisfying the alignment constraints of req from region r.
  // NOTE(review): semantics inferred from the name -- verify against the .cpp.
  HeapWord* allocate_aligned_plab(size_t size, ShenandoahAllocRequest& req, ShenandoahHeapRegion* r);

  // Return the address of memory allocated, setting in_new_region to true iff the allocation is taken
  // from a region that was previously empty.  Return nullptr if memory could not be allocated.
  inline HeapWord* allocate_from_partition_with_affiliation(ShenandoahAffiliation affiliation,
                                                            ShenandoahAllocRequest& req, bool& in_new_region);

  // We re-evaluate the left-to-right allocation bias whenever _alloc_bias_weight is less than zero.  Each time
  // we allocate an object, we decrement the count of this value.  Each time we re-evaluate whether to allocate
  // from right-to-left or left-to-right, we reset the value of this counter to INITIAL_ALLOC_BIAS_WEIGHT.
  ssize_t _alloc_bias_weight;

  const ssize_t INITIAL_ALLOC_BIAS_WEIGHT = 256;

  // Increases used memory for the partition if the allocation is successful. `in_new_region` will be set
  // if this is the first allocation in the region.
  HeapWord* try_allocate_in(ShenandoahHeapRegion* region, ShenandoahAllocRequest& req, bool& in_new_region);

  // While holding the heap lock, allocate memory for a single object or LAB which is to be entirely contained
  // within a single HeapRegion as characterized by req.
  //
  // Precondition: !ShenandoahHeapRegion::requires_humongous(req.size())
  HeapWord* allocate_single(ShenandoahAllocRequest& req, bool& in_new_region);

  // While holding the heap lock, allocate memory for a humongous object which spans one or more regions that
  // were previously empty.  Regions that represent humongous objects are entirely dedicated to the humongous
  // object.  No other objects are packed into these regions.
  //
  // Precondition: ShenandoahHeapRegion::requires_humongous(req.size())
  HeapWord* allocate_contiguous(ShenandoahAllocRequest& req);

  // Change region r from the Mutator partition to the GC's Collector or OldCollector partition.  This requires that the
  // region is entirely empty.
  //
  // Typical usage: During evacuation, the GC may find it needs more memory than had been reserved at the start of evacuation to
  // hold evacuated objects.  If this occurs and memory is still available in the Mutator's free set, we will flip a region from
  // the Mutator free set into the Collector or OldCollector free set.
  void flip_to_gc(ShenandoahHeapRegion* r);
  void flip_to_old_gc(ShenandoahHeapRegion* r);

  // Handle allocation for mutator.
  HeapWord* allocate_for_mutator(ShenandoahAllocRequest &req, bool &in_new_region);

  // Update allocation bias and decide whether to allocate from the left or right side of the heap.
  void update_allocation_bias();

  // Search for regions to satisfy allocation request using iterator.
  template<typename Iter>
  HeapWord* allocate_from_regions(Iter& iterator, ShenandoahAllocRequest &req, bool &in_new_region);

  // Handle allocation for collector (for evacuation).
  HeapWord* allocate_for_collector(ShenandoahAllocRequest& req, bool& in_new_region);

  // Search for allocation in region with same affiliation as request, using given iterator.
  template<typename Iter>
  HeapWord* allocate_with_affiliation(Iter& iterator, ShenandoahAffiliation affiliation, ShenandoahAllocRequest& req, bool& in_new_region);

  // Return true if the respective generation for this request has free regions.
  bool can_allocate_in_new_region(const ShenandoahAllocRequest& req);

  // Attempt to allocate memory for an evacuation from the mutator's partition.
  HeapWord* try_allocate_from_mutator(ShenandoahAllocRequest& req, bool& in_new_region);

  // Reset internal free-set state; called under the heap lock by clear().
  // NOTE(review): locking protocol inferred from the public/private pairing
  // used elsewhere in this class (log_status / log_status_under_lock) -- verify.
  void clear_internal();
  void try_recycle_trashed(ShenandoahHeapRegion *r);

  // Returns true iff this region is entirely available, either because it is empty() or because it has been found to represent
  // immediate trash and we'll be able to immediately recycle it.  Note that we cannot recycle immediate trash if
  // concurrent weak root processing is in progress.
  inline bool can_allocate_from(ShenandoahHeapRegion *r) const;
  inline bool can_allocate_from(size_t idx) const;

  inline bool has_alloc_capacity(ShenandoahHeapRegion *r) const;

  // Move up to max_xfer_regions regions from the given collector partition to the Mutator partition,
  // accumulating the transferred byte count into bytes_transferred and returning the number of regions moved.
  size_t transfer_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId which_collector,
                                                                  size_t max_xfer_regions,
                                                                  size_t& bytes_transferred);
  size_t transfer_non_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId which_collector,
                                                                      size_t max_xfer_regions,
                                                                      size_t& bytes_transferred);

  // Determine whether we prefer to allocate from left to right or from right to left within the OldCollector free-set.
  void establish_old_collector_alloc_bias();

  // Set max_capacity for young and old generations
  void establish_generation_sizes(size_t young_region_count, size_t old_region_count);
  size_t get_usable_free_words(size_t free_bytes) const;

  // log status, assuming lock has already been acquired by the caller.
  void log_status();

public:
  ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions);

  // Public because ShenandoahRegionPartitions assertions require access.
  inline size_t alloc_capacity(ShenandoahHeapRegion *r) const;
  inline size_t alloc_capacity(size_t idx) const;

  void clear();

  // Examine the existing free set representation, capturing the current state into var arguments:
  //
  // young_cset_regions is the number of regions currently in the young cset if we are starting to evacuate, or zero
  //   old_cset_regions is the number of regions currently in the old cset if we are starting a mixed evacuation, or zero
  //   first_old_region is the index of the first region that is part of the OldCollector set
  //    last_old_region is the index of the last region that is part of the OldCollector set
  //   old_region_count is the number of regions in the OldCollector set that have memory available to be allocated
  void prepare_to_rebuild(size_t &young_cset_regions, size_t &old_cset_regions,
                          size_t &first_old_region, size_t &last_old_region, size_t &old_region_count);

  // At the end of final mark, but before we begin evacuating, heuristics calculate how much memory is required to
  // hold the results of evacuating to young-gen and to old-gen, and have_evacuation_reserves should be true.
  // These quantities, stored as reserves for their respective generations, are consulted prior to rebuilding
  // the free set (ShenandoahFreeSet) in preparation for evacuation.  When the free set is rebuilt, we make sure
  // to reserve sufficient memory in the collector and old_collector sets to hold evacuations.
  //
  // We also rebuild the free set at the end of GC, as we prepare to idle GC until the next trigger.  In this case,
  // have_evacuation_reserves is false because we don't yet know how much memory will need to be evacuated in the
  // next GC cycle.  When have_evacuation_reserves is false, the free set rebuild operation reserves for the collector
  // and old_collector sets based on alternative mechanisms, such as ShenandoahEvacReserve, ShenandoahOldEvacReserve, and
  // ShenandoahOldCompactionReserve.  In a future planned enhancement, the reserve for old_collector set when the
  // evacuation reserves are unknown, is based in part on anticipated promotion as determined by analysis of live data
  // found during the previous GC pass which is one less than the current tenure age.
  //
  // young_cset_regions is the number of regions currently in the young cset if we are starting to evacuate, or zero
  //   old_cset_regions is the number of regions currently in the old cset if we are starting a mixed evacuation, or zero
  //    num_old_regions is the number of old-gen regions that have available memory for further allocations (excluding old cset)
  // have_evacuation_reserves is true iff the desired values of young-gen and old-gen evacuation reserves and old-gen
  //                    promotion reserve have been precomputed (and can be obtained by invoking
  //                    <generation>->get_evacuation_reserve() or old_gen->get_promoted_reserve())
  void finish_rebuild(size_t young_cset_regions, size_t old_cset_regions, size_t num_old_regions,
                      bool have_evacuation_reserves = false);

  // When a region is promoted in place, we add the region's available memory if it is greater than plab_min_size()
  // into the old collector partition by invoking this method.
  void add_promoted_in_place_region_to_old_collector(ShenandoahHeapRegion* region);

  // Move up to cset_regions number of regions from being available to the collector to being available to the mutator.
  //
  // Typical usage: At the end of evacuation, when the collector no longer needs the regions that had been reserved
  // for evacuation, invoke this to make regions available for mutator allocations.
  void move_regions_from_collector_to_mutator(size_t cset_regions);

  void recycle_trash();

  // Acquire heap lock and log status, assuming heap lock is not acquired by the caller.
  void log_status_under_lock();

  // Public notion of the free set: capacity, used and available bytes of the Mutator partition only.
  inline size_t capacity()  const { return _partitions.capacity_of(ShenandoahFreeSetPartitionId::Mutator); }
  inline size_t used()      const { return _partitions.used_by(ShenandoahFreeSetPartitionId::Mutator);     }
  inline size_t available() const {
    assert(used() <= capacity(), "must use less than capacity");
    return capacity() - used();
  }

  // Satisfy the allocation request req; in_new_region is set true iff the allocation is taken
  // from a previously empty region (same out-parameter convention as try_allocate_in).
  HeapWord* allocate(ShenandoahAllocRequest& req, bool& in_new_region);

  /*
   * Internal fragmentation metric: describes how fragmented the heap regions are.
   *
   * It is derived as:
   *
   *               sum(used[i]^2, i=0..k)
   *   IF = 1 - ------------------------------
   *              C * sum(used[i], i=0..k)
   *
   * ...where k is the number of regions in computation, C is the region capacity, and
   * used[i] is the used space in the region.
   *
   * The non-linearity causes IF to be lower for the cases where the same total heap
   * used is densely packed. For example:
   *   a) Heap is completely full  => IF = 0
   *   b) Heap is half full, first 50% regions are completely full => IF = 0
   *   c) Heap is half full, each region is 50% full => IF = 1/2
   *   d) Heap is quarter full, first 50% regions are completely full => IF = 0
   *   e) Heap is quarter full, each region is 25% full => IF = 3/4
   *   f) Heap has one small object per each region => IF =~ 1
   */
  double internal_fragmentation();

  /*
   * External fragmentation metric: describes how fragmented the heap is.
   *
   * It is derived as:
   *
   *   EF = 1 - largest_contiguous_free / total_free
   *
   * For example:
   *   a) Heap is completely empty => EF = 0
   *   b) Heap is completely full => EF = 0
   *   c) Heap is first-half full => EF = 1/2
   *   d) Heap is half full, full and empty regions interleave => EF =~ 1
   */
  double external_fragmentation();

  void print_on(outputStream* out) const;

  // This function places all regions that have allocation capacity into the mutator partition, or if the region
  // is already affiliated with old, into the old collector partition, identifying regions that have no allocation
  // capacity as NotFree.  Capture the modified state of the freeset into var arguments:
  //
  // young_cset_regions is the number of regions currently in the young cset if we are starting to evacuate, or zero
  //   old_cset_regions is the number of regions currently in the old cset if we are starting a mixed evacuation, or zero
  //   first_old_region is the index of the first region that is part of the OldCollector set
  //    last_old_region is the index of the last region that is part of the OldCollector set
  //   old_region_count is the number of regions in the OldCollector set that have memory available to be allocated
  void find_regions_with_alloc_capacity(size_t &young_cset_regions, size_t &old_cset_regions,
                                        size_t &first_old_region, size_t &last_old_region, size_t &old_region_count);

  // Ensure that Collector has at least to_reserve bytes of available memory, and OldCollector has at least old_reserve
  // bytes of available memory.  On input, old_region_count holds the number of regions already present in the
  // OldCollector partition.  Upon return, old_region_count holds the updated number of regions in the OldCollector partition.
  void reserve_regions(size_t to_reserve, size_t old_reserve, size_t &old_region_count);

  // Reserve space for evacuations, with regions reserved for old evacuations placed to the right
  // of regions reserved for young evacuations.
  void compute_young_and_old_reserves(size_t young_cset_regions, size_t old_cset_regions, bool have_evacuation_reserves,
                                      size_t &young_reserve_result, size_t &old_reserve_result) const;
};
511 
512 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHFREESET_HPP
< prev index next >