/*
 * Copyright (c) 2016, 2019, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHFREESET_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHFREESET_HPP

#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahSimpleBitMap.hpp"

// Each ShenandoahHeapRegion is associated with a ShenandoahFreeSetPartitionId.
enum class ShenandoahFreeSetPartitionId : uint8_t {
  Mutator,                      // Region is in the Mutator free set: available memory is available to mutators.
  Collector,                    // Region is in the Collector free set: available memory is reserved for evacuations.
  OldCollector,                 // Region is in the Old Collector free set:
                                //    available memory is reserved for old evacuations and for promotions.
  NotFree                       // Region is in no free set: it has no available memory
};

// We do not maintain counts, capacity, or used for regions that are not free.  Informally, if a region is NotFree, it is
// in no partition.  NumPartitions represents the size of an array that may be indexed by any of the free partition ids
// (Mutator, Collector, or OldCollector).
#define NumPartitions           (ShenandoahFreeSetPartitionId::NotFree)
#define IntNumPartitions     int(ShenandoahFreeSetPartitionId::NotFree)
#define UIntNumPartitions   uint(ShenandoahFreeSetPartitionId::NotFree)
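
// Illustrative only: a hedged sketch (not part of this header's API) of how a per-partition tally array sized by
// UIntNumPartitions is typically indexed by casting a ShenandoahFreeSetPartitionId, the same pattern used by the
// _membership[], _capacity[], and _used[] arrays declared below.  The names tallies and tally() are hypothetical.
//
//   size_t tallies[UIntNumPartitions] = { 0 };              // one slot per free partition
//   void tally(ShenandoahFreeSetPartitionId p, size_t bytes) {
//     assert(p < NumPartitions, "NotFree has no tally");    // NotFree is not a valid index
//     tallies[int(p)] += bytes;                             // scoped enum requires an explicit cast
//   }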

// ShenandoahRegionPartitions provides an abstraction to help organize the implementation of ShenandoahFreeSet.  This
// class implements partitioning of regions into distinct sets.  Each ShenandoahHeapRegion is in the Mutator free set,
// the Collector free set, the OldCollector free set, or in no free set (NotFree).  When we speak of a "free partition",
// we mean a partition whose ShenandoahFreeSetPartitionId is not equal to NotFree.
class ShenandoahRegionPartitions {

private:
  const ssize_t _max;           // The maximum number of heap regions
  const size_t _region_size_bytes;
  const ShenandoahFreeSet* _free_set;
  // For each partition, we maintain a bitmap of which regions are affiliated with this partition.
  ShenandoahSimpleBitMap _membership[UIntNumPartitions];

  // For each partition, we track an interval outside of which a region affiliated with that partition is guaranteed
  // not to be found. This makes searches for free space more efficient.  For each partition p, _leftmosts[p]
  // represents its least index, and _rightmosts[p] its greatest index. Empty intervals are indicated by the
  // canonical [_max, -1].
  ssize_t _leftmosts[UIntNumPartitions];
  ssize_t _rightmosts[UIntNumPartitions];

  // Allocation for humongous objects needs to find regions that are entirely empty.  For each partition p, _leftmosts_empty[p]
  // represents the first region belonging to this partition that is completely empty and _rightmosts_empty[p] represents the
  // last region that is completely empty.  If there is no completely empty region in this partition, this is represented
  // by the canonical [_max, -1].
  ssize_t _leftmosts_empty[UIntNumPartitions];
  ssize_t _rightmosts_empty[UIntNumPartitions];

  // For each partition p, _capacity[p] represents the total amount of memory within the partition at the time
  // of the most recent rebuild, _used[p] represents the total amount of memory that has been allocated within this
  // partition (either already allocated as of the rebuild, or allocated since the rebuild).  _capacity[p] and _used[p]
  // are denoted in bytes.  Note that some regions that had been assigned to a particular partition at rebuild time
  // may have been retired following the rebuild.  The tallies for these regions are still reflected in _capacity[p]
  // and _used[p], even though the region may have been removed from the free set.
  size_t _capacity[UIntNumPartitions];
  size_t _used[UIntNumPartitions];
  size_t _region_counts[UIntNumPartitions];

  // For each partition p, _left_to_right_bias[p] is true iff allocations are normally made from lower indexed regions
  // before higher indexed regions.
  bool _left_to_right_bias[UIntNumPartitions];

  // Shrink the intervals associated with partition when region idx is removed from this free set
  inline void shrink_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, ssize_t idx);

  // Shrink the intervals associated with partition when regions low_idx through high_idx inclusive are removed from this free set
  inline void shrink_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId partition,
                                                                ssize_t low_idx, ssize_t high_idx);
  inline void expand_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, ssize_t idx, size_t capacity);

  inline bool is_mutator_partition(ShenandoahFreeSetPartitionId p);
  inline bool is_young_collector_partition(ShenandoahFreeSetPartitionId p);
  inline bool is_old_collector_partition(ShenandoahFreeSetPartitionId p);
  inline bool available_implies_empty(size_t available);

#ifndef PRODUCT
  void dump_bitmap_row(ssize_t region_idx) const;
  void dump_bitmap_range(ssize_t start_region_idx, ssize_t end_region_idx) const;
  void dump_bitmap() const;
#endif
public:
  ShenandoahRegionPartitions(size_t max_regions, ShenandoahFreeSet* free_set);
  ~ShenandoahRegionPartitions() {}

  // Remove all regions from all partitions and reset all bounds
  void make_all_regions_unavailable();

  // Set the partition id for a particular region without adjusting interval bounds or usage/capacity tallies
  inline void raw_assign_membership(size_t idx, ShenandoahFreeSetPartitionId p) {
    _membership[int(p)].set_bit(idx);
  }

  // Set the Mutator intervals, usage, and capacity according to arguments.  Reset the Collector intervals, used, and
  // capacity to represent an empty Collector free set.  We use this at the end of rebuild_free_set() to avoid the overhead
  // of making many redundant incremental adjustments to the mutator intervals as the free set is being rebuilt.
  void establish_mutator_intervals(ssize_t mutator_leftmost, ssize_t mutator_rightmost,
                                   ssize_t mutator_leftmost_empty, ssize_t mutator_rightmost_empty,
                                   size_t mutator_region_count, size_t mutator_used);

  // Set the OldCollector intervals, usage, and capacity according to arguments.  We use this at the end of rebuild_free_set()
  // to avoid the overhead of making many redundant incremental adjustments to the old collector intervals as the free set is
  // being rebuilt.
  void establish_old_collector_intervals(ssize_t old_collector_leftmost, ssize_t old_collector_rightmost,
                                         ssize_t old_collector_leftmost_empty, ssize_t old_collector_rightmost_empty,
                                         size_t old_collector_region_count, size_t old_collector_used);

  // Retire region idx from within partition, leaving its capacity and used as part of the original free partition's totals.
  // Requires that region idx is in the Mutator or Collector partitions.  Hereafter, identifies this region as NotFree.
  // Any remnant of available memory at the time of retirement is added to the original partition's total of used bytes.
  void retire_from_partition(ShenandoahFreeSetPartitionId p, ssize_t idx, size_t used_bytes);

  // Retire all regions between low_idx and high_idx inclusive from within partition.  Requires that each region idx is
  // in the same Mutator or Collector partition.  Hereafter, identifies each region as NotFree.  Assumes that each region
  // is now considered fully used, since the region is presumably used to represent a humongous object.
  void retire_range_from_partition(ShenandoahFreeSetPartitionId partition, ssize_t low_idx, ssize_t high_idx);

  // Place region idx into free set which_partition.  Requires that idx is currently NotFree.
  void make_free(ssize_t idx, ShenandoahFreeSetPartitionId which_partition, size_t region_capacity);

  // Place region idx into free partition new_partition, adjusting used and capacity totals for the original and new partition
  // given that available bytes can still be allocated within this region.  Requires that idx is currently not NotFree.
  void move_from_partition_to_partition(ssize_t idx, ShenandoahFreeSetPartitionId orig_partition,
                                        ShenandoahFreeSetPartitionId new_partition, size_t available);
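
  // Illustrative only: a hedged sketch (not the actual ShenandoahFreeSet::flip_to_gc() implementation) of how a
  // caller holding the heap lock might move a completely empty region out of the Mutator partition so that its
  // capacity can back GC evacuations.  It assumes ShenandoahHeapRegion exposes index() and free(); the local name
  // partitions is hypothetical.
  //
  //   ShenandoahHeapRegion* r = ...;                        // an empty region chosen by the caller
  //   size_t available = r->free();                         // every byte is still allocatable
  //   partitions->move_from_partition_to_partition(r->index(),
  //                                                ShenandoahFreeSetPartitionId::Mutator,
  //                                                ShenandoahFreeSetPartitionId::Collector, available);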

  const char* partition_membership_name(ssize_t idx) const;

  // Return the index of the next available region >= start_index, or maximum_regions if not found.
  inline ssize_t find_index_of_next_available_region(ShenandoahFreeSetPartitionId which_partition, ssize_t start_index) const;

  // Return the index of the previous available region <= last_index, or -1 if not found.
  inline ssize_t find_index_of_previous_available_region(ShenandoahFreeSetPartitionId which_partition, ssize_t last_index) const;

  // Return the index of the next available cluster of cluster_size regions >= start_index, or maximum_regions if not found.
  inline ssize_t find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId which_partition,
                                                                 ssize_t start_index, size_t cluster_size) const;

  // Return the index of the previous available cluster of cluster_size regions <= last_index, or -1 if not found.
  inline ssize_t find_index_of_previous_available_cluster_of_regions(ShenandoahFreeSetPartitionId which_partition,
                                                                     ssize_t last_index, size_t cluster_size) const;
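
  // Illustrative only: a hedged sketch of how the find_* probes above pair with leftmost() and rightmost() to visit
  // every member of a partition from low index to high index.  The names partitions and visit() are hypothetical.
  //
  //   // Assumes partition p is non-empty; assert_bounds() then guarantees leftmost(p) is a member of p.
  //   ShenandoahFreeSetPartitionId p = ShenandoahFreeSetPartitionId::Mutator;
  //   ssize_t rightmost = partitions->rightmost(p);
  //   for (ssize_t idx = partitions->leftmost(p); idx <= rightmost;
  //        idx = partitions->find_index_of_next_available_region(p, idx + 1)) {
  //     visit(idx);                                         // every visited idx is a member of p
  //   }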

  inline bool in_free_set(ShenandoahFreeSetPartitionId which_partition, ssize_t idx) const {
    return _membership[int(which_partition)].is_set(idx);
  }

  // Returns the ShenandoahFreeSetPartitionId affiliation of region idx, NotFree if this region is not currently in any partition.
  // This does not enforce that free_set membership implies allocation capacity.
  inline ShenandoahFreeSetPartitionId membership(ssize_t idx) const;

#ifdef ASSERT
  // Returns true iff region idx's membership is which_partition.  If which_partition represents a free set, asserts
  // that the region has allocation capacity.
  inline bool partition_id_matches(ssize_t idx, ShenandoahFreeSetPartitionId which_partition) const;
#endif

  inline size_t max_regions() const { return _max; }

  inline size_t region_size_bytes() const { return _region_size_bytes; }

  // The following four methods return the left-most and right-most bounds on ranges of regions representing
  // the requested set.  The _empty variants represent bounds on the range that holds completely empty
  // regions, which are required for humongous allocations and desired for "very large" allocations.
  //   if the requested which_partition is empty:
  //     leftmost() and leftmost_empty() return _max, rightmost() and rightmost_empty() return 0
  //   otherwise, expect the following:
  //     0 <= leftmost <= leftmost_empty <= rightmost_empty <= rightmost < _max
  inline ssize_t leftmost(ShenandoahFreeSetPartitionId which_partition) const;
  inline ssize_t rightmost(ShenandoahFreeSetPartitionId which_partition) const;
  ssize_t leftmost_empty(ShenandoahFreeSetPartitionId which_partition);
  ssize_t rightmost_empty(ShenandoahFreeSetPartitionId which_partition);

  inline bool is_empty(ShenandoahFreeSetPartitionId which_partition) const;

  inline void increase_used(ShenandoahFreeSetPartitionId which_partition, size_t bytes);

  inline void set_bias_from_left_to_right(ShenandoahFreeSetPartitionId which_partition, bool value) {
    assert (which_partition < NumPartitions, "selected free set must be valid");
    _left_to_right_bias[int(which_partition)] = value;
  }

  inline bool alloc_from_left_bias(ShenandoahFreeSetPartitionId which_partition) const {
    assert (which_partition < NumPartitions, "selected free set must be valid");
    return _left_to_right_bias[int(which_partition)];
  }

  inline size_t capacity_of(ShenandoahFreeSetPartitionId which_partition) const {
    assert (which_partition < NumPartitions, "selected free set must be valid");
    return _capacity[int(which_partition)];
  }

  inline size_t used_by(ShenandoahFreeSetPartitionId which_partition) const {
    assert (which_partition < NumPartitions, "selected free set must be valid");
    return _used[int(which_partition)];
  }

  inline size_t available_in(ShenandoahFreeSetPartitionId which_partition) const {
    assert (which_partition < NumPartitions, "selected free set must be valid");
    return _capacity[int(which_partition)] - _used[int(which_partition)];
  }

  inline void set_capacity_of(ShenandoahFreeSetPartitionId which_partition, size_t value) {
    assert (which_partition < NumPartitions, "selected free set must be valid");
    _capacity[int(which_partition)] = value;
  }

  inline void set_used_by(ShenandoahFreeSetPartitionId which_partition, size_t value) {
    assert (which_partition < NumPartitions, "selected free set must be valid");
    _used[int(which_partition)] = value;
  }

  inline size_t count(ShenandoahFreeSetPartitionId which_partition) const { return _region_counts[int(which_partition)]; }

  // Assure leftmost, rightmost, leftmost_empty, and rightmost_empty bounds are valid for all free sets.
  // Valid bounds honor all of the following (where max is the number of heap regions):
  //   if the set is empty, leftmost equals max and rightmost equals 0
  //   Otherwise (the set is not empty):
  //     0 <= leftmost < max and 0 <= rightmost < max
  //     the region at leftmost is in the set
  //     the region at rightmost is in the set
  //     rightmost >= leftmost
  //     for every idx that is in the set {
  //       idx >= leftmost &&
  //       idx <= rightmost
  //     }
  //   if the set has no empty regions, leftmost_empty equals max and rightmost_empty equals 0
  //   Otherwise (the set has empty regions):
  //     0 <= leftmost_empty < max and 0 <= rightmost_empty < max
  //     rightmost_empty >= leftmost_empty
  //     for every idx that is in the set and is empty {
  //       idx >= leftmost_empty &&
  //       idx <= rightmost_empty
  //     }
  void assert_bounds() NOT_DEBUG_RETURN;
};

// Publicly, ShenandoahFreeSet represents memory that is available to mutator threads.  The public capacity(), used(),
// and available() methods represent this public notion of memory that is under control of the mutator.  Separately,
// ShenandoahFreeSet also represents memory available to garbage collection activities for compaction purposes.
//
// The Shenandoah garbage collector evacuates live objects out of specific regions that are identified as members of the
// collection set (cset).
//
// The ShenandoahFreeSet tries to colocate survivor objects (objects that have been evacuated at least once) at the
// high end of memory.  New mutator allocations are taken from the low end of memory.  Within the mutator's range of regions,
// humongous allocations are taken from the lowest addresses, and LAB (local allocation buffers) and regular shared allocations
// are taken from the higher addresses of the mutator's range of regions.  This approach allows longer lasting survivor regions
// to congregate at the top of the heap and longer lasting humongous regions to congregate at the bottom of the heap, with
// short-lived frequently evacuated regions occupying the middle of the heap.
//
// Mutator and garbage collection activities tend to scramble the content of regions.  Twice during each GC pass, we rebuild
// the free set in an effort to restore the efficient segregation of Collector and Mutator regions:
//
//  1. At the start of evacuation, we know exactly how much memory is going to be evacuated, and this guides our
//     sizing of the Collector free set.
//
//  2. At the end of GC, we have reclaimed all of the memory that was spanned by the cset.  We rebuild here to make
//     sure there is enough memory reserved at the high end of memory to hold the objects that might need to be evacuated
//     during the next GC pass.

class ShenandoahFreeSet : public CHeapObj<mtGC> {
private:
  ShenandoahHeap* const _heap;
  ShenandoahRegionPartitions _partitions;
  ShenandoahHeapRegion** _trash_regions;
  size_t _retired_old_regions;

  HeapWord* allocate_aligned_plab(size_t size, ShenandoahAllocRequest& req, ShenandoahHeapRegion* r);

  // Return the address of memory allocated, setting in_new_region to true iff the allocation is taken
  // from a region that was previously empty.  Return nullptr if memory could not be allocated.
  inline HeapWord* allocate_from_partition_with_affiliation(ShenandoahFreeSetPartitionId which_partition,
                                                            ShenandoahAffiliation affiliation,
                                                            ShenandoahAllocRequest& req, bool& in_new_region);

  // We re-evaluate the left-to-right allocation bias whenever _alloc_bias_weight is less than zero.  Each time
  // we allocate an object, we decrement this counter.  Each time we re-evaluate whether to allocate
  // from right-to-left or left-to-right, we reset the counter to _InitialAllocBiasWeight.
  ssize_t _alloc_bias_weight;

  const ssize_t _InitialAllocBiasWeight = 256;
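
  // Illustrative only: a hedged sketch of the re-evaluation rhythm described above, not the actual allocation path.
  // The helper name choose_allocation_bias() is hypothetical.
  //
  //   if (--_alloc_bias_weight < 0) {
  //     choose_allocation_bias();                           // pick left-to-right or right-to-left
  //     _alloc_bias_weight = _InitialAllocBiasWeight;       // wait another 256 allocations before re-evaluating
  //   }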

  HeapWord* try_allocate_in(ShenandoahHeapRegion* region, ShenandoahAllocRequest& req, bool& in_new_region);

  // While holding the heap lock, allocate memory for a single object or LAB which is to be entirely contained
  // within a single HeapRegion as characterized by req.
  //
  // Precondition: !ShenandoahHeapRegion::requires_humongous(req.size())
  HeapWord* allocate_single(ShenandoahAllocRequest& req, bool& in_new_region);

  // While holding the heap lock, allocate memory for a humongous object which spans one or more regions that
  // were previously empty.  Regions that represent humongous objects are entirely dedicated to the humongous
  // object.  No other objects are packed into these regions.
  //
  // Precondition: ShenandoahHeapRegion::requires_humongous(req.size())
  HeapWord* allocate_contiguous(ShenandoahAllocRequest& req);

  // Change region r from the Mutator partition to the GC's Collector or OldCollector partition.  This requires that the
  // region is entirely empty.
  //
  // Typical usage: During evacuation, the GC may find it needs more memory than had been reserved at the start of evacuation to
  // hold evacuated objects.  If this occurs and memory is still available in the Mutator's free set, we will flip a region from
  // the Mutator free set into the Collector or OldCollector free set.
  void flip_to_gc(ShenandoahHeapRegion* r);
  void flip_to_old_gc(ShenandoahHeapRegion* r);

  void clear_internal();
  void try_recycle_trashed(ShenandoahHeapRegion *r);

  // Returns true iff this region is entirely available, either because it is empty() or because it has been found to represent
  // immediate trash and we'll be able to immediately recycle it.  Note that we cannot recycle immediate trash if
  // concurrent weak root processing is in progress.
  inline bool can_allocate_from(ShenandoahHeapRegion *r) const;
  inline bool can_allocate_from(size_t idx) const;

  inline bool has_alloc_capacity(ShenandoahHeapRegion *r) const;

  size_t transfer_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId which_collector,
                                                                  size_t max_xfer_regions,
                                                                  size_t& bytes_transferred);
  size_t transfer_non_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId collector_id,
                                                                      size_t max_xfer_regions,
                                                                      size_t& bytes_transferred);


  // Determine whether we prefer to allocate from left to right or from right to left within the OldCollector free-set.
  void establish_old_collector_alloc_bias();

  // Set max_capacity for young and old generations
  void establish_generation_sizes(size_t young_region_count, size_t old_region_count);
  size_t get_usable_free_words(size_t free_bytes) const;

  // Log status, assuming lock has already been acquired by the caller.
  void log_status();

public:
  ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions);

  // Public because ShenandoahRegionPartitions assertions require access.
  inline size_t alloc_capacity(ShenandoahHeapRegion *r) const;
  inline size_t alloc_capacity(size_t idx) const;

  void clear();

  // Examine the existing free set representation, capturing the current state into var arguments:
  //
  // young_cset_regions is the number of regions currently in the young cset if we are starting to evacuate, or zero
  //   old_cset_regions is the number of regions currently in the old cset if we are starting a mixed evacuation, or zero
  //   first_old_region is the index of the first region that is part of the OldCollector set
  //    last_old_region is the index of the last region that is part of the OldCollector set
  //   old_region_count is the number of regions in the OldCollector set that have memory available to be allocated
  void prepare_to_rebuild(size_t &young_cset_regions, size_t &old_cset_regions,
                          size_t &first_old_region, size_t &last_old_region, size_t &old_region_count);

  // At the end of final mark, but before we begin evacuating, heuristics calculate how much memory is required to
  // hold the results of evacuating to young-gen and to old-gen, and have_evacuation_reserves should be true.
  // These quantities, stored as reserves for their respective generations, are consulted prior to rebuilding
  // the free set (ShenandoahFreeSet) in preparation for evacuation.  When the free set is rebuilt, we make sure
  // to reserve sufficient memory in the collector and old_collector sets to hold evacuations.
  //
  // We also rebuild the free set at the end of GC, as we prepare to idle GC until the next trigger.  In this case,
  // have_evacuation_reserves is false because we don't yet know how much memory will need to be evacuated in the
  // next GC cycle.  When have_evacuation_reserves is false, the free set rebuild operation reserves for the collector
  // and old_collector sets based on alternative mechanisms, such as ShenandoahEvacReserve, ShenandoahOldEvacReserve, and
  // ShenandoahOldCompactionReserve.  In a planned future enhancement, the reserve for the old_collector set when the
  // evacuation reserves are unknown will be based in part on anticipated promotion, as determined by analysis of the live
  // data found during the previous GC pass in the age cohort that is one less than the current tenure age.
  //
  // young_cset_regions is the number of regions currently in the young cset if we are starting to evacuate, or zero
  //   old_cset_regions is the number of regions currently in the old cset if we are starting a mixed evacuation, or zero
  //    num_old_regions is the number of old-gen regions that have available memory for further allocations (excluding old cset)
  // have_evacuation_reserves is true iff the desired values of young-gen and old-gen evacuation reserves and old-gen
  //                    promotion reserve have been precomputed (and can be obtained by invoking
  //                    <generation>->get_evacuation_reserve() or old_gen->get_promoted_reserve())
  void finish_rebuild(size_t young_cset_regions, size_t old_cset_regions, size_t num_old_regions,
                      bool have_evacuation_reserves = false);

  // When a region is promoted in place, we invoke this method to add the region's available memory into the old
  // collector partition, provided that available memory is greater than plab_min_size().
  void add_promoted_in_place_region_to_old_collector(ShenandoahHeapRegion* region);

  // Move up to cset_regions regions from being available to the collector to being available to the mutator.
  //
  // Typical usage: At the end of evacuation, when the collector no longer needs the regions that had been reserved
  // for evacuation, invoke this to make regions available for mutator allocations.
  void move_regions_from_collector_to_mutator(size_t cset_regions);

  void recycle_trash();

  // Acquire heap lock and log status, assuming heap lock is not acquired by the caller.
  void log_status_under_lock();

  inline size_t capacity()  const { return _partitions.capacity_of(ShenandoahFreeSetPartitionId::Mutator); }
  inline size_t used()      const { return _partitions.used_by(ShenandoahFreeSetPartitionId::Mutator);     }
  inline size_t available() const {
    assert(used() <= capacity(), "must use less than capacity");
    return capacity() - used();
  }

  HeapWord* allocate(ShenandoahAllocRequest& req, bool& in_new_region);
  size_t unsafe_peek_free() const;

  /*
   * Internal fragmentation metric: describes how fragmented the heap regions are.
   *
   * It is derived as:
   *
   *               sum(used[i]^2, i=0..k)
   *   IF = 1 - ------------------------------
   *              C * sum(used[i], i=0..k)
   *
   * ...where k is the number of regions in computation, C is the region capacity, and
   * used[i] is the used space in the region.
   *
   * The non-linearity causes IF to be lower for the cases where the same total heap
   * used is densely packed. For example:
   *   a) Heap is completely full  => IF = 0
   *   b) Heap is half full, first 50% regions are completely full => IF = 0
   *   c) Heap is half full, each region is 50% full => IF = 1/2
   *   d) Heap is quarter full, first 25% regions are completely full => IF = 0
   *   e) Heap is quarter full, each region is 25% full => IF = 3/4
   *   f) Heap has one small object per each region => IF =~ 1
   */
  double internal_fragmentation();
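
  // Illustrative only: a hedged, self-contained sketch of the IF formula above, not the actual
  // internal_fragmentation() implementation.
  //
  //   double internal_fragmentation_sketch(const size_t* used, size_t k, size_t C) {
  //     double squared = 0.0, linear = 0.0;
  //     for (size_t i = 0; i < k; i++) {
  //       squared += double(used[i]) * double(used[i]);
  //       linear  += double(used[i]);
  //     }
  //     return (linear == 0.0) ? 0.0 : 1.0 - squared / (double(C) * linear);
  //   }
  //
  // For case (c) above, every region is 50% full: squared = k*(C/2)^2 and linear = k*(C/2), so IF = 1 - (C/2)/C = 1/2.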

  /*
   * External fragmentation metric: describes how fragmented the heap is.
   *
   * It is derived as:
   *
   *   EF = 1 - largest_contiguous_free / total_free
   *
   * For example:
   *   a) Heap is completely empty => EF = 0
   *   b) Heap is completely full => EF = 0
   *   c) Heap is first-half full => EF = 1/2
   *   d) Heap is half full, full and empty regions interleave => EF =~ 1
   */
  double external_fragmentation();
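
  // Illustrative only: a hedged sketch of the EF formula above, not the actual external_fragmentation()
  // implementation.  free_runs[] holds the sizes of the maximal contiguous free runs in the heap.
  //
  //   double external_fragmentation_sketch(const size_t* free_runs, size_t n) {
  //     size_t total = 0, largest = 0;
  //     for (size_t i = 0; i < n; i++) {
  //       total += free_runs[i];
  //       if (free_runs[i] > largest) largest = free_runs[i];
  //     }
  //     return (total == 0) ? 0.0 : 1.0 - double(largest) / double(total);
  //   }
  //
  // For case (d) above, each free run is a single region while the total free space spans many regions, so
  // largest/total is small and EF approaches 1.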

  void print_on(outputStream* out) const;

  // This function places all regions that have allocation capacity into the mutator partition, or if the region
  // is already affiliated with old, into the old collector partition, identifying regions that have no allocation
  // capacity as NotFree.  Capture the modified state of the freeset into var arguments:
  //
  // young_cset_regions is the number of regions currently in the young cset if we are starting to evacuate, or zero
  //   old_cset_regions is the number of regions currently in the old cset if we are starting a mixed evacuation, or zero
  //   first_old_region is the index of the first region that is part of the OldCollector set
  //    last_old_region is the index of the last region that is part of the OldCollector set
  //   old_region_count is the number of regions in the OldCollector set that have memory available to be allocated
  void find_regions_with_alloc_capacity(size_t &young_cset_regions, size_t &old_cset_regions,
                                        size_t &first_old_region, size_t &last_old_region, size_t &old_region_count);

  // Ensure that Collector has at least to_reserve bytes of available memory, and OldCollector has at least old_reserve
  // bytes of available memory.  On input, old_region_count holds the number of regions already present in the
  // OldCollector partition.  Upon return, old_region_count holds the updated number of regions in the OldCollector partition.
  void reserve_regions(size_t to_reserve, size_t old_reserve, size_t &old_region_count);

  // Reserve space for evacuations, with regions reserved for old evacuations placed to the right
  // of regions reserved for young evacuations.
  void compute_young_and_old_reserves(size_t young_cset_regions, size_t old_cset_regions, bool have_evacuation_reserves,
                                      size_t &young_reserve_result, size_t &old_reserve_result) const;
};

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHFREESET_HPP