
src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp


  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "gc/shared/tlab_globals.hpp"

 27 #include "gc/shenandoah/shenandoahFreeSet.hpp"
 28 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 29 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
 30 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"



 31 #include "logging/logStream.hpp"
 32 #include "memory/resourceArea.hpp"
 33 #include "runtime/orderAccess.hpp"
 34 
 35 ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
 36   _heap(heap),
 37   _mutator_free_bitmap(max_regions, mtGC),
 38   _collector_free_bitmap(max_regions, mtGC),
 39   _max(max_regions)
 40 {
 41   clear_internal();
 42 }
 43 
 44 void ShenandoahFreeSet::increase_used(size_t num_bytes) {
 45   shenandoah_assert_heaplocked();
 46   _used += num_bytes;
 47 
 48   assert(_used <= _capacity, "must not use more than we have: used: " SIZE_FORMAT
 49          ", capacity: " SIZE_FORMAT ", num_bytes: " SIZE_FORMAT, _used, _capacity, num_bytes);
 50 }
 51 
 52 bool ShenandoahFreeSet::is_mutator_free(size_t idx) const {
 53   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
 54           idx, _max, _mutator_leftmost, _mutator_rightmost);
 55   return _mutator_free_bitmap.at(idx);
 56 }
 57 
 58 bool ShenandoahFreeSet::is_collector_free(size_t idx) const {
 59   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
 60           idx, _max, _collector_leftmost, _collector_rightmost);
 61   return _collector_free_bitmap.at(idx);
 62 }
 63 
 64 HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
 65   // Scan the bitmap looking for a first fit.
 66   //
 67   // Leftmost and rightmost bounds provide enough caching to walk bitmap efficiently. Normally,
 68   // we would find the region to allocate at right away.
 69   //
 70   // Allocations are biased: new application allocs go to beginning of the heap, and GC allocs
 71   // go to the end. This makes application allocation faster, because we would clear lots
 72   // of regions from the beginning most of the time.
 73   //
 74   // Free set maintains mutator and collector views, and normally they allocate in their views only,
 75   // unless we special-case stealing and mixed allocations.
 76 
 77   switch (req.type()) {
 78     case ShenandoahAllocRequest::_alloc_tlab:
 79     case ShenandoahAllocRequest::_alloc_shared: {
 80 
 81       // Try to allocate in the mutator view
 82       for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
 83         if (is_mutator_free(idx)) {
 84           HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);


 85           if (result != NULL) {
 86             return result;
 87           }
 88         }
 89       }
 90 
 91       // There is no recovery. Mutator does not touch collector view at all.
 92       break;
 93     }
 94     case ShenandoahAllocRequest::_alloc_gclab:
 95     case ShenandoahAllocRequest::_alloc_shared_gc: {
 96       // size_t is unsigned, need to dodge underflow when _leftmost = 0
 97 
 98       // Fast-path: try to allocate in the collector view first
 99       for (size_t c = _collector_rightmost + 1; c > _collector_leftmost; c--) {
100         size_t idx = c - 1;
101         if (is_collector_free(idx)) {
102           HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
103           if (result != NULL) {
104             return result;
105           }
106         }
107       }
108 
109       // No dice. Can we borrow space from mutator view?
110       if (!ShenandoahEvacReserveOverflow) {
111         return NULL;
112       }
113 
114       // Try to steal the empty region from the mutator view
115       for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
116         size_t idx = c - 1;
117         if (is_mutator_free(idx)) {
118           ShenandoahHeapRegion* r = _heap->get_region(idx);
119           if (can_allocate_from(r)) {
120             flip_to_gc(r);
121             HeapWord *result = try_allocate_in(r, req, in_new_region);
122             if (result != NULL) {
123               return result;


124             }
125           }
126         }
127       }
128 
129       // No dice. Do not try to mix mutator and GC allocations, because
130       // URWM moves due to GC allocations would expose unparsable mutator
131       // allocations.
132 
133       break;
134     }
135     default:
136       ShouldNotReachHere();
137   }
138 
139   return NULL;
140 }
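
For illustration only (not part of the patch): a minimal, self-contained sketch of the biased first-fit scan that allocate_single() performs, using a plain std::vector<bool> in place of the real CHeapBitMap members. The helper names first_fit_mutator and first_fit_collector are hypothetical. The point of interest is the reverse loop written as c = rightmost + 1; c > leftmost; c--, which avoids size_t underflow when leftmost == 0.

#include <cstddef>
#include <vector>

// Mutator requests scan left-to-right, so application allocations cluster at the
// low end of the heap. Returns free.size() when nothing fits.
static size_t first_fit_mutator(const std::vector<bool>& free,
                                size_t leftmost, size_t rightmost) {
  for (size_t idx = leftmost; idx <= rightmost && idx < free.size(); idx++) {
    if (free[idx]) return idx;
  }
  return free.size();
}

// Collector requests scan right-to-left. size_t is unsigned, so iterate on
// c = idx + 1 to dodge underflow when leftmost == 0. Returns free.size() when nothing fits.
static size_t first_fit_collector(const std::vector<bool>& free,
                                  size_t leftmost, size_t rightmost) {
  for (size_t c = rightmost + 1; c > leftmost; c--) {
    size_t idx = c - 1;
    if (idx < free.size() && free[idx]) return idx;
  }
  return free.size();
}
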
141 
142 HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
143   assert (!has_no_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());
144 
145   if (_heap->is_concurrent_weak_root_in_progress() &&
146       r->is_trash()) {
147     return NULL;
148   }
149 
150   try_recycle_trashed(r);
151 
152   in_new_region = r->is_empty();
153 

154   HeapWord* result = NULL;
155   size_t size = req.size();
156 

157   if (ShenandoahElasticTLAB && req.is_lab_alloc()) {
158     size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
159     if (size > free) {
160       size = free;
161     }
162     if (size >= req.min_size()) {
163       result = r->allocate(size, req.type());
164       assert (result != NULL, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size);
165     }
166   } else {
167     result = r->allocate(size, req.type());




168   }
169 
170   if (result != NULL) {
171     // Allocation successful, bump stats:
172     if (req.is_mutator_alloc()) {


173       increase_used(size * HeapWordSize);
174     }
175 
176     // Record actual allocation size
177     req.set_actual_size(size);
178 
179     if (req.is_gc_alloc()) {




180       r->set_update_watermark(r->top());
181     }
182   }
183 
184   if (result == NULL || has_no_alloc_capacity(r)) {
185     // Region cannot afford this or future allocations. Retire it.
186     //
187     // While this seems a bit harsh, especially when this large allocation does not fit but the
188     // next small one would, we risk inflating scan times when lots of almost-full regions
189     // precede the fully-empty region where we want to allocate the entire TLAB.
190     // TODO: Record first fully-empty region, and use that for large allocations

191 
192     // Record the remainder as allocation waste
193     if (req.is_mutator_alloc()) {
194       size_t waste = r->free();
195       if (waste > 0) {
196         increase_used(waste);

197         _heap->notify_mutator_alloc_words(waste >> LogHeapWordSize, true);
198       }
199     }
200 
201     size_t num = r->index();
202     _collector_free_bitmap.clear_bit(num);
203     _mutator_free_bitmap.clear_bit(num);
204     // Touched the bounds? Need to update:
205     if (touches_bounds(num)) {
206       adjust_bounds();
207     }
208     assert_bounds();
209   }
210   return result;
211 }
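
For illustration only (not part of the patch): the elastic-TLAB path above shrinks an oversized LAB request to whatever the region can still hold, as long as the result is at least req.min_size(). Below is a hedged, standalone sketch of that sizing rule with HotSpot's align_down, MinObjAlignment and LogHeapWordSize replaced by plain parameters; elastic_lab_size_words is a hypothetical helper.

#include <cstddef>

// Round value down to a multiple of alignment (alignment > 0).
static size_t align_down_to(size_t value, size_t alignment) {
  return value - (value % alignment);
}

// Returns the number of words to hand out for an elastic LAB request,
// or 0 when even the minimum size does not fit in the region.
static size_t elastic_lab_size_words(size_t requested_words,
                                     size_t min_words,
                                     size_t region_free_bytes,
                                     size_t heap_word_size,      // bytes per word, e.g. 8
                                     size_t min_obj_alignment) { // in words
  size_t free_words = align_down_to(region_free_bytes / heap_word_size, min_obj_alignment);
  size_t size = (requested_words > free_words) ? free_words : requested_words;
  return (size >= min_words) ? size : 0;
}
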
212 
213 bool ShenandoahFreeSet::touches_bounds(size_t num) const {
214   return num == _collector_leftmost || num == _collector_rightmost || num == _mutator_leftmost || num == _mutator_rightmost;
215 }
216 

231     _mutator_leftmost++;
232   }
233   while (_mutator_rightmost > 0 && !is_mutator_free(_mutator_rightmost)) {
234     _mutator_rightmost--;
235   }
236   // Rewind both collector bounds until the next bit.
237   while (_collector_leftmost < _max && !is_collector_free(_collector_leftmost)) {
238     _collector_leftmost++;
239   }
240   while (_collector_rightmost > 0 && !is_collector_free(_collector_rightmost)) {
241     _collector_rightmost--;
242   }
243 }
244 
245 HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
246   shenandoah_assert_heaplocked();
247 
248   size_t words_size = req.size();
249   size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
250 



251   // No regions left to satisfy allocation, bye.
252   if (num > mutator_count()) {
253     return NULL;
254   }
255 
256   // Find the continuous interval of $num regions, starting from $beg and ending in $end,
257   // inclusive. Contiguous allocations are biased to the beginning.
258 
259   size_t beg = _mutator_leftmost;
260   size_t end = beg;
261 
262   while (true) {
263     if (end >= _max) {
264       // Hit the end, goodbye
265       return NULL;
266     }
267 
268     // If regions are not adjacent, then current [beg; end] is useless, and we may fast-forward.
269     // If region is not completely free, the current [beg; end] is useless, and we may fast-forward.
270     if (!is_mutator_free(end) || !can_allocate_from(_heap->get_region(end))) {
271       end++;
272       beg = end;
273       continue;
274     }
275 
276     if ((end - beg + 1) == num) {
277       // found the match
278       break;
279     }
280 
281     end++;
282   };
283 
284   size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();

285 
286   // Initialize regions:
287   for (size_t i = beg; i <= end; i++) {
288     ShenandoahHeapRegion* r = _heap->get_region(i);
289     try_recycle_trashed(r);
290 
291     assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous");
292     assert(r->is_empty(), "Should be empty");
293 
294     if (i == beg) {
295       r->make_humongous_start();
296     } else {
297       r->make_humongous_cont();
298     }
299 
300     // Trailing region may be non-full, record the remainder there
301     size_t used_words;
302     if ((i == end) && (remainder != 0)) {
303       used_words = remainder;
304     } else {
305       used_words = ShenandoahHeapRegion::region_size_words();
306     }
307 
308     r->set_top(r->bottom() + used_words);
309 
310     _mutator_free_bitmap.clear_bit(r->index());
311   }
312 
313   // While individual regions report their true use, all humongous regions are
314   // marked used in the free set.
315   increase_used(ShenandoahHeapRegion::region_size_bytes() * num);





316 
317   if (remainder != 0) {
318     // Record this remainder as allocation waste
319     _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);


320   }
321 
322   // Allocated at left/rightmost? Move the bounds appropriately.
323   if (beg == _mutator_leftmost || end == _mutator_rightmost) {
324     adjust_bounds();
325   }
326   assert_bounds();
327 
328   req.set_actual_size(words_size);
329   return _heap->get_region(beg)->bottom();
330 }
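
For illustration only (not part of the patch): a small sketch of the region arithmetic behind allocate_contiguous(), assuming, as ShenandoahHeapRegion does, that the region size in words is a power of two. The struct and helper names below are hypothetical.

#include <cassert>
#include <cstddef>

struct HumongousShape {
  size_t num_regions;     // whole regions the allocation spans
  size_t trailing_words;  // words used in the last region; 0 means the last region is full
};

static HumongousShape humongous_shape(size_t words_size, size_t region_size_words) {
  assert((region_size_words & (region_size_words - 1)) == 0 && "region size must be a power of two");
  HumongousShape s;
  s.num_regions    = (words_size + region_size_words - 1) / region_size_words;
  // For power-of-two sizes this mask equals words_size % region_size_words,
  // which is how the remainder of the trailing region is computed above.
  s.trailing_words = words_size & (region_size_words - 1);
  return s;
}
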
331 
332 bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) {
333   return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
334 }
335 
336 size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) {
337   if (r->is_trash()) {
338     // This would be recycled on allocation path
339     return ShenandoahHeapRegion::region_size_bytes();
340   } else {
341     return r->free();
342   }
343 }
344 
345 bool ShenandoahFreeSet::has_no_alloc_capacity(ShenandoahHeapRegion *r) {
346   return alloc_capacity(r) == 0;
347 }
348 
349 void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
350   if (r->is_trash()) {
351     _heap->decrease_used(r->used());
352     r->recycle();
353   }
354 }
355 
356 void ShenandoahFreeSet::recycle_trash() {
357   // The lock is not reentrant, so check that we do not already hold it
358   shenandoah_assert_not_heaplocked();
359 
360   for (size_t i = 0; i < _heap->num_regions(); i++) {
361     ShenandoahHeapRegion* r = _heap->get_region(i);
362     if (r->is_trash()) {
363       ShenandoahHeapLocker locker(_heap->lock());
364       try_recycle_trashed(r);
365     }
366     SpinPause(); // allow allocators to take the lock
367   }
368 }
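
For illustration only (not part of the patch): recycle_trash() deliberately takes the heap lock once per trash region rather than around the whole loop, and pauses between iterations so allocating threads can interleave. A sketch of that pattern, transplanted onto std::mutex and std::this_thread::yield() purely to make it self-contained:

#include <cstddef>
#include <mutex>
#include <thread>
#include <vector>

struct Region { bool is_trash = false; };

static void recycle_all_trash(std::vector<Region>& regions, std::mutex& heap_lock) {
  for (size_t i = 0; i < regions.size(); i++) {
    if (regions[i].is_trash) {
      std::lock_guard<std::mutex> locker(heap_lock);  // lock scope covers a single region only
      regions[i].is_trash = false;                    // stands in for try_recycle_trashed(r)
    }
    std::this_thread::yield();                        // stands in for SpinPause()
  }
}
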
369 
370 void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
371   size_t idx = r->index();
372 
373   assert(_mutator_free_bitmap.at(idx), "Should be in mutator view");
374   assert(can_allocate_from(r), "Should not be allocated");
375 
376   _mutator_free_bitmap.clear_bit(idx);
377   _collector_free_bitmap.set_bit(idx);
378   _collector_leftmost = MIN2(idx, _collector_leftmost);
379   _collector_rightmost = MAX2(idx, _collector_rightmost);
380 
381   _capacity -= alloc_capacity(r);
382 
383   if (touches_bounds(idx)) {
384     adjust_bounds();
385   }
386   assert_bounds();




387 }
388 
389 void ShenandoahFreeSet::clear() {
390   shenandoah_assert_heaplocked();
391   clear_internal();
392 }
393 
394 void ShenandoahFreeSet::clear_internal() {
395   _mutator_free_bitmap.clear();
396   _collector_free_bitmap.clear();
397   _mutator_leftmost = _max;
398   _mutator_rightmost = 0;
399   _collector_leftmost = _max;
400   _collector_rightmost = 0;
401   _capacity = 0;
402   _used = 0;
403 }
404 
405 void ShenandoahFreeSet::rebuild() {
406   shenandoah_assert_heaplocked();
407   clear();
408 

409   for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
410     ShenandoahHeapRegion* region = _heap->get_region(idx);
411     if (region->is_alloc_allowed() || region->is_trash()) {
412       assert(!region->is_cset(), "Shouldn't be adding those to the free set");
413 
414       // Do not add regions that would surely fail allocation
415       if (has_no_alloc_capacity(region)) continue;
416 
417       _capacity += alloc_capacity(region);
418       assert(_used <= _capacity, "must not use more than we have");
419 
420       assert(!is_mutator_free(idx), "We are about to add it, it shouldn't be there already");
421       _mutator_free_bitmap.set_bit(idx);


422     }
423   }
424 
425   // Evac reserve: reserve trailing space for evacuations
426   size_t to_reserve = _heap->max_capacity() / 100 * ShenandoahEvacReserve;
427   size_t reserved = 0;
428 
429   for (size_t idx = _heap->num_regions() - 1; idx > 0; idx--) {
430     if (reserved >= to_reserve) break;
431 
432     ShenandoahHeapRegion* region = _heap->get_region(idx);
433     if (_mutator_free_bitmap.at(idx) && can_allocate_from(region)) {
434       _mutator_free_bitmap.clear_bit(idx);
435       _collector_free_bitmap.set_bit(idx);
436       size_t ac = alloc_capacity(region);
437       _capacity -= ac;
438       reserved += ac;

439     }
440   }
441 
442   recompute_bounds();
443   assert_bounds();
444 }
445 
446 void ShenandoahFreeSet::log_status() {
447   shenandoah_assert_heaplocked();
448 
449   LogTarget(Info, gc, ergo) lt;
450   if (lt.is_enabled()) {
451     ResourceMark rm;
452     LogStream ls(lt);
453 
454     {
455       size_t last_idx = 0;
456       size_t max = 0;
457       size_t max_contig = 0;
458       size_t empty_contig = 0;
459 
460       size_t total_used = 0;
461       size_t total_free = 0;
462       size_t total_free_ext = 0;
463 

521       for (size_t idx = _collector_leftmost; idx <= _collector_rightmost; idx++) {
522         if (is_collector_free(idx)) {
523           ShenandoahHeapRegion *r = _heap->get_region(idx);
524           size_t free = alloc_capacity(r);
525           max = MAX2(max, free);
526           total_free += free;
527         }
528       }
529 
530       ls.print_cr("Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s",
531                   byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
532                   byte_size_in_proper_unit(max),        proper_unit_for_byte_size(max));
533     }
534   }
535 }
536 
537 HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
538   shenandoah_assert_heaplocked();
539   assert_bounds();
540 

541   if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
542     switch (req.type()) {
543       case ShenandoahAllocRequest::_alloc_shared:
544       case ShenandoahAllocRequest::_alloc_shared_gc:
545         in_new_region = true;
546         return allocate_contiguous(req);

547       case ShenandoahAllocRequest::_alloc_gclab:
548       case ShenandoahAllocRequest::_alloc_tlab:
549         in_new_region = false;
550         assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
551                req.size(), ShenandoahHeapRegion::humongous_threshold_words());
552         return NULL;
553       default:
554         ShouldNotReachHere();
555         return NULL;
556     }
557   } else {
558     return allocate_single(req, in_new_region);
559   }
560 }
561 
562 size_t ShenandoahFreeSet::unsafe_peek_free() const {
563   // Deliberately not locked, this method is unsafe when free set is modified.
564 
565   for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
566     if (index < _max && is_mutator_free(index)) {
src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp (updated version follows)
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "gc/shared/tlab_globals.hpp"
 27 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
 28 #include "gc/shenandoah/shenandoahFreeSet.hpp"
 29 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 30 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
 31 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
 32 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
 33 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
 34 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
 35 #include "logging/logStream.hpp"
 36 #include "memory/resourceArea.hpp"
 37 #include "runtime/orderAccess.hpp"
 38 
 39 ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
 40   _heap(heap),
 41   _mutator_free_bitmap(max_regions, mtGC),
 42   _collector_free_bitmap(max_regions, mtGC),
 43   _max(max_regions)
 44 {
 45   clear_internal();
 46 }
 47 
 48 void ShenandoahFreeSet::increase_used(size_t num_bytes) {
 49   shenandoah_assert_heaplocked();
 50   _used += num_bytes;
 51 
 52   assert(_used <= _capacity, "must not use more than we have: used: " SIZE_FORMAT
 53          ", capacity: " SIZE_FORMAT ", num_bytes: " SIZE_FORMAT, _used, _capacity, num_bytes);
 54 }
 55 
 56 bool ShenandoahFreeSet::is_mutator_free(size_t idx) const {
 57   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
 58           idx, _max, _mutator_leftmost, _mutator_rightmost);
 59   return _mutator_free_bitmap.at(idx);
 60 }
 61 
 62 bool ShenandoahFreeSet::is_collector_free(size_t idx) const {
 63   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
 64           idx, _max, _collector_leftmost, _collector_rightmost);
 65   return _collector_free_bitmap.at(idx);
 66 }
 67 
 68 // This is a temporary solution to work around a shortcoming with the existing free set implementation.
 69 // TODO:
 70 //   Remove this function after restructuring the FreeSet representation.  A problem in the existing implementation is that old-gen
 71 //   regions are not considered to reside within the is_collector_free range.
 72 //
 73 HeapWord* ShenandoahFreeSet::allocate_with_old_affiliation(ShenandoahAllocRequest& req, bool& in_new_region) {
 74   ShenandoahRegionAffiliation affiliation = ShenandoahRegionAffiliation::OLD_GENERATION;
 75 
 76   size_t rightmost = MAX2(_collector_rightmost, _mutator_rightmost);
 77   size_t leftmost = MIN2(_collector_leftmost, _mutator_leftmost);
 78 
 79   for (size_t c = rightmost + 1; c > leftmost; c--) {
 80     // size_t is unsigned, need to dodge underflow when _leftmost = 0
 81     size_t idx = c - 1;
 82     ShenandoahHeapRegion* r = _heap->get_region(idx);
 83     if (r->affiliation() == affiliation && !r->is_humongous()) {
 84       if (!r->is_cset() && !has_no_alloc_capacity(r)) {
 85         HeapWord* result = try_allocate_in(r, req, in_new_region);
 86         if (result != NULL) {
 87           return result;
 88         }
 89       }
 90     }
 91   }
 92   return nullptr;
 93 }
 94 
 95 HeapWord* ShenandoahFreeSet::allocate_with_affiliation(ShenandoahRegionAffiliation affiliation, ShenandoahAllocRequest& req, bool& in_new_region) {
 96   for (size_t c = _collector_rightmost + 1; c > _collector_leftmost; c--) {
 97     // size_t is unsigned, need to dodge underflow when _leftmost = 0
 98     size_t idx = c - 1;
 99     if (is_collector_free(idx)) {
100       ShenandoahHeapRegion* r = _heap->get_region(idx);
101       if (r->affiliation() == affiliation) {
102         HeapWord* result = try_allocate_in(r, req, in_new_region);
103         if (result != NULL) {
104           return result;
105         }
106       }
107     }
108   }
109   return NULL;
110 }
111 
112 HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
113   // Scan the bitmap looking for a first fit.
114   //
115   // Leftmost and rightmost bounds provide enough caching to walk bitmap efficiently. Normally,
116   // we would find the region to allocate at right away.
117   //
118   // Allocations are biased: new application allocs go to beginning of the heap, and GC allocs
119   // go to the end. This makes application allocation faster, because we would clear lots
120   // of regions from the beginning most of the time.
121   //
122   // Free set maintains mutator and collector views, and normally they allocate in their views only,
123   // unless we special-case stealing and mixed allocations.
124 
125   // Overwrite with non-zero (non-NULL) values only if necessary for allocation bookkeeping.
126 
127   bool allow_new_region = true;
128   switch (req.affiliation()) {
129     case ShenandoahRegionAffiliation::OLD_GENERATION:
130       // Note: unsigned result from adjusted_unaffiliated_regions() will never be less than zero, but it may equal zero.
131       if (_heap->old_generation()->adjusted_unaffiliated_regions() <= 0) {
132         allow_new_region = false;
133       }
134       break;
135 
136     case ShenandoahRegionAffiliation::YOUNG_GENERATION:
137       // Note: unsigned result from adjusted_unaffiliated_regions() will never be less than zero, but it may equal zero.
138       if (_heap->young_generation()->adjusted_unaffiliated_regions() <= 0) {
139         allow_new_region = false;
140       }
141       break;
142 
143     case ShenandoahRegionAffiliation::FREE:
144     default:
145       ShouldNotReachHere();
146       break;
147   }
148 
149   switch (req.type()) {
150     case ShenandoahAllocRequest::_alloc_tlab:
151     case ShenandoahAllocRequest::_alloc_shared: {

152       // Try to allocate in the mutator view
153       for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
154         ShenandoahHeapRegion* r = _heap->get_region(idx);
155         if (is_mutator_free(idx) && (allow_new_region || r->affiliation() != ShenandoahRegionAffiliation::FREE)) {
156           // try_allocate_in() increases used if the allocation is successful.
157           HeapWord* result = try_allocate_in(r, req, in_new_region);
158           if (result != NULL) {
159             return result;
160           }
161         }
162       }

163       // There is no recovery. Mutator does not touch collector view at all.
164       break;
165     }
166     case ShenandoahAllocRequest::_alloc_gclab:
167       // GCLABs are for evacuation so we must be in evacuation phase.  If this allocation is successful, increment
168       // the relevant evac_expended rather than used value.
169 
170     case ShenandoahAllocRequest::_alloc_plab:
171       // PLABs always reside in old-gen and are only allocated during evacuation phase.
172 
173     case ShenandoahAllocRequest::_alloc_shared_gc: {
174       // First try to fit into a region that is already in use in the same generation.
175       HeapWord* result;
176       if (req.affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION) {
177         // TODO: this is a work around to address a deficiency in FreeSet representation.  A better solution fixes
178         // the FreeSet implementation to deal more efficiently with old-gen regions as being in the "collector free set"
179         result = allocate_with_old_affiliation(req, in_new_region);
180       } else {
181         result = allocate_with_affiliation(req.affiliation(), req, in_new_region);
182       }
183       if (result != NULL) {
184         return result;
185       }
186       if (allow_new_region) {
187         // Then try a free region that is dedicated to GC allocations.
188         result = allocate_with_affiliation(FREE, req, in_new_region);
189         if (result != NULL) {
190           return result;
191         }
192       }
193 
194       // No dice. Can we borrow space from mutator view?
195       if (!ShenandoahEvacReserveOverflow) {
196         return NULL;
197       }
198 
199       if (allow_new_region) {
200         // Try to steal an empty region from the mutator view.
201         for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
202           size_t idx = c - 1;
203           if (is_mutator_free(idx)) {
204             ShenandoahHeapRegion* r = _heap->get_region(idx);
205             if (can_allocate_from(r)) {
206               flip_to_gc(r);
207               HeapWord *result = try_allocate_in(r, req, in_new_region);
208               if (result != NULL) {
209                 return result;
210               }
211             }
212           }
213         }
214       }
215 
216       // No dice. Do not try to mix mutator and GC allocations, because
217       // URWM moves due to GC allocations would expose unparsable mutator
218       // allocations.

219       break;
220     }
221     default:
222       ShouldNotReachHere();
223   }

224   return NULL;
225 }
226 
227 HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
228   assert (!has_no_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());
229 
230   if (_heap->is_concurrent_weak_root_in_progress() &&
231       r->is_trash()) {
232     return NULL;
233   }

234   try_recycle_trashed(r);
235   if (r->affiliation() == ShenandoahRegionAffiliation::FREE) {
236     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
237     r->set_affiliation(req.affiliation());
238     if (r->is_old()) {
239       // Any OLD region allocated during concurrent coalesce-and-fill does not need to be coalesced and filled because
240       // all objects allocated within this region are above TAMS (and thus are implicitly marked).  In case this is an
241       // OLD region and concurrent preparation for mixed evacuations visits this region before the start of the next
242       // old-gen concurrent mark (i.e. this region is allocated following the start of old-gen concurrent mark but before
243       // concurrent preparations for mixed evacuations are completed), we mark this region as not requiring any
244       // coalesce-and-fill processing.
245       r->end_preemptible_coalesce_and_fill();
246       _heap->clear_cards_for(r);
247     }
248 
249     assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom");
250     assert(ctx->is_bitmap_clear_range(ctx->top_bitmap(r), r->end()), "Bitmap above top_bitmap() must be clear");
251 
252     // Leave top_bitmap alone.  The first time a heap region is put into service, top_bitmap should equal end.
253     // Thereafter, it should represent the upper bound on parts of the bitmap that need to be cleared.
254     log_debug(gc)("NOT clearing bitmap for region " SIZE_FORMAT ", top_bitmap: "
255                   PTR_FORMAT " at transition from FREE to %s",
256                   r->index(), p2i(ctx->top_bitmap(r)), affiliation_name(req.affiliation()));
257   } else if (r->affiliation() != req.affiliation()) {
258     return NULL;
259   }
260 
261   in_new_region = r->is_empty();
262   HeapWord* result = NULL;
263   size_t size = req.size();
264 
265   // req.size() is in words, r->free() is in bytes.
266   if (ShenandoahElasticTLAB && req.is_lab_alloc()) {
267     if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
268       // Need to ensure that PLABs are aligned on a multiple of the card size.
269       size_t free = r->free();
270       size_t usable_free = (free / CardTable::card_size()) << CardTable::card_shift();
271       if ((free != usable_free) && (free - usable_free < ShenandoahHeap::min_fill_size() * HeapWordSize)) {
272         // We'll have to add another card's memory to the padding
273         if (usable_free > CardTable::card_size()) {
274           usable_free -= CardTable::card_size();
275         } else {
276           assert(usable_free == 0, "usable_free is a multiple of card_size and card_size > min_fill_size");
277         }
278       }
279       free /= HeapWordSize;
280       usable_free /= HeapWordSize;
281       size_t remnant = size % CardTable::card_size_in_words();
282       if (remnant > 0) {
283         // Since we have Elastic TLABs, align size up.  This is consistent with aligning min_size up.
284         size = size - remnant + CardTable::card_size_in_words();
285       }
286       if (size > usable_free) {
287         size = usable_free;
288         assert(size % CardTable::card_size_in_words() == 0, "usable_free is a multiple of card table size");
289       }
290 
291       size_t adjusted_min_size = req.min_size();
292       remnant = adjusted_min_size % CardTable::card_size_in_words();
293       if (remnant > 0) {
294         // Round up adjusted_min_size to a multiple of alignment size
295         adjusted_min_size = adjusted_min_size - remnant + CardTable::card_size_in_words();
296       }
297       if (size >= adjusted_min_size) {
298         result = r->allocate_aligned(size, req, CardTable::card_size());
299         assert(result != nullptr, "Allocation cannot fail");
300         size = req.actual_size();
301         assert(r->top() <= r->end(), "Allocation cannot span end of region");
302         // actual_size() will be set to size below.
303         assert((result == nullptr) || (size % CardTable::card_size_in_words() == 0),
304                "PLAB size must be multiple of card size");
305         assert((result == nullptr) || (((uintptr_t) result) % CardTable::card_size_in_words() == 0),
306                "PLAB start must align with card boundary");
307         if (free > usable_free) {
308           // Account for the alignment padding
309           size_t padding = (free - usable_free) * HeapWordSize;
310           increase_used(padding);
311           assert(r->affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION, "All PLABs reside in old-gen");
312           _heap->old_generation()->increase_used(padding);
313           // For verification consistency, we need to report this padding to _heap
314           _heap->increase_used(padding);
315         }
316       }
317       // Otherwise, leave result == NULL because the adjusted size is smaller than min size.
318     } else {
319       // This is a GCLAB or a TLAB allocation
320       size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
321       if (size > free) {
322         size = free;
323       }
324       if (size >= req.min_size()) {
325         result = r->allocate(size, req);
326         if (result != nullptr) {
327           // Record actual allocation size
328           req.set_actual_size(size);
329         }
330         assert (result != NULL, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size);
331       } else {
332         log_info(gc, ergo)("Failed to shrink TLAB or GCLAB request (" SIZE_FORMAT ") in region " SIZE_FORMAT " to " SIZE_FORMAT
333                            " because min_size() is " SIZE_FORMAT, req.size(), r->index(), size, req.min_size());
334       }
335     }
336   } else if (req.is_lab_alloc() && req.type() == ShenandoahAllocRequest::_alloc_plab) {
337     // inelastic PLAB
338     size_t free = r->free();
339     size_t usable_free = (free / CardTable::card_size()) << CardTable::card_shift();
340     free /= HeapWordSize;
341     usable_free /= HeapWordSize;
342     if ((free != usable_free) && (free - usable_free < ShenandoahHeap::min_fill_size() * HeapWordSize)) {
343       // We'll have to add another card's memory to the padding
344       if (usable_free > CardTable::card_size_in_words()) {
345         usable_free -= CardTable::card_size_in_words();
346       } else {
347         assert(usable_free == 0, "usable_free is a multiple of card_size and card_size > min_fill_size");
348       }
349     }
350     assert(size % CardTable::card_size_in_words() == 0, "PLAB size must be multiple of remembered set card size");
351     if (size <= usable_free) {
352       result = r->allocate_aligned(size, req, CardTable::card_size());
353       size = req.actual_size();
354       assert(result != nullptr, "Allocation cannot fail");
355       assert(r->top() <= r->end(), "Allocation cannot span end of region");
356       assert(req.actual_size() % CardTable::card_size_in_words() == 0, "PLAB start must align with card boundary");
357       assert(((uintptr_t) result) % CardTable::card_size_in_words() == 0, "PLAB start must align with card boundary");
358       if (free > usable_free) {
359         // Account for the alignment padding
360         size_t padding = (free - usable_free) * HeapWordSize;
361         increase_used(padding);
362         assert(r->affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION, "All PLABs reside in old-gen");
363         _heap->old_generation()->increase_used(padding);
364         // For verification consistency, we need to report this padding to _heap
365         _heap->increase_used(padding);
366       }
367     }
368   } else {
369     result = r->allocate(size, req);
370     if (result != nullptr) {
371       // Record actual allocation size
372       req.set_actual_size(size);
373     }
374   }
375 
376   if (result != NULL) {
377     // Allocation successful, bump stats:
378     if (req.is_mutator_alloc()) {
379       // Mutator allocations always pull from young gen.
380       _heap->young_generation()->increase_used(size * HeapWordSize);
381       increase_used(size * HeapWordSize);
382     } else {
383       assert(req.is_gc_alloc(), "Should be gc_alloc since req wasn't mutator alloc");
384 
385       // For GC allocations, we advance update_watermark because the objects relocated into this memory during
386       // evacuation are not updated during evacuation.  For both young and old regions r, it is essential that all
387       // PLABs be made parsable at the end of evacuation.  This is enabled by retiring all plabs at end of evacuation.
388       // TODO: Making a PLAB parsable involves placing a filler object in its remnant memory but does not require
389       // that the PLAB be disabled for all future purposes.  We may want to introduce a new service to make the
390       // PLABs parsable while still allowing the PLAB to serve future allocation requests that arise during the
391       // next evacuation pass.
392       r->set_update_watermark(r->top());
393 
394       if (r->affiliation() == ShenandoahRegionAffiliation::YOUNG_GENERATION) {
395         _heap->young_generation()->increase_used(size * HeapWordSize);
396       } else {
397         assert(r->affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION, "GC Alloc was not YOUNG so must be OLD");
398         assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "old-gen allocations use PLAB or shared allocation");
399         _heap->old_generation()->increase_used(size * HeapWordSize);
400         // for plabs, we'll sort out the difference between evac and promotion usage when we retire the plab
401       }
402     }
403   }

404   if (result == NULL || has_no_alloc_capacity(r)) {
405     // Region cannot afford this or future allocations. Retire it.
406     //
407     // While this seems a bit harsh, especially when this large allocation does not fit but the
408     // next small one would, we risk inflating scan times when lots of almost-full regions
409     // precede the fully-empty region where we want to allocate the entire TLAB.
410     // TODO: Record first fully-empty region, and use that for large allocations and/or organize
411     // available free segments within regions for more efficient searches for "good fit".
412 
413     // Record the remainder as allocation waste
414     if (req.is_mutator_alloc()) {
415       size_t waste = r->free();
416       if (waste > 0) {
417         increase_used(waste);
418         _heap->generation_for(req.affiliation())->increase_allocated(waste);
419         _heap->notify_mutator_alloc_words(waste >> LogHeapWordSize, true);
420       }
421     }
422 
423     size_t num = r->index();
424     _collector_free_bitmap.clear_bit(num);
425     _mutator_free_bitmap.clear_bit(num);
426     // Touched the bounds? Need to update:
427     if (touches_bounds(num)) {
428       adjust_bounds();
429     }
430     assert_bounds();
431   }
432   return result;
433 }
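
For illustration only (not part of the patch): the PLAB paths above keep both the PLAB size and the usable free space card-aligned so that remembered-set card boundaries never split a PLAB. A standalone sketch of that arithmetic, with the CardTable constants passed in as parameters (the helper names are hypothetical):

#include <cstddef>

// Largest card-aligned prefix of a region's free space, in bytes.
static size_t card_aligned_free_bytes(size_t free_bytes, size_t card_size_bytes) {
  return (free_bytes / card_size_bytes) * card_size_bytes;
}

// Round a requested PLAB size (in words) up to a whole number of cards,
// mirroring the "size = size - remnant + card_size_in_words" step above.
static size_t round_up_to_cards(size_t words, size_t card_size_words) {
  size_t remnant = words % card_size_words;
  return (remnant == 0) ? words : (words - remnant + card_size_words);
}
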
434 
435 bool ShenandoahFreeSet::touches_bounds(size_t num) const {
436   return num == _collector_leftmost || num == _collector_rightmost || num == _mutator_leftmost || num == _mutator_rightmost;
437 }
438 

453     _mutator_leftmost++;
454   }
455   while (_mutator_rightmost > 0 && !is_mutator_free(_mutator_rightmost)) {
456     _mutator_rightmost--;
457   }
458   // Rewind both collector bounds until the next bit.
459   while (_collector_leftmost < _max && !is_collector_free(_collector_leftmost)) {
460     _collector_leftmost++;
461   }
462   while (_collector_rightmost > 0 && !is_collector_free(_collector_rightmost)) {
463     _collector_rightmost--;
464   }
465 }
466 
467 HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
468   shenandoah_assert_heaplocked();
469 
470   size_t words_size = req.size();
471   size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
472 
473   assert(req.affiliation() == ShenandoahRegionAffiliation::YOUNG_GENERATION, "Humongous regions always allocated in YOUNG");
474   size_t avail_young_regions = _heap->young_generation()->adjusted_unaffiliated_regions();
475 
476   // No regions left to satisfy allocation, bye.
477   if (num > mutator_count() || (num > avail_young_regions)) {
478     return NULL;
479   }
480 
481   // Find the continuous interval of $num regions, starting from $beg and ending in $end,
482   // inclusive. Contiguous allocations are biased to the beginning.
483 
484   size_t beg = _mutator_leftmost;
485   size_t end = beg;
486 
487   while (true) {
488     if (end >= _max) {
489       // Hit the end, goodbye
490       return NULL;
491     }
492 
493     // If regions are not adjacent, then current [beg; end] is useless, and we may fast-forward.
494     // If region is not completely free, the current [beg; end] is useless, and we may fast-forward.
495     if (!is_mutator_free(end) || !can_allocate_from(_heap->get_region(end))) {
496       end++;
497       beg = end;
498       continue;
499     }
500 
501     if ((end - beg + 1) == num) {
502       // found the match
503       break;
504     }
505 
506     end++;
507   };
508 
509   size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
510   ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
511 
512   // Initialize regions:
513   for (size_t i = beg; i <= end; i++) {
514     ShenandoahHeapRegion* r = _heap->get_region(i);
515     try_recycle_trashed(r);
516 
517     assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous");
518     assert(r->is_empty(), "Should be empty");
519 
520     if (i == beg) {
521       r->make_humongous_start();
522     } else {
523       r->make_humongous_cont();
524     }
525 
526     // Trailing region may be non-full, record the remainder there
527     size_t used_words;
528     if ((i == end) && (remainder != 0)) {
529       used_words = remainder;
530     } else {
531       used_words = ShenandoahHeapRegion::region_size_words();
532     }
533 
534     r->set_affiliation(req.affiliation());
535     r->set_update_watermark(r->bottom());
536     r->set_top(r->bottom());    // Set top to bottom so we can capture TAMS
537     ctx->capture_top_at_mark_start(r);
538     r->set_top(r->bottom() + used_words); // Then change top to reflect allocation of humongous object.
539     assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom");
540     assert(ctx->is_bitmap_clear_range(ctx->top_bitmap(r), r->end()), "Bitmap above top_bitmap() must be clear");
541 
542     // Leave top_bitmap alone.  The first time a heap region is put into service, top_bitmap should equal end.
543     // Thereafter, it should represent the upper bound on parts of the bitmap that need to be cleared.
544     // ctx->clear_bitmap(r);
545     log_debug(gc)("NOT clearing bitmap for Humongous region [" PTR_FORMAT ", " PTR_FORMAT "], top_bitmap: "
546                   PTR_FORMAT " at transition from FREE to %s",
547                   p2i(r->bottom()), p2i(r->end()), p2i(ctx->top_bitmap(r)), affiliation_name(req.affiliation()));
548 
549     _mutator_free_bitmap.clear_bit(r->index());
550   }
551 
552   // While individual regions report their true use, all humongous regions are
553   // marked used in the free set.
554   increase_used(ShenandoahHeapRegion::region_size_bytes() * num);
555   if (req.affiliation() == ShenandoahRegionAffiliation::YOUNG_GENERATION) {
556     _heap->young_generation()->increase_used(words_size * HeapWordSize);
557   } else if (req.affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION) {
558     _heap->old_generation()->increase_used(words_size * HeapWordSize);
559   }
560 
561   if (remainder != 0) {
562     // Record this remainder as allocation waste
563     size_t waste = ShenandoahHeapRegion::region_size_words() - remainder;
564     _heap->notify_mutator_alloc_words(waste, true);
565     _heap->generation_for(req.affiliation())->increase_allocated(waste * HeapWordSize);
566   }
567 
568   // Allocated at left/rightmost? Move the bounds appropriately.
569   if (beg == _mutator_leftmost || end == _mutator_rightmost) {
570     adjust_bounds();
571   }
572   assert_bounds();

573   req.set_actual_size(words_size);
574   return _heap->get_region(beg)->bottom();
575 }
576 
577 bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) {
578   return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
579 }
580 
581 size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) {
582   if (r->is_trash()) {
583     // This would be recycled on allocation path
584     return ShenandoahHeapRegion::region_size_bytes();
585   } else {
586     return r->free();
587   }
588 }
589 
590 bool ShenandoahFreeSet::has_no_alloc_capacity(ShenandoahHeapRegion *r) {
591   return alloc_capacity(r) == 0;
592 }
593 
594 void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
595   if (r->is_trash()) {
596     _heap->decrease_used(r->used());
597     r->recycle();
598   }
599 }
600 
601 void ShenandoahFreeSet::recycle_trash() {
602   // lock is not reentrable, check we don't have it
603   shenandoah_assert_not_heaplocked();

604   for (size_t i = 0; i < _heap->num_regions(); i++) {
605     ShenandoahHeapRegion* r = _heap->get_region(i);
606     if (r->is_trash()) {
607       ShenandoahHeapLocker locker(_heap->lock());
608       try_recycle_trashed(r);
609     }
610     SpinPause(); // allow allocators to take the lock
611   }
612 }
613 
614 void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
615   size_t idx = r->index();
616 
617   assert(_mutator_free_bitmap.at(idx), "Should be in mutator view");
618   assert(can_allocate_from(r), "Should not be allocated");
619 
620   _mutator_free_bitmap.clear_bit(idx);
621   _collector_free_bitmap.set_bit(idx);
622   _collector_leftmost = MIN2(idx, _collector_leftmost);
623   _collector_rightmost = MAX2(idx, _collector_rightmost);
624 
625   _capacity -= alloc_capacity(r);
626 
627   if (touches_bounds(idx)) {
628     adjust_bounds();
629   }
630   assert_bounds();
631 
632   // We do not ensure that the region is no longer trash,
633   // relying on try_allocate_in(), which always comes next,
634   // to recycle trash before attempting to allocate anything in the region.
635 }
636 
637 void ShenandoahFreeSet::clear() {
638   shenandoah_assert_heaplocked();
639   clear_internal();
640 }
641 
642 void ShenandoahFreeSet::clear_internal() {
643   _mutator_free_bitmap.clear();
644   _collector_free_bitmap.clear();
645   _mutator_leftmost = _max;
646   _mutator_rightmost = 0;
647   _collector_leftmost = _max;
648   _collector_rightmost = 0;
649   _capacity = 0;
650   _used = 0;
651 }
652 
653 void ShenandoahFreeSet::rebuild() {
654   shenandoah_assert_heaplocked();
655   clear();
656 
657   log_debug(gc)("Rebuilding FreeSet");
658   for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
659     ShenandoahHeapRegion* region = _heap->get_region(idx);
660     if (region->is_alloc_allowed() || region->is_trash()) {
661       assert(!region->is_cset(), "Shouldn't be adding those to the free set");
662 
663       // Do not add regions that would surely fail allocation
664       if (has_no_alloc_capacity(region)) continue;
665 
666       _capacity += alloc_capacity(region);
667       assert(_used <= _capacity, "must not use more than we have");
668 
669       assert(!is_mutator_free(idx), "We are about to add it, it shouldn't be there already");
670       _mutator_free_bitmap.set_bit(idx);
671 
672       log_debug(gc)("  Setting Region " SIZE_FORMAT " _mutator_free_bitmap bit to true", idx);
673     }
674   }
675 
676   // Evac reserve: reserve trailing space for evacuations
677   if (!_heap->mode()->is_generational()) {
678     size_t to_reserve = (_heap->max_capacity() / 100) * ShenandoahEvacReserve;
679     reserve_regions(to_reserve);
680   } else {
681     size_t young_reserve = (_heap->young_generation()->max_capacity() / 100) * ShenandoahEvacReserve;
682     // Note that all allocations performed from old-gen are performed by GC, generally using PLABs for both
683     // promotions and evacuations.  The partition between which old memory is reserved for evacuation and
684     // which is reserved for promotion is enforced using thread-local variables that prescribe intentions within
685     // each PLAB.  We do not reserve any old-gen memory, in order to facilitate the loaning of old-gen memory
686     // to young-gen purposes.
687     size_t old_reserve = 0;
688     size_t to_reserve = young_reserve + old_reserve;
689     reserve_regions(to_reserve);
690   }
691 
692   recompute_bounds();
693   assert_bounds();
694 }
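
For illustration only (not part of the patch): a sketch of the reserve sizing done in rebuild(). ShenandoahEvacReserve is a percentage, and dividing the capacity by 100 before multiplying keeps the intermediate value from overflowing on very large heaps; in generational mode only young-gen capacity contributes, with old_reserve held at zero as in the patch above. compute_evac_reserve is an illustrative helper, not a HotSpot function.

#include <cstddef>

static size_t compute_evac_reserve(bool generational,
                                   size_t heap_max_capacity_bytes,
                                   size_t young_max_capacity_bytes,
                                   size_t evac_reserve_percent) {
  if (!generational) {
    return (heap_max_capacity_bytes / 100) * evac_reserve_percent;
  }
  size_t young_reserve = (young_max_capacity_bytes / 100) * evac_reserve_percent;
  size_t old_reserve   = 0;  // old-gen space is managed through PLAB accounting instead
  return young_reserve + old_reserve;
}
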
695 
696 void ShenandoahFreeSet::reserve_regions(size_t to_reserve) {
697   size_t reserved = 0;
698 
699   for (size_t idx = _heap->num_regions() - 1; idx > 0; idx--) {
700     if (reserved >= to_reserve) break;
701 
702     ShenandoahHeapRegion* region = _heap->get_region(idx);
703     if (_mutator_free_bitmap.at(idx) && can_allocate_from(region)) {
704       _mutator_free_bitmap.clear_bit(idx);
705       _collector_free_bitmap.set_bit(idx);
706       size_t ac = alloc_capacity(region);
707       _capacity -= ac;
708       reserved += ac;
709       log_debug(gc)("  Shifting region " SIZE_FORMAT " from mutator_free to collector_free", idx);
710     }
711   }



712 }
713 
714 void ShenandoahFreeSet::log_status() {
715   shenandoah_assert_heaplocked();
716 
717   LogTarget(Info, gc, ergo) lt;
718   if (lt.is_enabled()) {
719     ResourceMark rm;
720     LogStream ls(lt);
721 
722     {
723       size_t last_idx = 0;
724       size_t max = 0;
725       size_t max_contig = 0;
726       size_t empty_contig = 0;
727 
728       size_t total_used = 0;
729       size_t total_free = 0;
730       size_t total_free_ext = 0;
731 

789       for (size_t idx = _collector_leftmost; idx <= _collector_rightmost; idx++) {
790         if (is_collector_free(idx)) {
791           ShenandoahHeapRegion *r = _heap->get_region(idx);
792           size_t free = alloc_capacity(r);
793           max = MAX2(max, free);
794           total_free += free;
795         }
796       }
797 
798       ls.print_cr("Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s",
799                   byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
800                   byte_size_in_proper_unit(max),        proper_unit_for_byte_size(max));
801     }
802   }
803 }
804 
805 HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
806   shenandoah_assert_heaplocked();
807   assert_bounds();
808 
809   // Allocation request is known to satisfy all memory budgeting constraints.
810   if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
811     switch (req.type()) {
812       case ShenandoahAllocRequest::_alloc_shared:
813       case ShenandoahAllocRequest::_alloc_shared_gc:
814         in_new_region = true;
815         return allocate_contiguous(req);
816       case ShenandoahAllocRequest::_alloc_plab:
817       case ShenandoahAllocRequest::_alloc_gclab:
818       case ShenandoahAllocRequest::_alloc_tlab:
819         in_new_region = false;
820         assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
821                req.size(), ShenandoahHeapRegion::humongous_threshold_words());
822         return NULL;
823       default:
824         ShouldNotReachHere();
825         return NULL;
826     }
827   } else {
828     return allocate_single(req, in_new_region);
829   }
830 }
831 
832 size_t ShenandoahFreeSet::unsafe_peek_free() const {
833   // Deliberately not locked, this method is unsafe when free set is modified.
834 
835   for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
836     if (index < _max && is_mutator_free(index)) {