
src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp


  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "gc/shared/tlab_globals.hpp"

 27 #include "gc/shenandoah/shenandoahFreeSet.hpp"
 28 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 29 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
 30 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"


 31 #include "logging/logStream.hpp"
 32 #include "memory/resourceArea.hpp"
 33 #include "runtime/orderAccess.hpp"
 34 
 35 ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
 36   _heap(heap),
 37   _mutator_free_bitmap(max_regions, mtGC),
 38   _collector_free_bitmap(max_regions, mtGC),
 39   _max(max_regions)
 40 {
 41   clear_internal();
 42 }
 43 
 44 void ShenandoahFreeSet::increase_used(size_t num_bytes) {
 45   shenandoah_assert_heaplocked();
 46   _used += num_bytes;
 47 
 48   assert(_used <= _capacity, "must not use more than we have: used: " SIZE_FORMAT
 49          ", capacity: " SIZE_FORMAT ", num_bytes: " SIZE_FORMAT, _used, _capacity, num_bytes);
 50 }
 51 
 52 bool ShenandoahFreeSet::is_mutator_free(size_t idx) const {
 53   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
 54           idx, _max, _mutator_leftmost, _mutator_rightmost);
 55   return _mutator_free_bitmap.at(idx);
 56 }
 57 
 58 bool ShenandoahFreeSet::is_collector_free(size_t idx) const {
 59   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
 60           idx, _max, _collector_leftmost, _collector_rightmost);
 61   return _collector_free_bitmap.at(idx);
 62 }
 63 
 64 HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
 65   // Scan the bitmap looking for a first fit.
 66   //
 67   // Leftmost and rightmost bounds provide enough caching to walk bitmap efficiently. Normally,
 68   // we would find the region to allocate in right away.
 69   //
 70   // Allocations are biased: new application allocs go to beginning of the heap, and GC allocs
 71   // go to the end. This makes application allocation faster, because we would clear lots
 72   // of regions from the beginning most of the time.
 73   //
 74   // Free set maintains mutator and collector views, and normally they allocate in their views only,
 75   // unless we special-case stealing and mixed allocations.
 76 


 77   switch (req.type()) {
 78     case ShenandoahAllocRequest::_alloc_tlab:
 79     case ShenandoahAllocRequest::_alloc_shared: {
 80 
 81       // Try to allocate in the mutator view
 82       for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
 83         if (is_mutator_free(idx)) {

 84           HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
 85           if (result != NULL) {
 86             return result;
 87           }
 88         }
 89       }
 90 
 91       // There is no recovery. Mutator does not touch collector view at all.
 92       break;
 93     }
 94     case ShenandoahAllocRequest::_alloc_gclab:
 95     case ShenandoahAllocRequest::_alloc_shared_gc: {
 96       // size_t is unsigned, need to dodge underflow when _leftmost = 0
 97 
 98       // Fast-path: try to allocate in the collector view first
 99       for (size_t c = _collector_rightmost + 1; c > _collector_leftmost; c--) {
100         size_t idx = c - 1;
101         if (is_collector_free(idx)) {
102           HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
103           if (result != NULL) {
104             return result;
105           }
106         }




107       }
108 
109       // No dice. Can we borrow space from mutator view?
110       if (!ShenandoahEvacReserveOverflow) {
111         return NULL;
112       }
113 
114       // Try to steal an empty region from the mutator view
115       for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
116         size_t idx = c - 1;
117         if (is_mutator_free(idx)) {
118           ShenandoahHeapRegion* r = _heap->get_region(idx);
119           if (can_allocate_from(r)) {
120             flip_to_gc(r);
121             HeapWord *result = try_allocate_in(r, req, in_new_region);
122             if (result != NULL) {
123               return result;
124             }
125           }
126         }
127       }
128 
129       // No dice. Do not try to mix mutator and GC allocations, because
130       // URWM moves due to GC allocations would expose unparsable mutator
131       // allocations.
132 
133       break;
134     }
135     default:
136       ShouldNotReachHere();
137   }
138 
139   return NULL;
140 }
141 
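// [Editor's sketch -- illustrative only, not part of this file or of the patch under review]
// A minimal model of the biased first-fit scan in allocate_single() above: mutator requests
// walk the free bitmap left to right, GC requests walk it right to left, and the reverse loop
// counts `c` down from rightmost + 1 so that `idx = c - 1` never underflows an unsigned index
// when leftmost == 0. The bitmap type, the bounds, and the find_first_fit name are assumptions
// made for the sketch.

#include <cstddef>
#include <vector>

static size_t find_first_fit(const std::vector<bool>& free_bitmap,
                             size_t leftmost, size_t rightmost, bool gc_alloc) {
  const size_t not_found = free_bitmap.size();
  if (!gc_alloc) {
    // Mutator path: take the leftmost free region.
    for (size_t idx = leftmost; idx <= rightmost && idx < free_bitmap.size(); idx++) {
      if (free_bitmap[idx]) return idx;
    }
  } else {
    // GC path: take the rightmost free region, dodging unsigned underflow at leftmost == 0.
    for (size_t c = rightmost + 1; c > leftmost; c--) {
      size_t idx = c - 1;
      if (idx < free_bitmap.size() && free_bitmap[idx]) return idx;
    }
  }
  return not_found;
}
// [End of editor's sketch]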
142 HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
143   assert (!has_no_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());
144 
145   if (_heap->is_concurrent_weak_root_in_progress() &&
146       r->is_trash()) {
147     return NULL;
148   }
149 
150   try_recycle_trashed(r);
151 
152   in_new_region = r->is_empty();
153 
154   HeapWord* result = NULL;
155   size_t size = req.size();
156 

157   if (ShenandoahElasticTLAB && req.is_lab_alloc()) {
158     size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
159     if (size > free) {
160       size = free;
161     }
162     if (size >= req.min_size()) {
163       result = r->allocate(size, req.type());
164       assert (result != NULL, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size);
165     }
166   } else {
167     result = r->allocate(size, req.type());
168   }
169 
170   if (result != NULL) {
171     // Allocation successful, bump stats:
172     if (req.is_mutator_alloc()) {
173       increase_used(size * HeapWordSize);
174     }
175 
176     // Record actual allocation size
177     req.set_actual_size(size);
178 
179     if (req.is_gc_alloc()) {
180       r->set_update_watermark(r->top());
181     }
182   }
183 
184   if (result == NULL || has_no_alloc_capacity(r)) {
185     // Region cannot afford this or future allocations. Retire it.
186     //
187     // While this seems a bit harsh, especially when this large allocation does not fit but the
188     // next small one would, we risk inflating scan times when lots of almost-full regions
189     // precede the fully-empty region where we want to allocate the entire TLAB.
190     // TODO: Record first fully-empty region, and use that for large allocations

191 
192     // Record the remainder as allocation waste
193     if (req.is_mutator_alloc()) {
194       size_t waste = r->free();
195       if (waste > 0) {
196         increase_used(waste);

197         _heap->notify_mutator_alloc_words(waste >> LogHeapWordSize, true);
198       }
199     }
200 
201     size_t num = r->index();
202     _collector_free_bitmap.clear_bit(num);
203     _mutator_free_bitmap.clear_bit(num);
204     // Touched the bounds? Need to update:
205     if (touches_bounds(num)) {
206       adjust_bounds();
207     }
208     assert_bounds();
209   }
210   return result;
211 }
212 
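// [Editor's sketch -- illustrative only, not part of this file or of the patch under review]
// A minimal model of the ShenandoahElasticTLAB sizing in try_allocate_in() above: the requested
// LAB size is shrunk to whatever the region still has free, but is only handed out if it still
// meets the request's minimum size. The word size, alignment and helper name are assumptions.
// For example, a 32768-word TLAB request with a 1024-word minimum against a region with
// 98304 bytes (12288 words) free is shrunk to 12288 words rather than failing outright.

#include <cstddef>

// Returns the number of words to hand out, or 0 if the region cannot satisfy req_min_words.
static size_t elastic_lab_size(size_t region_free_bytes, size_t req_size_words,
                               size_t req_min_words,
                               size_t heap_word_size = 8, size_t min_obj_alignment = 1) {
  size_t free_words = region_free_bytes / heap_word_size;
  free_words -= free_words % min_obj_alignment;   // align_down(free, MinObjAlignment)
  size_t size = req_size_words;
  if (size > free_words) {
    size = free_words;                            // shrink the LAB to what is left in the region
  }
  return (size >= req_min_words) ? size : 0;      // otherwise this region is skipped
}
// [End of editor's sketch]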
213 bool ShenandoahFreeSet::touches_bounds(size_t num) const {
214   return num == _collector_leftmost || num == _collector_rightmost || num == _mutator_leftmost || num == _mutator_rightmost;
215 }
216 

265       return NULL;
266     }
267 
268     // If regions are not adjacent, then the current [beg; end] is useless, and we may fast-forward.
269     // If the region is not completely free, the current [beg; end] is useless, and we may fast-forward.
270     if (!is_mutator_free(end) || !can_allocate_from(_heap->get_region(end))) {
271       end++;
272       beg = end;
273       continue;
274     }
275 
276     if ((end - beg + 1) == num) {
277       // found the match
278       break;
279     }
280 
281     end++;
282   };
283 
284   size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();

285 
286   // Initialize regions:
287   for (size_t i = beg; i <= end; i++) {
288     ShenandoahHeapRegion* r = _heap->get_region(i);
289     try_recycle_trashed(r);
290 
291     assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous");
292     assert(r->is_empty(), "Should be empty");
293 
294     if (i == beg) {
295       r->make_humongous_start();
296     } else {
297       r->make_humongous_cont();
298     }
299 
300     // Trailing region may be non-full, record the remainder there
301     size_t used_words;
302     if ((i == end) && (remainder != 0)) {
303       used_words = remainder;
304     } else {
305       used_words = ShenandoahHeapRegion::region_size_words();
306     }
307 
308     r->set_top(r->bottom() + used_words);
309 
310     _mutator_free_bitmap.clear_bit(r->index());
311   }
312 
313   // While individual regions report their true use, all humongous regions are
314   // marked used in the free set.
315   increase_used(ShenandoahHeapRegion::region_size_bytes() * num);





316 
317   if (remainder != 0) {
318     // Record this remainder as allocation waste
319     _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);


320   }
321 
322   // Allocated at left/rightmost? Move the bounds appropriately.
323   if (beg == _mutator_leftmost || end == _mutator_rightmost) {
324     adjust_bounds();
325   }
326   assert_bounds();
327 
328   req.set_actual_size(words_size);
329   return _heap->get_region(beg)->bottom();
330 }
331 
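// [Editor's sketch -- illustrative only, not part of this file or of the patch under review]
// A worked example of the remainder accounting in allocate_contiguous() above, assuming
// 262144-word (2 MB) regions. The region count uses the usual ceiling division; the real code
// computes it before the fragment shown here. A 600000-word humongous request spans 3 regions;
// the trailing region holds only 600000 & (262144 - 1) = 75712 words, the remaining 186432
// words of that region are reported as allocation waste, and the free set still charges all
// 3 regions as fully used.

#include <cassert>
#include <cstddef>

int main() {
  const size_t region_size_words = size_t(1) << 18;            // 262144 words, assumed
  const size_t words_size        = 600000;                      // humongous request, in words

  size_t num       = (words_size + region_size_words - 1) / region_size_words;
  size_t remainder = words_size & (region_size_words - 1);      // region_size_words_mask()

  assert(num == 3);
  assert(remainder == 75712);                                    // used words in trailing region
  size_t waste_words = (remainder != 0) ? region_size_words - remainder : 0;
  assert(waste_words == 186432);                                 // reported as mutator alloc waste
  return 0;
}
// [End of editor's sketch]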
332 bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) {
333   return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
334 }
335 
336 size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) {
337   if (r->is_trash()) {
338     // This would be recycled on allocation path
339     return ShenandoahHeapRegion::region_size_bytes();
340   } else {
341     return r->free();
342   }
343 }
344 
345 bool ShenandoahFreeSet::has_no_alloc_capacity(ShenandoahHeapRegion *r) {
346   return alloc_capacity(r) == 0;
347 }
348 
349 void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
350   if (r->is_trash()) {
351     _heap->decrease_used(r->used());
352     r->recycle();
353   }
354 }
355 
356 void ShenandoahFreeSet::recycle_trash() {
357   // lock is not reentrant, check we don't hold it
358   shenandoah_assert_not_heaplocked();
359 
360   for (size_t i = 0; i < _heap->num_regions(); i++) {
361     ShenandoahHeapRegion* r = _heap->get_region(i);
362     if (r->is_trash()) {
363       ShenandoahHeapLocker locker(_heap->lock());
364       try_recycle_trashed(r);
365     }
366     SpinPause(); // allow allocators to take the lock
367   }
368 }
369 
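// [Editor's sketch -- illustrative only, not part of this file or of the patch under review]
// recycle_trash() above deliberately takes and releases the heap lock once per region, with a
// SpinPause() between iterations, so a long trash-recycling pass never starves allocators that
// are waiting on the same lock. A generic shape of that pattern, with hypothetical names:

#include <mutex>
#include <vector>

struct Region { bool trash = false; };

static void recycle_all(std::vector<Region>& regions, std::mutex& heap_lock) {
  for (Region& r : regions) {
    if (r.trash) {
      std::lock_guard<std::mutex> locker(heap_lock);  // short critical section per region
      r.trash = false;                                // stand-in for try_recycle_trashed()
    }
    // The real code calls SpinPause() here to give allocators a chance to grab the lock.
  }
}
// [End of editor's sketch]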
370 void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
371   size_t idx = r->index();
372 
373   assert(_mutator_free_bitmap.at(idx), "Should be in mutator view");
374   assert(can_allocate_from(r), "Should not be allocated");
375 
376   _mutator_free_bitmap.clear_bit(idx);
377   _collector_free_bitmap.set_bit(idx);
378   _collector_leftmost = MIN2(idx, _collector_leftmost);
379   _collector_rightmost = MAX2(idx, _collector_rightmost);
380 
381   _capacity -= alloc_capacity(r);
382 
383   if (touches_bounds(idx)) {
384     adjust_bounds();
385   }
386   assert_bounds();




387 }
388 
389 void ShenandoahFreeSet::clear() {
390   shenandoah_assert_heaplocked();
391   clear_internal();
392 }
393 
394 void ShenandoahFreeSet::clear_internal() {
395   _mutator_free_bitmap.clear();
396   _collector_free_bitmap.clear();
397   _mutator_leftmost = _max;
398   _mutator_rightmost = 0;
399   _collector_leftmost = _max;
400   _collector_rightmost = 0;
401   _capacity = 0;
402   _used = 0;
403 }
404 
405 void ShenandoahFreeSet::rebuild() {
406   shenandoah_assert_heaplocked();
407   clear();
408 

409   for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
410     ShenandoahHeapRegion* region = _heap->get_region(idx);
411     if (region->is_alloc_allowed() || region->is_trash()) {
412       assert(!region->is_cset(), "Shouldn't be adding those to the free set");
413 
414       // Do not add regions that would surely fail allocation
415       if (has_no_alloc_capacity(region)) continue;
416 
417       _capacity += alloc_capacity(region);
418       assert(_used <= _capacity, "must not use more than we have");
419 
420       assert(!is_mutator_free(idx), "We are about to add it, it shouldn't be there already");
421       _mutator_free_bitmap.set_bit(idx);


422     }
423   }
424 
425   // Evac reserve: reserve trailing space for evacuations
426   size_t to_reserve = _heap->max_capacity() / 100 * ShenandoahEvacReserve;
427   size_t reserved = 0;
428 
429   for (size_t idx = _heap->num_regions() - 1; idx > 0; idx--) {
430     if (reserved >= to_reserve) break;
431 
432     ShenandoahHeapRegion* region = _heap->get_region(idx);
433     if (_mutator_free_bitmap.at(idx) && can_allocate_from(region)) {
434       _mutator_free_bitmap.clear_bit(idx);
435       _collector_free_bitmap.set_bit(idx);
436       size_t ac = alloc_capacity(region);
437       _capacity -= ac;
438       reserved += ac;

439     }
440   }
441 
442   recompute_bounds();
443   assert_bounds();
444 }
445 
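// [Editor's sketch -- illustrative only, not part of this file or of the patch under review]
// The evacuation reserve computed in rebuild() above is a plain percentage of maximum heap
// capacity. With an assumed 4 GB heap, 2 MB regions, and an assumed ShenandoahEvacReserve of 5,
// rebuild() keeps flipping trailing free regions from the mutator view to the collector view
// until roughly 204 MB of allocation capacity is set aside. 64-bit size_t is assumed.

#include <cassert>
#include <cstddef>

int main() {
  const size_t max_capacity = size_t(4) * 1024 * 1024 * 1024;   // assumed heap size
  const size_t region_bytes = size_t(2) * 1024 * 1024;          // assumed region size
  const size_t evac_reserve = 5;                                // assumed ShenandoahEvacReserve

  size_t to_reserve = max_capacity / 100 * evac_reserve;        // 214748360 bytes, ~204.8 MB
  assert(to_reserve == 214748360u);

  // Regions are reserved from the highest index downward until the target is met.
  size_t regions_needed = (to_reserve + region_bytes - 1) / region_bytes;
  assert(regions_needed == 103);                                // 103 trailing empty regions
  return 0;
}
// [End of editor's sketch]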
446 void ShenandoahFreeSet::log_status() {
447   shenandoah_assert_heaplocked();
448 
449   LogTarget(Info, gc, ergo) lt;
450   if (lt.is_enabled()) {
451     ResourceMark rm;
452     LogStream ls(lt);
453 
454     {
455       size_t last_idx = 0;
456       size_t max = 0;
457       size_t max_contig = 0;
458       size_t empty_contig = 0;

521       for (size_t idx = _collector_leftmost; idx <= _collector_rightmost; idx++) {
522         if (is_collector_free(idx)) {
523           ShenandoahHeapRegion *r = _heap->get_region(idx);
524           size_t free = alloc_capacity(r);
525           max = MAX2(max, free);
526           total_free += free;
527         }
528       }
529 
530       ls.print_cr("Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s",
531                   byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
532                   byte_size_in_proper_unit(max),        proper_unit_for_byte_size(max));
533     }
534   }
535 }
536 
537 HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
538   shenandoah_assert_heaplocked();
539   assert_bounds();
540 

541   if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
542     switch (req.type()) {
543       case ShenandoahAllocRequest::_alloc_shared:
544       case ShenandoahAllocRequest::_alloc_shared_gc:
545         in_new_region = true;
546         return allocate_contiguous(req);

547       case ShenandoahAllocRequest::_alloc_gclab:
548       case ShenandoahAllocRequest::_alloc_tlab:
549         in_new_region = false;
550         assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
551                req.size(), ShenandoahHeapRegion::humongous_threshold_words());
552         return NULL;
553       default:
554         ShouldNotReachHere();
555         return NULL;
556     }
557   } else {
558     return allocate_single(req, in_new_region);
559   }
560 }
561 
562 size_t ShenandoahFreeSet::unsafe_peek_free() const {
563   // Deliberately not locked, this method is unsafe when free set is modified.
564 
565   for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
566     if (index < _max && is_mutator_free(index)) {

  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "gc/shared/tlab_globals.hpp"
 27 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
 28 #include "gc/shenandoah/shenandoahFreeSet.hpp"
 29 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 30 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
 31 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
 32 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
 33 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
 34 #include "logging/logStream.hpp"
 35 #include "memory/resourceArea.hpp"
 36 #include "runtime/orderAccess.hpp"
 37 
 38 ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
 39   _heap(heap),
 40   _mutator_free_bitmap(max_regions, mtGC),
 41   _collector_free_bitmap(max_regions, mtGC),
 42   _max(max_regions)
 43 {
 44   clear_internal();
 45 }
 46 
 47 void ShenandoahFreeSet::increase_used(size_t num_bytes) {
 48   shenandoah_assert_heaplocked();
 49   _used += num_bytes;
 50 
 51   assert(_used <= _capacity, "must not use more than we have: used: " SIZE_FORMAT
 52          ", capacity: " SIZE_FORMAT ", num_bytes: " SIZE_FORMAT, _used, _capacity, num_bytes);
 53 }
 54 
 55 bool ShenandoahFreeSet::is_mutator_free(size_t idx) const {
 56   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
 57           idx, _max, _mutator_leftmost, _mutator_rightmost);
 58   return _mutator_free_bitmap.at(idx);
 59 }
 60 
 61 bool ShenandoahFreeSet::is_collector_free(size_t idx) const {
 62   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
 63           idx, _max, _collector_leftmost, _collector_rightmost);
 64   return _collector_free_bitmap.at(idx);
 65 }
 66 
 67 HeapWord* ShenandoahFreeSet::allocate_with_affiliation(ShenandoahRegionAffiliation affiliation, ShenandoahAllocRequest& req, bool& in_new_region) {
 68   for (size_t c = _collector_rightmost + 1; c > _collector_leftmost; c--) {
 69     // size_t is unsigned, need to dodge underflow when _leftmost = 0
 70     size_t idx = c - 1;
 71     if (is_collector_free(idx)) {
 72       ShenandoahHeapRegion* r = _heap->get_region(idx);
 73       if (r->affiliation() == affiliation) {
 74         HeapWord* result = try_allocate_in(r, req, in_new_region);
 75         if (result != NULL) {
 76           return result;
 77         }
 78       }
 79     }
 80   }
 81   return NULL;
 82 }
 83 
 84 HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
 85   // Scan the bitmap looking for a first fit.
 86   //
 87   // Leftmost and rightmost bounds provide enough caching to walk bitmap efficiently. Normally,
 88   // we would find the region to allocate in right away.
 89   //
 90   // Allocations are biased: new application allocs go to beginning of the heap, and GC allocs
 91   // go to the end. This makes application allocation faster, because we would clear lots
 92   // of regions from the beginning most of the time.
 93   //
 94   // Free set maintains mutator and collector views, and normally they allocate in their views only,
 95   // unless we special-case stealing and mixed allocations.
 96 
 97   // Overwrite with non-zero (non-NULL) values only if necessary for allocation bookkeeping.
 98 
 99   switch (req.type()) {
100     case ShenandoahAllocRequest::_alloc_tlab:
101     case ShenandoahAllocRequest::_alloc_shared: {

102       // Try to allocate in the mutator view
103       for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
104         if (is_mutator_free(idx)) {
105           // try_allocate_in() increases used if the allocation is successful.
106           HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
107           if (result != NULL) {
108             return result;
109           }
110         }
111       }
112 
113       // There is no recovery. Mutator does not touch collector view at all.
114       break;
115     }
116     case ShenandoahAllocRequest::_alloc_gclab:
117       // GCLABs are for evacuation so we must be in evacuation phase.  If this allocation is successful, increment
118       // the relevant evac_expended rather than used value.
119 
120     case ShenandoahAllocRequest::_alloc_plab:
121       // PLABs always reside in old-gen and are only allocated during evacuation phase.
122 
123     case ShenandoahAllocRequest::_alloc_shared_gc: {
124       // First try to fit into a region that is already in use in the same generation.
125       HeapWord* result = allocate_with_affiliation(req.affiliation(), req, in_new_region);
126       if (result != NULL) {
127         return result;
128       }
129       // Then try a free region that is dedicated to GC allocations.
130       result = allocate_with_affiliation(FREE, req, in_new_region);
131       if (result != NULL) {
132         return result;
133       }
134 
135       // No dice. Can we borrow space from mutator view?
136       if (!ShenandoahEvacReserveOverflow) {
137         return NULL;
138       }
139 
140       // Try to steal an empty region from the mutator view.
141       for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
142         size_t idx = c - 1;
143         if (is_mutator_free(idx)) {
144           ShenandoahHeapRegion* r = _heap->get_region(idx);
145           if (can_allocate_from(r)) {
146             flip_to_gc(r);
147             HeapWord *result = try_allocate_in(r, req, in_new_region);
148             if (result != NULL) {
149               return result;
150             }
151           }
152         }
153       }
154 
155       // No dice. Do not try to mix mutator and GC allocations, because
156       // URWM moves due to GC allocations would expose unparsable mutator
157       // allocations.

158       break;
159     }
160     default:
161       ShouldNotReachHere();
162   }

163   return NULL;
164 }
165 
166 HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
167   assert (!has_no_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());
168 
169   if (_heap->is_concurrent_weak_root_in_progress() &&
170       r->is_trash()) {
171     return NULL;
172   }
173 
174   try_recycle_trashed(r);
175 
176   if (r->affiliation() == ShenandoahRegionAffiliation::FREE) {
177     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
178 
179     r->set_affiliation(req.affiliation());
180 
181     if (r->is_old()) {
182       // Any OLD region allocated during concurrent coalesce-and-fill does not need to be coalesced and filled because
183       // all objects allocated within this region are above TAMS (and thus are implicitly marked).  In case this is an
184       // OLD region and concurrent preparation for mixed evacuations visits this region before the start of the next
185       // old-gen concurrent mark (i.e. this region is allocated following the start of old-gen concurrent mark but before
186       // concurrent preparations for mixed evacuations are completed), we mark this region as not requiring any
187       // coalesce-and-fill processing.
188       r->end_preemptible_coalesce_and_fill();
189       _heap->clear_cards_for(r);
190     }
191 
192     assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom");
193     assert(ctx->is_bitmap_clear_range(ctx->top_bitmap(r), r->end()), "Bitmap above top_bitmap() must be clear");
194 
195     // Leave top_bitmap alone.  The first time a heap region is put into service, top_bitmap should equal end.
196     // Thereafter, it should represent the upper bound on parts of the bitmap that need to be cleared.
197     log_debug(gc)("NOT clearing bitmap for region " SIZE_FORMAT ", top_bitmap: "
198                   PTR_FORMAT " at transition from FREE to %s",
199                   r->index(), p2i(ctx->top_bitmap(r)), affiliation_name(req.affiliation()));
200   } else if (r->affiliation() != req.affiliation()) {
201     return NULL;
202   }
203 
204   in_new_region = r->is_empty();
205 
206   HeapWord* result = NULL;
207   size_t size = req.size();
208 
209   // req.size() is in words, r->free() is in bytes.
210   if (ShenandoahElasticTLAB && req.is_lab_alloc()) {
211     if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
212       // Need to ensure that PLABs are aligned on a multiple of the card size.
213       size_t free = r->free();
214       size_t usable_free = (free / CardTable::card_size()) << CardTable::card_shift();
215       if ((free != usable_free) && (free - usable_free < ShenandoahHeap::min_fill_size() * HeapWordSize)) {
216         // We'll have to add another card's memory to the padding
217         usable_free -= CardTable::card_size();
218       }
219       free /= HeapWordSize;
220       usable_free /= HeapWordSize;
221       if (size > usable_free) {
222         size = usable_free;
223       }
224       if (size >= req.min_size()) {
225         result = r->allocate_aligned(size, req, CardTable::card_size());
226         if (result != nullptr && free > usable_free) {
227           // Account for the alignment padding
228           size_t padding = (free - usable_free) * HeapWordSize;
229           increase_used(padding);
230           assert(r->affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION, "All PLABs reside in old-gen");
231           _heap->old_generation()->increase_used(padding);
232           // For verification consistency, we need to report this padding to _heap
233           _heap->increase_used(padding);
234         }
235       }
236     } else {
237       // This is a GCLAB or a TLAB allocation
238       size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
239       if (size > free) {
240         size = free;
241       }
242       if (size >= req.min_size()) {
243         result = r->allocate(size, req);
244         assert (result != NULL, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size);
245       }
246     }
247   } else if (req.is_lab_alloc() && req.type() == ShenandoahAllocRequest::_alloc_plab) {
248     size_t free = r->free();
249     size_t usable_free = (free / CardTable::card_size()) << CardTable::card_shift();
250     if ((free != usable_free) && (free - usable_free < ShenandoahHeap::min_fill_size() * HeapWordSize)) {
251       // We'll have to add another card's memory to the padding
252       usable_free -= CardTable::card_size();
253     }
254     free /= HeapWordSize;
255     usable_free /= HeapWordSize;
256     if (size <= usable_free) {
257       assert(size % CardTable::card_size_in_words() == 0, "PLAB size must be multiple of remembered set card size");
258 
259       result = r->allocate_aligned(size, req, CardTable::card_size());
260       if (result != nullptr) {
261         // Account for the alignment padding
262         size_t padding = (free - usable_free) * HeapWordSize;
263         increase_used(padding);
264         assert(r->affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION, "All PLABs reside in old-gen");
265         _heap->old_generation()->increase_used(padding);
266         // For verification consistency, we need to report this padding to _heap
267         _heap->increase_used(padding);
268       }
269     }
270   } else {
271     result = r->allocate(size, req);
272   }
273 
274   if (result != NULL) {





275     // Record actual allocation size
276     req.set_actual_size(size);
277 
278     // Allocation successful, bump stats:
279     if (req.is_mutator_alloc()) {
280       // Mutator allocations always pull from young gen.
281       _heap->young_generation()->increase_used(size * HeapWordSize);
282       increase_used(size * HeapWordSize);
283     } else {
284       // assert(req.is_gc_alloc(), "Should be gc_alloc since req wasn't mutator alloc");
285 
286       // For GC allocations, we advance update_watermark because the objects relocated into this memory during
287       // evacuation are not updated during evacuation.  For both young and old regions r, it is essential that all
288       // PLABs be made parsable at the end of evacuation.  This is enabled by retiring all plabs at end of evacuation.
289       // TODO: Making a PLAB parsable involves placing a filler object in its remnant memory but does not require
290       // that the PLAB be disabled for all future purposes.  We may want to introduce a new service to make the
291       // PLABs parsable while still allowing the PLAB to serve future allocation requests that arise during the
292       // next evacuation pass.
293       r->set_update_watermark(r->top());
294 
295       if (r->affiliation() == ShenandoahRegionAffiliation::YOUNG_GENERATION) {
296         _heap->young_generation()->increase_used(size * HeapWordSize);
297       } else {
298         assert(r->affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION, "GC Alloc was not YOUNG so must be OLD");
299         assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "old-gen allocations use PLAB or shared allocation");
300         _heap->old_generation()->increase_used(size * HeapWordSize);
301         // For PLABs, we'll sort out the difference between evacuation and promotion usage when we retire the PLAB
302       }
303     }
304   }

305   if (result == NULL || has_no_alloc_capacity(r)) {
306     // Region cannot afford this or future allocations. Retire it.
307     //
308     // While this seems a bit harsh, especially when this large allocation does not fit but the
309     // next small one would, we risk inflating scan times when lots of almost-full regions
310     // precede the fully-empty region where we want to allocate the entire TLAB.
311     // TODO: Record first fully-empty region, and use that for large allocations and/or organize
312     // available free segments within regions for more efficient searches for "good fit".
313 
314     // Record the remainder as allocation waste
315     if (req.is_mutator_alloc()) {
316       size_t waste = r->free();
317       if (waste > 0) {
318         increase_used(waste);
319         _heap->generation_for(req.affiliation())->increase_allocated(waste);
320         _heap->notify_mutator_alloc_words(waste >> LogHeapWordSize, true);
321       }
322     }
323 
324     size_t num = r->index();
325     _collector_free_bitmap.clear_bit(num);
326     _mutator_free_bitmap.clear_bit(num);
327     // Touched the bounds? Need to update:
328     if (touches_bounds(num)) {
329       adjust_bounds();
330     }
331     assert_bounds();
332   }
333   return result;
334 }
335 
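// [Editor's sketch -- illustrative only, not part of this file or of the patch under review]
// A minimal model of the PLAB card-alignment math in try_allocate_in() above, assuming a
// 512-byte card, an 8-byte heap word, and a 2-word minimum filler. Free space is rounded down
// to a whole number of cards; if the leftover padding would be too small to hold a filler
// object, one more card is sacrificed so the padding stays fillable. That padding is what the
// code above then charges as used for accounting.

#include <cassert>
#include <cstddef>

int main() {
  const size_t card_size      = 512;                   // assumed CardTable::card_size(), bytes
  const size_t heap_word_size = 8;                     // assumed HeapWordSize
  const size_t min_fill_bytes = 2 * heap_word_size;    // assumed minimum filler object size

  size_t free        = 99848;                          // region free bytes (example value)
  size_t usable_free = (free / card_size) * card_size; // 99840: round down to a card boundary
  if (free != usable_free && free - usable_free < min_fill_bytes) {
    usable_free -= card_size;                          // 99328: leave room for a filler object
  }
  assert(usable_free == 99328);
  size_t padding = free - usable_free;                 // 520 bytes charged as alignment padding
  assert(padding == 520);
  return 0;
}
// [End of editor's sketch]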
336 bool ShenandoahFreeSet::touches_bounds(size_t num) const {
337   return num == _collector_leftmost || num == _collector_rightmost || num == _mutator_leftmost || num == _mutator_rightmost;
338 }
339 

388       return NULL;
389     }
390 
391     // If regions are not adjacent, then the current [beg; end] is useless, and we may fast-forward.
392     // If the region is not completely free, the current [beg; end] is useless, and we may fast-forward.
393     if (!is_mutator_free(end) || !can_allocate_from(_heap->get_region(end))) {
394       end++;
395       beg = end;
396       continue;
397     }
398 
399     if ((end - beg + 1) == num) {
400       // found the match
401       break;
402     }
403 
404     end++;
405   };
406 
407   size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
408   ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
409 
410   // Initialize regions:
411   for (size_t i = beg; i <= end; i++) {
412     ShenandoahHeapRegion* r = _heap->get_region(i);
413     try_recycle_trashed(r);
414 
415     assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous");
416     assert(r->is_empty(), "Should be empty");
417 
418     if (i == beg) {
419       r->make_humongous_start();
420     } else {
421       r->make_humongous_cont();
422     }
423 
424     // Trailing region may be non-full, record the remainder there
425     size_t used_words;
426     if ((i == end) && (remainder != 0)) {
427       used_words = remainder;
428     } else {
429       used_words = ShenandoahHeapRegion::region_size_words();
430     }
431 
432     r->set_affiliation(req.affiliation());
433     r->set_update_watermark(r->bottom());
434     r->set_top(r->bottom());    // Set top to bottom so we can capture TAMS
435     ctx->capture_top_at_mark_start(r);
436     r->set_top(r->bottom() + used_words); // Then change top to reflect allocation of humongous object.
437     assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom");
438     assert(ctx->is_bitmap_clear_range(ctx->top_bitmap(r), r->end()), "Bitmap above top_bitmap() must be clear");
439 
440     // Leave top_bitmap alone.  The first time a heap region is put into service, top_bitmap should equal end.
441     // Thereafter, it should represent the upper bound on parts of the bitmap that need to be cleared.
442     // ctx->clear_bitmap(r);
443     log_debug(gc)("NOT clearing bitmap for Humongous region [" PTR_FORMAT ", " PTR_FORMAT "], top_bitmap: "
444                   PTR_FORMAT " at transition from FREE to %s",
445                   p2i(r->bottom()), p2i(r->end()), p2i(ctx->top_bitmap(r)), affiliation_name(req.affiliation()));
446 
447     _mutator_free_bitmap.clear_bit(r->index());
448   }
449 
450   // While individual regions report their true use, all humongous regions are
451   // marked used in the free set.
452   increase_used(ShenandoahHeapRegion::region_size_bytes() * num);
453   if (req.affiliation() == ShenandoahRegionAffiliation::YOUNG_GENERATION) {
454     _heap->young_generation()->increase_used(words_size * HeapWordSize);
455   } else if (req.affiliation() == ShenandoahRegionAffiliation::OLD_GENERATION) {
456     _heap->old_generation()->increase_used(words_size * HeapWordSize);
457   }
458 
459   if (remainder != 0) {
460     // Record this remainder as allocation waste
461     size_t waste = ShenandoahHeapRegion::region_size_words() - remainder;
462     _heap->notify_mutator_alloc_words(waste, true);
463     _heap->generation_for(req.affiliation())->increase_allocated(waste * HeapWordSize);
464   }
465 
466   // Allocated at left/rightmost? Move the bounds appropriately.
467   if (beg == _mutator_leftmost || end == _mutator_rightmost) {
468     adjust_bounds();
469   }
470   assert_bounds();
471 
472   req.set_actual_size(words_size);
473   return _heap->get_region(beg)->bottom();
474 }
475 
476 bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) {
477   return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
478 }
479 
480 size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) {
481   if (r->is_trash()) {
482     // This would be recycled on allocation path
483     return ShenandoahHeapRegion::region_size_bytes();
484   } else {
485     return r->free();
486   }
487 }
488 
489 bool ShenandoahFreeSet::has_no_alloc_capacity(ShenandoahHeapRegion *r) {
490   return alloc_capacity(r) == 0;
491 }
492 
493 void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
494   if (r->is_trash()) {
495     _heap->decrease_used(r->used());
496     r->recycle();
497   }
498 }
499 
500 void ShenandoahFreeSet::recycle_trash() {
501   // lock is not reentrant, check we don't hold it
502   shenandoah_assert_not_heaplocked();

503   for (size_t i = 0; i < _heap->num_regions(); i++) {
504     ShenandoahHeapRegion* r = _heap->get_region(i);
505     if (r->is_trash()) {
506       ShenandoahHeapLocker locker(_heap->lock());
507       try_recycle_trashed(r);
508     }
509     SpinPause(); // allow allocators to take the lock
510   }
511 }
512 
513 void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
514   size_t idx = r->index();
515 
516   assert(_mutator_free_bitmap.at(idx), "Should be in mutator view");
517   assert(can_allocate_from(r), "Should not be allocated");
518 
519   _mutator_free_bitmap.clear_bit(idx);
520   _collector_free_bitmap.set_bit(idx);
521   _collector_leftmost = MIN2(idx, _collector_leftmost);
522   _collector_rightmost = MAX2(idx, _collector_rightmost);
523 
524   _capacity -= alloc_capacity(r);
525 
526   if (touches_bounds(idx)) {
527     adjust_bounds();
528   }
529   assert_bounds();
530 
531   // We do not ensure that the region is no longer trash,
532   // relying on try_allocate_in(), which always comes next,
533   // to recycle trash before attempting to allocate anything in the region.
534 }
535 
536 void ShenandoahFreeSet::clear() {
537   shenandoah_assert_heaplocked();
538   clear_internal();
539 }
540 
541 void ShenandoahFreeSet::clear_internal() {
542   _mutator_free_bitmap.clear();
543   _collector_free_bitmap.clear();
544   _mutator_leftmost = _max;
545   _mutator_rightmost = 0;
546   _collector_leftmost = _max;
547   _collector_rightmost = 0;
548   _capacity = 0;
549   _used = 0;
550 }
551 
552 void ShenandoahFreeSet::rebuild() {
553   shenandoah_assert_heaplocked();
554   clear();
555 
556   log_debug(gc)("Rebuilding FreeSet");
557   for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
558     ShenandoahHeapRegion* region = _heap->get_region(idx);
559     if (region->is_alloc_allowed() || region->is_trash()) {
560       assert(!region->is_cset(), "Shouldn't be adding those to the free set");
561 
562       // Do not add regions that would surely fail allocation
563       if (has_no_alloc_capacity(region)) continue;
564 
565       _capacity += alloc_capacity(region);
566       assert(_used <= _capacity, "must not use more than we have");
567 
568       assert(!is_mutator_free(idx), "We are about to add it, it shouldn't be there already");
569       _mutator_free_bitmap.set_bit(idx);
570 
571       log_debug(gc)("  Setting Region " SIZE_FORMAT " _mutator_free_bitmap bit to true", idx);
572     }
573   }
574 
575   // Evac reserve: reserve trailing space for evacuations
576   size_t to_reserve = (_heap->max_capacity() / 100) * ShenandoahEvacReserve;
577   size_t reserved = 0;
578 
579   for (size_t idx = _heap->num_regions() - 1; idx > 0; idx--) {
580     if (reserved >= to_reserve) break;
581 
582     ShenandoahHeapRegion* region = _heap->get_region(idx);
583     if (_mutator_free_bitmap.at(idx) && can_allocate_from(region)) {
584       _mutator_free_bitmap.clear_bit(idx);
585       _collector_free_bitmap.set_bit(idx);
586       size_t ac = alloc_capacity(region);
587       _capacity -= ac;
588       reserved += ac;
589       log_debug(gc)("  Shifting region " SIZE_FORMAT " from mutator_free to collector_free", idx);
590     }
591   }
592 
593   recompute_bounds();
594   assert_bounds();
595 }
596 
597 void ShenandoahFreeSet::log_status() {
598   shenandoah_assert_heaplocked();
599 
600   LogTarget(Info, gc, ergo) lt;
601   if (lt.is_enabled()) {
602     ResourceMark rm;
603     LogStream ls(lt);
604 
605     {
606       size_t last_idx = 0;
607       size_t max = 0;
608       size_t max_contig = 0;
609       size_t empty_contig = 0;

672       for (size_t idx = _collector_leftmost; idx <= _collector_rightmost; idx++) {
673         if (is_collector_free(idx)) {
674           ShenandoahHeapRegion *r = _heap->get_region(idx);
675           size_t free = alloc_capacity(r);
676           max = MAX2(max, free);
677           total_free += free;
678         }
679       }
680 
681       ls.print_cr("Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s",
682                   byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
683                   byte_size_in_proper_unit(max),        proper_unit_for_byte_size(max));
684     }
685   }
686 }
687 
688 HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
689   shenandoah_assert_heaplocked();
690   assert_bounds();
691 
692   // Allocation request is known to satisfy all memory budgeting constraints.
693   if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
694     switch (req.type()) {
695       case ShenandoahAllocRequest::_alloc_shared:
696       case ShenandoahAllocRequest::_alloc_shared_gc:
697         in_new_region = true;
698         return allocate_contiguous(req);
699       case ShenandoahAllocRequest::_alloc_plab:
700       case ShenandoahAllocRequest::_alloc_gclab:
701       case ShenandoahAllocRequest::_alloc_tlab:
702         in_new_region = false;
703         assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
704                req.size(), ShenandoahHeapRegion::humongous_threshold_words());
705         return NULL;
706       default:
707         ShouldNotReachHere();
708         return NULL;
709     }
710   } else {
711     return allocate_single(req, in_new_region);
712   }
713 }
714 
715 size_t ShenandoahFreeSet::unsafe_peek_free() const {
716   // Deliberately not locked, this method is unsafe when free set is modified.
717 
718   for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
719     if (index < _max && is_mutator_free(index)) {