    //
    // First, finish consuming regions that are already partially consumed so as to more tightly limit ranges of
    // available regions. Other potential benefits:
    // 1. Eventual collection set has fewer regions because we have packed newly allocated objects into fewer regions
    // 2. We preserve the "empty" regions longer into the GC cycle, reducing likelihood of allocation failures
    //    late in the GC cycle.
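    // Estimate how many partially consumed regions sit at each end of the Mutator interval: the
    // distance from the leftmost region to the leftmost empty region, and likewise on the right.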
    idx_t non_empty_on_left = (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator)
                               - _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator));
    idx_t non_empty_on_right = (_partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator)
                                - _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator));
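    // Bias allocation toward the end that holds more partially consumed regions, so those regions
    // are finished first and the empty regions at the other end stay intact longer.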
    _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::Mutator, (non_empty_on_right < non_empty_on_left));
    _alloc_bias_weight = INITIAL_ALLOC_BIAS_WEIGHT;
  }
}

// Walk the regions supplied by the iterator in its traversal order, returning the first successful
// allocation, or nullptr if no region in the range can satisfy the request. The traversal direction
// comes from the iterator the caller passes.
template<typename Iter>
HeapWord* ShenandoahFreeSet::allocate_from_regions(Iter& iterator, ShenandoahAllocRequest &req, bool &in_new_region) {
  for (idx_t idx = iterator.current(); iterator.has_next(); idx = iterator.next()) {
    ShenandoahHeapRegion* r = _heap->get_region(idx);
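    // A TLAB request may be satisfied with less than its preferred size, but never less than its
    // stated minimum; other request types must be satisfied at full size.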
    size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab) ? req.min_size() : req.size();
    // alloc_capacity() reports bytes; the request sizes are in heap words.
    if (alloc_capacity(r) >= min_size * HeapWordSize) {
      HeapWord* result = try_allocate_in(r, req, in_new_region);
      if (result != nullptr) {
        return result;
      }
    }
  }
  return nullptr;
}

// Satisfy an allocation made on behalf of the collector. The fast path tries regions whose
// affiliation matches the request; expanding into a new region is attempted only when permitted.
HeapWord* ShenandoahFreeSet::allocate_for_collector(ShenandoahAllocRequest &req, bool &in_new_region) {
  // Fast-path: try to allocate in the collector view first.
  HeapWord* result = allocate_from_partition_with_affiliation(req.affiliation(), req, in_new_region);
  if (result != nullptr) {
    return result;
  }

  bool allow_new_region = can_allocate_in_new_region(req);
  if (allow_new_region) {
    // Try a free region that is dedicated to GC allocations.