src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
/*
* Copyright (c) 2016, 2021, Red Hat, Inc. All rights reserved.
+ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
*/
#include "precompiled.hpp"
#include "gc/shared/tlab_globals.hpp"
+ #include "gc/shenandoah/shenandoahAffiliation.hpp"
+ #include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
+ #include "gc/shenandoah/shenandoahOldGeneration.hpp"
+ #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
+ #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
+ #include "gc/shenandoah/shenandoahSimpleBitMap.hpp"
+ #include "gc/shenandoah/shenandoahSimpleBitMap.inline.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/orderAccess.hpp"
+ static const char* partition_name(ShenandoahFreeSetPartitionId t) {
+ switch (t) {
+ case ShenandoahFreeSetPartitionId::NotFree: return "NotFree";
+ case ShenandoahFreeSetPartitionId::Mutator: return "Mutator";
+ case ShenandoahFreeSetPartitionId::Collector: return "Collector";
+ case ShenandoahFreeSetPartitionId::OldCollector: return "OldCollector";
+ default:
+ ShouldNotReachHere();
+ return "Unrecognized";
+ }
+ }
+
+ #ifndef PRODUCT
+ void ShenandoahRegionPartitions::dump_bitmap() const {
+ log_debug(gc)("Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "], Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT
+ "], Old Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
+ _leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)],
+ _rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)],
+ _leftmosts[int(ShenandoahFreeSetPartitionId::Collector)],
+ _rightmosts[int(ShenandoahFreeSetPartitionId::Collector)],
+ _leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)],
+ _rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)]);
+ log_debug(gc)("Empty Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT
+ "], Empty Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT
+ "], Empty Old Collecto range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
+ _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
+ _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
+ _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
+ _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
+ _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
+ _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)]);
+
+ log_debug(gc)("%6s: %18s %18s %18s %18s", "index", "Mutator Bits", "Collector Bits", "Old Collector Bits", "NotFree Bits");
+ dump_bitmap_range(0, _max-1);
+ }
+
+ void ShenandoahRegionPartitions::dump_bitmap_range(idx_t start_region_idx, idx_t end_region_idx) const {
+ assert((start_region_idx >= 0) && (start_region_idx < (idx_t) _max), "precondition");
+ assert((end_region_idx >= 0) && (end_region_idx < (idx_t) _max), "precondition");
+ idx_t aligned_start = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(start_region_idx);
+ idx_t aligned_end = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(end_region_idx);
+ idx_t alignment = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].alignment();
+ while (aligned_start <= aligned_end) {
+ dump_bitmap_row(aligned_start);
+ aligned_start += alignment;
+ }
+ }
+
+ void ShenandoahRegionPartitions::dump_bitmap_row(idx_t region_idx) const {
+ assert((region_idx >= 0) && (region_idx < (idx_t) _max), "precondition");
+ idx_t aligned_idx = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(region_idx);
+ uintx mutator_bits = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].bits_at(aligned_idx);
+ uintx collector_bits = _membership[int(ShenandoahFreeSetPartitionId::Collector)].bits_at(aligned_idx);
+ uintx old_collector_bits = _membership[int(ShenandoahFreeSetPartitionId::OldCollector)].bits_at(aligned_idx);
+ uintx free_bits = mutator_bits | collector_bits | old_collector_bits;
+ uintx notfree_bits = ~free_bits;
+ log_debug(gc)(SSIZE_FORMAT_W(6) ": " SIZE_FORMAT_X_0 " " SIZE_FORMAT_X_0 " " SIZE_FORMAT_X_0 " " SIZE_FORMAT_X_0,
+ aligned_idx, mutator_bits, collector_bits, old_collector_bits, notfree_bits);
+ }
+ #endif
+
+ ShenandoahRegionPartitions::ShenandoahRegionPartitions(size_t max_regions, ShenandoahFreeSet* free_set) :
+ _max(max_regions),
+ _region_size_bytes(ShenandoahHeapRegion::region_size_bytes()),
+ _free_set(free_set),
+ _membership{ ShenandoahSimpleBitMap(max_regions), ShenandoahSimpleBitMap(max_regions), ShenandoahSimpleBitMap(max_regions) }
+ {
+ make_all_regions_unavailable();
+ }
+
+ inline bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) const {
+ return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
+ }
+
+ inline bool ShenandoahFreeSet::can_allocate_from(size_t idx) const {
+ ShenandoahHeapRegion* r = _heap->get_region(idx);
+ return can_allocate_from(r);
+ }
+
+ inline size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) const {
+ if (r->is_trash()) {
+ // This would be recycled on allocation path
+ return ShenandoahHeapRegion::region_size_bytes();
+ } else {
+ return r->free();
+ }
+ }
+
+ inline size_t ShenandoahFreeSet::alloc_capacity(size_t idx) const {
+ ShenandoahHeapRegion* r = _heap->get_region(idx);
+ return alloc_capacity(r);
+ }
+
+ inline bool ShenandoahFreeSet::has_alloc_capacity(ShenandoahHeapRegion *r) const {
+ return alloc_capacity(r) > 0;
+ }
+
+ inline idx_t ShenandoahRegionPartitions::leftmost(ShenandoahFreeSetPartitionId which_partition) const {
+ assert (which_partition < NumPartitions, "selected free partition must be valid");
+ idx_t idx = _leftmosts[int(which_partition)];
+ if (idx >= _max) {
+ return _max;
+ } else {
+ // Cannot assert that _membership[int(which_partition)].is_set(idx) because this helper method may be used
+ // to query the original value of leftmost when leftmost must be adjusted because the interval representing
+ // which_partition is shrinking after the region that used to be leftmost is retired.
+ return idx;
+ }
+ }
+
+ inline idx_t ShenandoahRegionPartitions::rightmost(ShenandoahFreeSetPartitionId which_partition) const {
+ assert (which_partition < NumPartitions, "selected free partition must be valid");
+ idx_t idx = _rightmosts[int(which_partition)];
+ // Cannot assert that _membership[int(which_partition)].is_set(idx) because this helper method may be used
+ // to query the original value of rightmost when rightmost must be adjusted because the interval representing
+ // which_partition is shrinking after the region that used to be rightmost is retired.
+ return idx;
+ }
+
+ void ShenandoahRegionPartitions::make_all_regions_unavailable() {
+ for (size_t partition_id = 0; partition_id < IntNumPartitions; partition_id++) {
+ _membership[partition_id].clear_all();
+ _leftmosts[partition_id] = _max;
+ _rightmosts[partition_id] = -1;
+ _leftmosts_empty[partition_id] = _max;
+ _rightmosts_empty[partition_id] = -1;
+ _capacity[partition_id] = 0;
+ _used[partition_id] = 0;
+ }
+ _region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = _region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
+ }
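The reset above establishes the sentinel convention used throughout this file: an empty partition has leftmost == _max and rightmost == -1, so leftmost > rightmost exactly when the partition holds no regions. A tiny self-contained sketch of that invariant, assuming idx_t is a signed index type:

    #include <cassert>
    #include <cstddef>

    using idx_t = std::ptrdiff_t;          // assumption: signed index type

    struct Interval {
      idx_t max;                           // number of regions
      idx_t leftmost;                      // == max when the partition is empty
      idx_t rightmost;                     // == -1 when the partition is empty
      bool is_empty() const { return leftmost > rightmost; }
    };

    int main() {
      Interval i{1024, 1024, -1};          // state after make_all_regions_unavailable()
      assert(i.is_empty());                // matches ShenandoahRegionPartitions::is_empty()
      return 0;
    }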
+
+ void ShenandoahRegionPartitions::establish_mutator_intervals(idx_t mutator_leftmost, idx_t mutator_rightmost,
+ idx_t mutator_leftmost_empty, idx_t mutator_rightmost_empty,
+ size_t mutator_region_count, size_t mutator_used) {
+ _leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_leftmost;
+ _rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_rightmost;
+ _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_leftmost_empty;
+ _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_rightmost_empty;
+
+ _region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_region_count;
+ _used[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_used;
+ _capacity[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_region_count * _region_size_bytes;
+
+ _leftmosts[int(ShenandoahFreeSetPartitionId::Collector)] = _max;
+ _rightmosts[int(ShenandoahFreeSetPartitionId::Collector)] = -1;
+ _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)] = _max;
+ _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)] = -1;
+
+ _region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
+ _used[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
+ _capacity[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
+ }
+
+ void ShenandoahRegionPartitions::establish_old_collector_intervals(idx_t old_collector_leftmost, idx_t old_collector_rightmost,
+ idx_t old_collector_leftmost_empty,
+ idx_t old_collector_rightmost_empty,
+ size_t old_collector_region_count, size_t old_collector_used) {
+ _leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_leftmost;
+ _rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_rightmost;
+ _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_leftmost_empty;
+ _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_rightmost_empty;
+
+ _region_counts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_region_count;
+ _used[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_used;
+ _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_region_count * _region_size_bytes;
+ }
+
+ void ShenandoahRegionPartitions::increase_used(ShenandoahFreeSetPartitionId which_partition, size_t bytes) {
+ assert (which_partition < NumPartitions, "Partition must be valid");
+ _used[int(which_partition)] += bytes;
+ assert (_used[int(which_partition)] <= _capacity[int(which_partition)],
+ "Must not use (" SIZE_FORMAT ") more than capacity (" SIZE_FORMAT ") after increase by " SIZE_FORMAT,
+ _used[int(which_partition)], _capacity[int(which_partition)], bytes);
+ }
+
+ inline void ShenandoahRegionPartitions::shrink_interval_if_range_modifies_either_boundary(
+ ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx) {
+ assert((low_idx <= high_idx) && (low_idx >= 0) && (high_idx < _max), "Range must span legal index values");
+ if (low_idx == leftmost(partition)) {
+ assert (!_membership[int(partition)].is_set(low_idx), "Do not shrink interval if region not removed");
+ if (high_idx + 1 == _max) {
+ _leftmosts[int(partition)] = _max;
+ } else {
+ _leftmosts[int(partition)] = find_index_of_next_available_region(partition, high_idx + 1);
+ }
+ if (_leftmosts_empty[int(partition)] < _leftmosts[int(partition)]) {
+ // This gets us closer to where we need to be; we'll scan further when leftmosts_empty is requested.
+ _leftmosts_empty[int(partition)] = _leftmosts[int(partition)];
+ }
+ }
+ if (high_idx == _rightmosts[int(partition)]) {
+ assert (!_membership[int(partition)].is_set(high_idx), "Do not shrink interval if region not removed");
+ if (low_idx == 0) {
+ _rightmosts[int(partition)] = -1;
+ } else {
+ _rightmosts[int(partition)] = find_index_of_previous_available_region(partition, low_idx - 1);
+ }
+ if (_rightmosts_empty[int(partition)] > _rightmosts[int(partition)]) {
+ // This gets us closer to where we need to be; we'll scan further when rightmosts_empty is requested.
+ _rightmosts_empty[int(partition)] = _rightmosts[int(partition)];
+ }
+ }
+ if (_leftmosts[int(partition)] > _rightmosts[int(partition)]) {
+ _leftmosts[int(partition)] = _max;
+ _rightmosts[int(partition)] = -1;
+ _leftmosts_empty[int(partition)] = _max;
+ _rightmosts_empty[int(partition)] = -1;
+ }
+ }
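To make the boundary updates concrete, here is a worked trace under assumed indices (not taken from the patch):

    // Hypothetical trace: partition bits are {5, 9, 17}, so leftmost == 5 and rightmost == 17.
    //   retire region 5  -> leftmost  = find_index_of_next_available_region(partition, 6)      == 9
    //   retire region 17 -> rightmost = find_index_of_previous_available_region(partition, 16) == 9
    //   retire region 9  -> leftmost > rightmost, so the interval collapses to the
    //                       canonical empty form: leftmost == _max, rightmost == -1.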
+
+ inline void ShenandoahRegionPartitions::shrink_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, idx_t idx) {
+ shrink_interval_if_range_modifies_either_boundary(partition, idx, idx);
+ }
+
+ inline void ShenandoahRegionPartitions::expand_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition,
+ idx_t idx, size_t region_available) {
+ if (_leftmosts[int(partition)] > idx) {
+ _leftmosts[int(partition)] = idx;
+ }
+ if (_rightmosts[int(partition)] < idx) {
+ _rightmosts[int(partition)] = idx;
+ }
+ if (region_available == _region_size_bytes) {
+ if (_leftmosts_empty[int(partition)] > idx) {
+ _leftmosts_empty[int(partition)] = idx;
+ }
+ if (_rightmosts_empty[int(partition)] < idx) {
+ _rightmosts_empty[int(partition)] = idx;
+ }
+ }
+ }
+
+ void ShenandoahRegionPartitions::retire_range_from_partition(
+ ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx) {
+
+ // Note: we may remove from free partition even if region is not entirely full, such as when available < PLAB::min_size()
+ assert ((low_idx < _max) && (high_idx < _max), "Both indices are sane: " SIZE_FORMAT " and " SIZE_FORMAT " < " SIZE_FORMAT,
+ low_idx, high_idx, _max);
+ assert (partition < NumPartitions, "Cannot remove from free partitions if not already free");
+
+ for (idx_t idx = low_idx; idx <= high_idx; idx++) {
+ assert (in_free_set(partition, idx), "Must be in partition to remove from partition");
+ _membership[int(partition)].clear_bit(idx);
+ }
+ _region_counts[int(partition)] -= high_idx + 1 - low_idx;
+ shrink_interval_if_range_modifies_either_boundary(partition, low_idx, high_idx);
+ }
+
+ void ShenandoahRegionPartitions::retire_from_partition(ShenandoahFreeSetPartitionId partition, idx_t idx, size_t used_bytes) {
+
+ // Note: we may remove from free partition even if region is not entirely full, such as when available < PLAB::min_size()
+ assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
+ assert (partition < NumPartitions, "Cannot remove from free partitions if not already free");
+ assert (in_free_set(partition, idx), "Must be in partition to remove from partition");
+
+ if (used_bytes < _region_size_bytes) {
+ // Count the alignment pad remnant of memory as used when we retire this region
+ increase_used(partition, _region_size_bytes - used_bytes);
+ }
+ _membership[int(partition)].clear_bit(idx);
+ shrink_interval_if_boundary_modified(partition, idx);
+ _region_counts[int(partition)]--;
+ }
+
+ void ShenandoahRegionPartitions::make_free(idx_t idx, ShenandoahFreeSetPartitionId which_partition, size_t available) {
+ assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
+ assert (membership(idx) == ShenandoahFreeSetPartitionId::NotFree, "Cannot make free if already free");
+ assert (which_partition < NumPartitions, "selected free partition must be valid");
+ assert (available <= _region_size_bytes, "Available cannot exceed region size");
+
+ _membership[int(which_partition)].set_bit(idx);
+ _capacity[int(which_partition)] += _region_size_bytes;
+ _used[int(which_partition)] += _region_size_bytes - available;
+ expand_interval_if_boundary_modified(which_partition, idx, available);
+ _region_counts[int(which_partition)]++;
+ }
+
+ bool ShenandoahRegionPartitions::is_mutator_partition(ShenandoahFreeSetPartitionId p) {
+ return (p == ShenandoahFreeSetPartitionId::Mutator);
+ }
+
+ bool ShenandoahRegionPartitions::is_young_collector_partition(ShenandoahFreeSetPartitionId p) {
+ return (p == ShenandoahFreeSetPartitionId::Collector);
+ }
+
+ bool ShenandoahRegionPartitions::is_old_collector_partition(ShenandoahFreeSetPartitionId p) {
+ return (p == ShenandoahFreeSetPartitionId::OldCollector);
+ }
+
+ bool ShenandoahRegionPartitions::available_implies_empty(size_t available_in_region) {
+ return (available_in_region == _region_size_bytes);
+ }
+
+
+ void ShenandoahRegionPartitions::move_from_partition_to_partition(idx_t idx, ShenandoahFreeSetPartitionId orig_partition,
+ ShenandoahFreeSetPartitionId new_partition, size_t available) {
+ ShenandoahHeapRegion* r = ShenandoahHeap::heap()->get_region(idx);
+ assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
+ assert (orig_partition < NumPartitions, "Original partition must be valid");
+ assert (new_partition < NumPartitions, "New partition must be valid");
+ assert (available <= _region_size_bytes, "Available cannot exceed region size");
+ assert (_membership[int(orig_partition)].is_set(idx), "Cannot move from partition unless in partition");
+ assert ((r != nullptr) && ((r->is_trash() && (available == _region_size_bytes)) ||
+ (r->used() + available == _region_size_bytes)),
+ "Used: " SIZE_FORMAT " + available: " SIZE_FORMAT " should equal region size: " SIZE_FORMAT,
+ r->used(), available, _region_size_bytes);
+
+ // Expected transitions:
+ // During rebuild: Mutator => Collector
+ // Mutator empty => Collector
+ // Mutator empty => OldCollector
+ // During flip_to_gc: Mutator empty => Collector
+ // Mutator empty => OldCollector
+ // At start of update refs: Collector => Mutator
+ // OldCollector empty => Mutator
+ assert ((is_mutator_partition(orig_partition) && is_young_collector_partition(new_partition)) ||
+ (is_mutator_partition(orig_partition) &&
+ available_implies_empty(available) && is_old_collector_partition(new_partition)) ||
+ (is_young_collector_partition(orig_partition) && is_mutator_partition(new_partition)) ||
+ (is_old_collector_partition(orig_partition)
+ && available_implies_empty(available) && is_mutator_partition(new_partition)),
+ "Unexpected movement between partitions, available: " SIZE_FORMAT ", _region_size_bytes: " SIZE_FORMAT
+ ", orig_partition: %s, new_partition: %s",
+ available, _region_size_bytes, partition_name(orig_partition), partition_name(new_partition));
+
+ size_t used = _region_size_bytes - available;
+ assert (_used[int(orig_partition)] >= used,
+ "Orig partition used: " SIZE_FORMAT " must exceed moved used: " SIZE_FORMAT " within region " SSIZE_FORMAT,
+ _used[int(orig_partition)], used, idx);
+
+ _membership[int(orig_partition)].clear_bit(idx);
+ _membership[int(new_partition)].set_bit(idx);
+
+ _capacity[int(orig_partition)] -= _region_size_bytes;
+ _used[int(orig_partition)] -= used;
+ shrink_interval_if_boundary_modified(orig_partition, idx);
+
+ _capacity[int(new_partition)] += _region_size_bytes;
+ _used[int(new_partition)] += used;
+ expand_interval_if_boundary_modified(new_partition, idx, available);
+
+ _region_counts[int(orig_partition)]--;
+ _region_counts[int(new_partition)]++;
+ }
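A worked example of the accounting transfer above, assuming a 4 MiB region with available == 1 MiB (so used == 3 MiB):

    // orig partition: capacity -= 4 MiB, used -= 3 MiB
    // new  partition: capacity += 4 MiB, used += 3 MiB
    // Summed over all partitions, capacity and used are unchanged; only the
    // attribution moves. This is why the assert above requires the original
    // partition's used tally to cover the used bytes being moved.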
+
+ const char* ShenandoahRegionPartitions::partition_membership_name(idx_t idx) const {
+ return partition_name(membership(idx));
+ }
+
+ inline ShenandoahFreeSetPartitionId ShenandoahRegionPartitions::membership(idx_t idx) const {
+ assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
+ ShenandoahFreeSetPartitionId result = ShenandoahFreeSetPartitionId::NotFree;
+ for (uint partition_id = 0; partition_id < UIntNumPartitions; partition_id++) {
+ if (_membership[partition_id].is_set(idx)) {
+ assert(result == ShenandoahFreeSetPartitionId::NotFree, "Region should reside in only one partition");
+ result = (ShenandoahFreeSetPartitionId) partition_id;
+ }
+ }
+ return result;
+ }
+
+ #ifdef ASSERT
+ inline bool ShenandoahRegionPartitions::partition_id_matches(idx_t idx, ShenandoahFreeSetPartitionId test_partition) const {
+ assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
+ assert (test_partition < ShenandoahFreeSetPartitionId::NotFree, "must be a valid partition");
+
+ return membership(idx) == test_partition;
+ }
+ #endif
+
+ inline bool ShenandoahRegionPartitions::is_empty(ShenandoahFreeSetPartitionId which_partition) const {
+ assert (which_partition < NumPartitions, "selected free partition must be valid");
+ return (leftmost(which_partition) > rightmost(which_partition));
+ }
+
+ inline idx_t ShenandoahRegionPartitions::find_index_of_next_available_region(
+ ShenandoahFreeSetPartitionId which_partition, idx_t start_index) const {
+ idx_t rightmost_idx = rightmost(which_partition);
+ idx_t leftmost_idx = leftmost(which_partition);
+ if ((rightmost_idx < leftmost_idx) || (start_index > rightmost_idx)) return _max;
+ if (start_index < leftmost_idx) {
+ start_index = leftmost_idx;
+ }
+ idx_t result = _membership[int(which_partition)].find_first_set_bit(start_index, rightmost_idx + 1);
+ if (result > rightmost_idx) {
+ result = _max;
+ }
+ assert (result >= start_index, "Requires progress");
+ return result;
+ }
+
+ inline idx_t ShenandoahRegionPartitions::find_index_of_previous_available_region(
+ ShenandoahFreeSetPartitionId which_partition, idx_t last_index) const {
+ idx_t rightmost_idx = rightmost(which_partition);
+ idx_t leftmost_idx = leftmost(which_partition);
+ // if (leftmost_idx == max) then (last_index < leftmost_idx)
+ if (last_index < leftmost_idx) return -1;
+ if (last_index > rightmost_idx) {
+ last_index = rightmost_idx;
+ }
+ idx_t result = _membership[int(which_partition)].find_last_set_bit(-1, last_index);
+ if (result < leftmost_idx) {
+ result = -1;
+ }
+ assert (result <= last_index, "Requires progress");
+ return result;
+ }
+
+ inline idx_t ShenandoahRegionPartitions::find_index_of_next_available_cluster_of_regions(
+ ShenandoahFreeSetPartitionId which_partition, idx_t start_index, size_t cluster_size) const {
+ idx_t rightmost_idx = rightmost(which_partition);
+ idx_t leftmost_idx = leftmost(which_partition);
+ if ((rightmost_idx < leftmost_idx) || (start_index > rightmost_idx)) return _max;
+ idx_t result = _membership[int(which_partition)].find_first_consecutive_set_bits(start_index, rightmost_idx + 1, cluster_size);
+ if (result > rightmost_idx) {
+ result = _max;
+ }
+ assert (result >= start_index, "Requires progress");
+ return result;
+ }
+
+ inline idx_t ShenandoahRegionPartitions::find_index_of_previous_available_cluster_of_regions(
+ ShenandoahFreeSetPartitionId which_partition, idx_t last_index, size_t cluster_size) const {
+ idx_t leftmost_idx = leftmost(which_partition);
+ // if (leftmost_idx == max) then (last_index < leftmost_idx)
+ if (last_index < leftmost_idx) return -1;
+ idx_t result = _membership[int(which_partition)].find_last_consecutive_set_bits(leftmost_idx - 1, last_index, cluster_size);
+ if (result <= leftmost_idx) {
+ result = -1;
+ }
+ assert (result <= last_index, "Requires progress");
+ return result;
+ }
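The cluster searches above back contiguous (humongous) allocation. A self-contained analog of the forward search, scanning for the first run of `need` set bits (need >= 1) and returning a "not found" sentinel analogous to `_max`; this is hypothetical illustration code, not the ShenandoahSimpleBitMap API:

    #include <cstddef>
    #include <vector>

    std::ptrdiff_t find_first_run(const std::vector<bool>& free, size_t need) {
      size_t run = 0;
      for (size_t i = 0; i < free.size(); i++) {
        run = free[i] ? run + 1 : 0;                             // extend or restart the run
        if (run == need) {
          return static_cast<std::ptrdiff_t>(i + 1 - need);      // index where the run begins
        }
      }
      return static_cast<std::ptrdiff_t>(free.size());           // sentinel: no such cluster
    }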
+
+ idx_t ShenandoahRegionPartitions::leftmost_empty(ShenandoahFreeSetPartitionId which_partition) {
+ assert (which_partition < NumPartitions, "selected free partition must be valid");
+ idx_t max_regions = _max;
+ if (_leftmosts_empty[int(which_partition)] == _max) {
+ return _max;
+ }
+ for (idx_t idx = find_index_of_next_available_region(which_partition, _leftmosts_empty[int(which_partition)]);
+ idx < max_regions; ) {
+ assert(in_free_set(which_partition, idx), "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, idx);
+ if (_free_set->alloc_capacity(idx) == _region_size_bytes) {
+ _leftmosts_empty[int(which_partition)] = idx;
+ return idx;
+ }
+ idx = find_index_of_next_available_region(which_partition, idx + 1);
+ }
+ _leftmosts_empty[int(which_partition)] = _max;
+ _rightmosts_empty[int(which_partition)] = -1;
+ return _max;
+ }
+
+ idx_t ShenandoahRegionPartitions::rightmost_empty(ShenandoahFreeSetPartitionId which_partition) {
+ assert (which_partition < NumPartitions, "selected free partition must be valid");
+ if (_rightmosts_empty[int(which_partition)] < 0) {
+ return -1;
+ }
+ for (idx_t idx = find_index_of_previous_available_region(which_partition, _rightmosts_empty[int(which_partition)]);
+ idx >= 0; ) {
+ assert(in_free_set(which_partition, idx), "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, idx);
+ if (_free_set->alloc_capacity(idx) == _region_size_bytes) {
+ _rightmosts_empty[int(which_partition)] = idx;
+ return idx;
+ }
+ idx = find_index_of_previous_available_region(which_partition, idx - 1);
+ }
+ _leftmosts_empty[int(which_partition)] = _max;
+ _rightmosts_empty[int(which_partition)] = -1;
+ return -1;
+ }
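The empty-bound accessors above are lazy: the cached value is only a bound that is tightened on demand. A worked illustration under an assumed state:

    // Assume _rightmosts_empty[p] == 40, regions 33..40 have been partially
    // filled since the cache was last tightened, and region 32 is still empty.
    // rightmost_empty(p) walks left from 40 via find_index_of_previous_available_region(),
    // skips the non-empty regions, caches _rightmosts_empty[p] = 32, and returns 32.
    // If no fully empty region remains, both empty bounds collapse to _max / -1.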
+
+
+ #ifdef ASSERT
+ void ShenandoahRegionPartitions::assert_bounds() {
+
+ idx_t leftmosts[UIntNumPartitions];
+ idx_t rightmosts[UIntNumPartitions];
+ idx_t empty_leftmosts[UIntNumPartitions];
+ idx_t empty_rightmosts[UIntNumPartitions];
+
+ for (uint i = 0; i < UIntNumPartitions; i++) {
+ leftmosts[i] = _max;
+ empty_leftmosts[i] = _max;
+ rightmosts[i] = -1;
+ empty_rightmosts[i] = -1;
+ }
+
+ for (idx_t i = 0; i < _max; i++) {
+ ShenandoahFreeSetPartitionId partition = membership(i);
+ switch (partition) {
+ case ShenandoahFreeSetPartitionId::NotFree:
+ break;
+
+ case ShenandoahFreeSetPartitionId::Mutator:
+ case ShenandoahFreeSetPartitionId::Collector:
+ case ShenandoahFreeSetPartitionId::OldCollector:
+ {
+ size_t capacity = _free_set->alloc_capacity(i);
+ bool is_empty = (capacity == _region_size_bytes);
+ assert(capacity > 0, "free regions must have allocation capacity");
+ if (i < leftmosts[int(partition)]) {
+ leftmosts[int(partition)] = i;
+ }
+ if (is_empty && (i < empty_leftmosts[int(partition)])) {
+ empty_leftmosts[int(partition)] = i;
+ }
+ if (i > rightmosts[int(partition)]) {
+ rightmosts[int(partition)] = i;
+ }
+ if (is_empty && (i > empty_rightmosts[int(partition)])) {
+ empty_rightmosts[int(partition)] = i;
+ }
+ break;
+ }
+
+ default:
+ ShouldNotReachHere();
+ }
+ }
+
+ // Performance invariants. Failing these would not break the free partition, but performance would suffer.
+ assert (leftmost(ShenandoahFreeSetPartitionId::Mutator) <= _max,
+ "leftmost in bounds: " SSIZE_FORMAT " < " SSIZE_FORMAT, leftmost(ShenandoahFreeSetPartitionId::Mutator), _max);
+ assert (rightmost(ShenandoahFreeSetPartitionId::Mutator) < _max,
+ "rightmost in bounds: " SSIZE_FORMAT " < " SSIZE_FORMAT, rightmost(ShenandoahFreeSetPartitionId::Mutator), _max);
+
+ assert (leftmost(ShenandoahFreeSetPartitionId::Mutator) == _max
+ || partition_id_matches(leftmost(ShenandoahFreeSetPartitionId::Mutator), ShenandoahFreeSetPartitionId::Mutator),
+ "leftmost region should be free: " SSIZE_FORMAT, leftmost(ShenandoahFreeSetPartitionId::Mutator));
+ assert (leftmost(ShenandoahFreeSetPartitionId::Mutator) == _max
+ || partition_id_matches(rightmost(ShenandoahFreeSetPartitionId::Mutator), ShenandoahFreeSetPartitionId::Mutator),
+ "rightmost region should be free: " SSIZE_FORMAT, rightmost(ShenandoahFreeSetPartitionId::Mutator));
+
+ // If the Mutator partition is empty, leftmosts will both equal _max and rightmosts will both equal -1.
+ // Likewise for the empty-region bounds.
+ idx_t beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
+ idx_t end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
+ assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::Mutator),
+ "free regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
+ beg_off, leftmost(ShenandoahFreeSetPartitionId::Mutator));
+ assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::Mutator),
+ "free regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
+ end_off, rightmost(ShenandoahFreeSetPartitionId::Mutator));
+
+ beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
+ end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
+ assert (beg_off >= leftmost_empty(ShenandoahFreeSetPartitionId::Mutator),
+ "free empty regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
+ beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Mutator));
+ assert (end_off <= rightmost_empty(ShenandoahFreeSetPartitionId::Mutator),
+ "free empty regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
+ end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Mutator));
+
+ // Performance invariants. Failing these would not break the free partition, but performance would suffer.
+ assert (leftmost(ShenandoahFreeSetPartitionId::Collector) <= _max, "leftmost in bounds: " SSIZE_FORMAT " < " SSIZE_FORMAT,
+ leftmost(ShenandoahFreeSetPartitionId::Collector), _max);
+ assert (rightmost(ShenandoahFreeSetPartitionId::Collector) < _max, "rightmost in bounds: " SSIZE_FORMAT " < " SSIZE_FORMAT,
+ rightmost(ShenandoahFreeSetPartitionId::Collector), _max);
+
+ assert (leftmost(ShenandoahFreeSetPartitionId::Collector) == _max
+ || partition_id_matches(leftmost(ShenandoahFreeSetPartitionId::Collector), ShenandoahFreeSetPartitionId::Collector),
+ "leftmost region should be free: " SSIZE_FORMAT, leftmost(ShenandoahFreeSetPartitionId::Collector));
+ assert (leftmost(ShenandoahFreeSetPartitionId::Collector) == _max
+ || partition_id_matches(rightmost(ShenandoahFreeSetPartitionId::Collector), ShenandoahFreeSetPartitionId::Collector),
+ "rightmost region should be free: " SSIZE_FORMAT, rightmost(ShenandoahFreeSetPartitionId::Collector));
+
+ // If the Collector partition is empty, leftmosts will both equal _max and rightmosts will both equal -1.
+ // Likewise for the empty-region bounds.
+ beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
+ end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
+ assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::Collector),
+ "free regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
+ beg_off, leftmost(ShenandoahFreeSetPartitionId::Collector));
+ assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::Collector),
+ "free regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
+ end_off, rightmost(ShenandoahFreeSetPartitionId::Collector));
+
+ beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
+ end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
+ assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
+ "free empty regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
+ beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Collector));
+ assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
+ "free empty regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
+ end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Collector));
+
+ // Performance invariants. Failing these would not break the free partition, but performance would suffer.
+ assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) <= _max, "leftmost in bounds: " SSIZE_FORMAT " < " SSIZE_FORMAT,
+ leftmost(ShenandoahFreeSetPartitionId::OldCollector), _max);
+ assert (rightmost(ShenandoahFreeSetPartitionId::OldCollector) < _max, "rightmost in bounds: " SSIZE_FORMAT " < " SSIZE_FORMAT,
+ rightmost(ShenandoahFreeSetPartitionId::OldCollector), _max);
+
+ assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) == _max
+ || partition_id_matches(leftmost(ShenandoahFreeSetPartitionId::OldCollector),
+ ShenandoahFreeSetPartitionId::OldCollector),
+ "leftmost region should be free: " SSIZE_FORMAT, leftmost(ShenandoahFreeSetPartitionId::OldCollector));
+ assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) == _max
+ || partition_id_matches(rightmost(ShenandoahFreeSetPartitionId::OldCollector),
+ ShenandoahFreeSetPartitionId::OldCollector),
+ "rightmost region should be free: " SSIZE_FORMAT, rightmost(ShenandoahFreeSetPartitionId::OldCollector));
+
+ // If the OldCollector partition is empty, leftmosts will both equal _max and rightmosts will both equal -1.
+ // Likewise for the empty-region bounds.
+ beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
+ end_off = rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
+ assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::OldCollector),
+ "free regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
+ beg_off, leftmost(ShenandoahFreeSetPartitionId::OldCollector));
+ assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::OldCollector),
+ "free regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
+ end_off, rightmost(ShenandoahFreeSetPartitionId::OldCollector));
+
+ beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
+ end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
+ assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
+ "free empty regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
+ beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::OldCollector));
+ assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
+ "free empty regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
+ end_off, rightmost_empty(ShenandoahFreeSetPartitionId::OldCollector));
+ }
+ #endif
+
ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
_heap(heap),
- _mutator_free_bitmap(max_regions, mtGC),
- _collector_free_bitmap(max_regions, mtGC),
- _max(max_regions)
+ _partitions(max_regions, this),
+ _trash_regions(NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, max_regions, mtGC)),
+ _alloc_bias_weight(0)
{
clear_internal();
}
- void ShenandoahFreeSet::increase_used(size_t num_bytes) {
+ void ShenandoahFreeSet::add_promoted_in_place_region_to_old_collector(ShenandoahHeapRegion* region) {
shenandoah_assert_heaplocked();
- _used += num_bytes;
-
- assert(_used <= _capacity, "must not use more than we have: used: " SIZE_FORMAT
- ", capacity: " SIZE_FORMAT ", num_bytes: " SIZE_FORMAT, _used, _capacity, num_bytes);
- }
-
- bool ShenandoahFreeSet::is_mutator_free(size_t idx) const {
- assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
- idx, _max, _mutator_leftmost, _mutator_rightmost);
- return _mutator_free_bitmap.at(idx);
+ size_t plab_min_size_in_bytes = ShenandoahGenerationalHeap::heap()->plab_min_size() * HeapWordSize;
+ size_t idx = region->index();
+ size_t capacity = alloc_capacity(region);
+ assert(_partitions.membership(idx) == ShenandoahFreeSetPartitionId::NotFree,
+ "Regions promoted in place should have been excluded from Mutator partition");
+ if (capacity >= plab_min_size_in_bytes) {
+ _partitions.make_free(idx, ShenandoahFreeSetPartitionId::OldCollector, capacity);
+ _heap->old_generation()->augment_promoted_reserve(capacity);
+ }
}
- bool ShenandoahFreeSet::is_collector_free(size_t idx) const {
- assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
- idx, _max, _collector_leftmost, _collector_rightmost);
- return _collector_free_bitmap.at(idx);
+ HeapWord* ShenandoahFreeSet::allocate_from_partition_with_affiliation(ShenandoahFreeSetPartitionId which_partition,
+ ShenandoahAffiliation affiliation,
+ ShenandoahAllocRequest& req, bool& in_new_region) {
+ shenandoah_assert_heaplocked();
+ idx_t rightmost_collector = ((affiliation == ShenandoahAffiliation::FREE)?
+ _partitions.rightmost_empty(which_partition): _partitions.rightmost(which_partition));
+ idx_t leftmost_collector = ((affiliation == ShenandoahAffiliation::FREE)?
+ _partitions.leftmost_empty(which_partition): _partitions.leftmost(which_partition));
+ if (_partitions.alloc_from_left_bias(which_partition)) {
+ for (idx_t idx = leftmost_collector; idx <= rightmost_collector; ) {
+ assert(_partitions.in_free_set(which_partition, idx), "Boundaries or find_prev_last_bit failed: " SSIZE_FORMAT, idx);
+ ShenandoahHeapRegion* r = _heap->get_region(idx);
+ if (r->affiliation() == affiliation) {
+ HeapWord* result = try_allocate_in(r, req, in_new_region);
+ if (result != nullptr) {
+ return result;
+ }
+ }
+ idx = _partitions.find_index_of_next_available_region(which_partition, idx + 1);
+ }
+ } else {
+ for (idx_t idx = rightmost_collector; idx >= leftmost_collector; ) {
+ assert(_partitions.in_free_set(which_partition, idx),
+ "Boundaries or find_prev_last_bit failed: " SSIZE_FORMAT, idx);
+ ShenandoahHeapRegion* r = _heap->get_region(idx);
+ if (r->affiliation() == affiliation) {
+ HeapWord* result = try_allocate_in(r, req, in_new_region);
+ if (result != nullptr) {
+ return result;
+ }
+ }
+ idx = _partitions.find_index_of_previous_available_region(which_partition, idx - 1);
+ }
+ }
+ log_debug(gc, free)("Could not allocate collector region with affiliation: %s for request " PTR_FORMAT,
+ shenandoah_affiliation_name(affiliation), p2i(&req));
+ return nullptr;
}
HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
+ shenandoah_assert_heaplocked();
+
// Scan the bitmap looking for a first fit.
//
// Leftmost and rightmost bounds provide enough caching to walk bitmap efficiently. Normally,
// we would find the region to allocate at right away.
//
- // Allocations are biased: new application allocs go to beginning of the heap, and GC allocs
- // go to the end. This makes application allocation faster, because we would clear lots
- // of regions from the beginning most of the time.
+ // Allocations are biased: GC allocations are taken from the high end of the heap. Regular (and TLAB)
+ // mutator allocations are taken from the middle of heap, below the memory reserved for Collector.
+ // Humongous mutator allocations are taken from the bottom of the heap.
//
- // Free set maintains mutator and collector views, and normally they allocate in their views only,
- // unless we special cases for stealing and mixed allocations.
+ // Free set maintains mutator and collector partitions. Normally, each allocates only from its partition,
+ // except in special cases when the collector steals regions from the mutator partition.
+
+ // Determine whether this allocation is permitted to claim a new (unaffiliated) region, per generational accounting.
+ bool allow_new_region = true;
+ if (_heap->mode()->is_generational()) {
+ switch (req.affiliation()) {
+ case ShenandoahAffiliation::OLD_GENERATION:
+ // Note: unsigned result from free_unaffiliated_regions() will never be less than zero, but it may equal zero.
+ if (_heap->old_generation()->free_unaffiliated_regions() <= 0) {
+ allow_new_region = false;
+ }
+ break;
+
+ case ShenandoahAffiliation::YOUNG_GENERATION:
+ // Note: unsigned result from free_unaffiliated_regions() will never be less than zero, but it may equal zero.
+ if (_heap->young_generation()->free_unaffiliated_regions() <= 0) {
+ allow_new_region = false;
+ }
+ break;
+
+ case ShenandoahAffiliation::FREE:
+ fatal("Should request affiliation");
+ default:
+ ShouldNotReachHere();
+ break;
+ }
+ }
switch (req.type()) {
case ShenandoahAllocRequest::_alloc_tlab:
case ShenandoahAllocRequest::_alloc_shared: {
-
// Try to allocate in the mutator view
- for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
- if (is_mutator_free(idx)) {
- HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
- if (result != nullptr) {
- return result;
+ if (_alloc_bias_weight-- <= 0) {
+ // We have observed that regions not collected in previous GC cycle tend to congregate at one end or the other
+ // of the heap. Typically, these are the more recently engaged regions and the objects in these regions have not
+ // yet had a chance to die (and/or are treated as floating garbage). If we use the same allocation bias on each
+ // GC pass, these "most recently" engaged regions for GC pass N will also be the "most recently" engaged regions
+ // for GC pass N+1, and the relatively large amount of live data and/or floating garbage introduced
+ // during the most recent GC pass may once again prevent the region from being collected. We have found that
+ // alternating the allocation behavior between GC passes improves evacuation performance by 3-7% on certain
+ // benchmarks. In the best case, this has the effect of consuming these partially consumed regions before
+ // the start of the next mark cycle so all of their garbage can be efficiently reclaimed.
+ //
+ // First, finish consuming regions that are already partially consumed so as to more tightly limit ranges of
+ // available regions. Other potential benefits:
+ // 1. Eventual collection set has fewer regions because we have packed newly allocated objects into fewer regions
+ // 2. We preserve the "empty" regions longer into the GC cycle, reducing likelihood of allocation failures
+ // late in the GC cycle.
+ idx_t non_empty_on_left = (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator)
+ - _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator));
+ idx_t non_empty_on_right = (_partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator)
+ - _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator));
+ _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::Mutator, (non_empty_on_right < non_empty_on_left));
+ _alloc_bias_weight = _InitialAllocBiasWeight;
+ }
+ if (!_partitions.alloc_from_left_bias(ShenandoahFreeSetPartitionId::Mutator)) {
+ // Allocate within mutator free from high memory to low so as to preserve low memory for humongous allocations
+ if (!_partitions.is_empty(ShenandoahFreeSetPartitionId::Mutator)) {
+ // Use signed idx. Otherwise, loop will never terminate.
+ idx_t leftmost = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator);
+ for (idx_t idx = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator); idx >= leftmost; ) {
+ assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx),
+ "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, idx);
+ ShenandoahHeapRegion* r = _heap->get_region(idx);
+ // try_allocate_in() increases used if the allocation is successful.
+ HeapWord* result;
+ size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab)? req.min_size(): req.size();
+ if ((alloc_capacity(r) >= min_size) && ((result = try_allocate_in(r, req, in_new_region)) != nullptr)) {
+ return result;
+ }
+ idx = _partitions.find_index_of_previous_available_region(ShenandoahFreeSetPartitionId::Mutator, idx - 1);
+ }
+ }
+ } else {
+ // Allocate from low to high memory. This keeps the range of fully empty regions more tightly packed.
+ // Note that the most recently allocated regions tend not to be evacuated in a given GC cycle. So this
+ // tends to accumulate "fragmented" uncollected regions in high memory.
+ if (!_partitions.is_empty(ShenandoahFreeSetPartitionId::Mutator)) {
+ // Use signed idx. Otherwise, loop will never terminate.
+ idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
+ for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); idx <= rightmost; ) {
+ assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx),
+ "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, idx);
+ ShenandoahHeapRegion* r = _heap->get_region(idx);
+ // try_allocate_in() increases used if the allocation is successful.
+ HeapWord* result;
+ size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab)? req.min_size(): req.size();
+ if ((alloc_capacity(r) >= min_size) && ((result = try_allocate_in(r, req, in_new_region)) != nullptr)) {
+ return result;
+ }
+ idx = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, idx + 1);
}
}
}
-
// There is no recovery. Mutator does not touch collector view at all.
break;
}
case ShenandoahAllocRequest::_alloc_gclab:
- case ShenandoahAllocRequest::_alloc_shared_gc: {
- // size_t is unsigned, need to dodge underflow when _leftmost = 0
+ // GCLABs are for evacuation so we must be in evacuation phase.
+ case ShenandoahAllocRequest::_alloc_plab: {
+ // PLABs always reside in old-gen and are only allocated during
+ // evacuation phase.
+
+ case ShenandoahAllocRequest::_alloc_shared_gc: {
// Fast-path: try to allocate in the collector view first
- for (size_t c = _collector_rightmost + 1; c > _collector_leftmost; c--) {
- size_t idx = c - 1;
- if (is_collector_free(idx)) {
- HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
- if (result != nullptr) {
- return result;
- }
+ HeapWord* result = allocate_from_partition_with_affiliation(req.is_old()? ShenandoahFreeSetPartitionId::OldCollector:
+ ShenandoahFreeSetPartitionId::Collector,
+ req.affiliation(), req, in_new_region);
+ if (result != nullptr) {
+ return result;
+ } else if (allow_new_region) {
+ // Try a free region that is dedicated to GC allocations.
+ result = allocate_from_partition_with_affiliation(req.is_old()? ShenandoahFreeSetPartitionId::OldCollector:
+ ShenandoahFreeSetPartitionId::Collector,
+ ShenandoahAffiliation::FREE, req, in_new_region);
+ if (result != nullptr) {
+ return result;
}
}
// No dice. Can we borrow space from mutator view?
if (!ShenandoahEvacReserveOverflow) {
return nullptr;
}
+ if (!allow_new_region && req.is_old() && (_heap->young_generation()->free_unaffiliated_regions() > 0)) {
+ // This allows us to flip a mutator region to old_collector
+ allow_new_region = true;
+ }
- // Try to steal the empty region from the mutator view
- for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
- size_t idx = c - 1;
- if (is_mutator_free(idx)) {
+ // We should expand old-gen if this can prevent an old-gen evacuation failure. We don't care so much about
+ // promotion failures since they can be mitigated in a subsequent GC pass. Would be nice to know if this
+ // allocation request is for evacuation or promotion. Individual threads limit their use of PLAB memory for
+ // promotions, so we already have an assurance that any additional memory set aside for old-gen will be used
+ // only for old-gen evacuations.
+ if (allow_new_region) {
+ // Try to steal an empty region from the mutator view.
+ idx_t rightmost_mutator = _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator);
+ idx_t leftmost_mutator = _partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator);
+ for (idx_t idx = rightmost_mutator; idx >= leftmost_mutator; ) {
+ assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx),
+ "Boundaries or find_prev_last_bit failed: " SSIZE_FORMAT, idx);
ShenandoahHeapRegion* r = _heap->get_region(idx);
if (can_allocate_from(r)) {
- flip_to_gc(r);
- HeapWord *result = try_allocate_in(r, req, in_new_region);
- if (result != nullptr) {
- return result;
+ if (req.is_old()) {
+ flip_to_old_gc(r);
+ } else {
+ flip_to_gc(r);
}
+ // Region r is entirely empty. If try_allocate_in fails on region r, something else is really wrong.
+ // Don't bother to retry with other regions.
+ log_debug(gc, free)("Flipped region " SIZE_FORMAT " to gc for request: " PTR_FORMAT, idx, p2i(&req));
+ return try_allocate_in(r, req, in_new_region);
}
+ idx = _partitions.find_index_of_previous_available_region(ShenandoahFreeSetPartitionId::Mutator, idx - 1);
}
}
-
- // No dice. Do not try to mix mutator and GC allocations, because
- // URWM moves due to GC allocations would expose unparsable mutator
- // allocations.
-
+ // No dice. Do not try to mix mutator and GC allocations, because adjusting the region's update watermark
+ // due to GC allocations would expose unparsable mutator allocations.
break;
}
+ }
default:
ShouldNotReachHere();
}
-
return nullptr;
}
- HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
- assert (!has_no_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());
+ // This work method takes an argument corresponding to the number of bytes
+ // free in a region, and returns the largest amount in heapwords that can be allocated
+ // such that both of the following conditions are satisfied:
+ //
+ // 1. it is a multiple of card size
+ // 2. any remaining shard may be filled with a filler object
+ //
+ // The idea is that the allocation starts and ends at card boundaries. Because
+ // a region's end is card-aligned, the remainder shard that must be filled is
+ // at the start of the free space.
+ //
+ // This is merely a helper method to use for the purpose of such a calculation.
+ size_t ShenandoahFreeSet::get_usable_free_words(size_t free_bytes) const {
+ // e.g. card_size is 512, card_shift is 9, min_fill_size() is 8
+ // free is 514
+ // usable_free is 512, which is decreased to 0
+ size_t usable_free = (free_bytes / CardTable::card_size()) << CardTable::card_shift();
+ assert(usable_free <= free_bytes, "Sanity check");
+ if ((free_bytes != usable_free) && (free_bytes - usable_free < ShenandoahHeap::min_fill_size() * HeapWordSize)) {
+ // After aligning to card multiples, the remainder would be smaller than
+ // the minimum filler object, so we'll need to take away another card's
+ // worth to construct a filler object.
+ if (usable_free >= CardTable::card_size()) {
+ usable_free -= CardTable::card_size();
+ } else {
+ assert(usable_free == 0, "usable_free is a multiple of card_size and card_size > min_fill_size");
+ }
+ }
+
+ return usable_free / HeapWordSize;
+ }
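A standalone sketch of the same card-alignment arithmetic, with assumed constants (512-byte cards, 8-byte heap words, 64-byte minimum filler) standing in for the CardTable and ShenandoahHeap queries:

    #include <cstddef>

    static const size_t kCardSize     = 512;   // assumed CardTable::card_size()
    static const size_t kHeapWordSize = 8;     // assumed HeapWordSize
    static const size_t kMinFillBytes = 64;    // assumed min_fill_size() in bytes

    size_t usable_free_words(size_t free_bytes) {
      size_t usable = (free_bytes / kCardSize) * kCardSize;   // round down to a card multiple
      if (free_bytes != usable && free_bytes - usable < kMinFillBytes) {
        // Remainder is too small for a filler object: give back one more card.
        usable = (usable >= kCardSize) ? usable - kCardSize : 0;
      }
      return usable / kHeapWordSize;
    }
    // usable_free_words(514)  == 0   (matches the in-code example above)
    // usable_free_words(1090) == 128 (1024 usable bytes; the 66-byte shard fits a filler)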
+
+ // Given a size argument, which is a multiple of card size, a request struct
+ // for a PLAB, and an old region, return a pointer to the allocated space for
+ // a PLAB which is card-aligned and where any remaining shard in the region
+ // has been suitably filled by a filler object.
+ // It is assumed (and assertion-checked) that such an allocation is always possible.
+ HeapWord* ShenandoahFreeSet::allocate_aligned_plab(size_t size, ShenandoahAllocRequest& req, ShenandoahHeapRegion* r) {
+ assert(_heap->mode()->is_generational(), "PLABs are only for generational mode");
+ assert(r->is_old(), "All PLABs reside in old-gen");
+ assert(!req.is_mutator_alloc(), "PLABs should not be allocated by mutators.");
+ assert(is_aligned(size, CardTable::card_size_in_words()), "Align by design");
+
+ HeapWord* result = r->allocate_aligned(size, req, CardTable::card_size());
+ assert(result != nullptr, "Allocation cannot fail");
+ assert(r->top() <= r->end(), "Allocation cannot span end of region");
+ assert(is_aligned(result, CardTable::card_size_in_words()), "Align by design");
+ return result;
+ }
- if (_heap->is_concurrent_weak_root_in_progress() &&
- r->is_trash()) {
+ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
+ assert (has_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());
+ if (_heap->is_concurrent_weak_root_in_progress() && r->is_trash()) {
return nullptr;
}
-
+ HeapWord* result = nullptr;
try_recycle_trashed(r);
-
in_new_region = r->is_empty();
- HeapWord* result = nullptr;
- size_t size = req.size();
+ if (in_new_region) {
+ log_debug(gc)("Using new region (" SIZE_FORMAT ") for %s (" PTR_FORMAT ").",
+ r->index(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(&req));
+ assert(!r->is_affiliated(), "New region " SIZE_FORMAT " should be unaffiliated", r->index());
+ r->set_affiliation(req.affiliation());
+ if (r->is_old()) {
+ // Any OLD region allocated during concurrent coalesce-and-fill does not need to be coalesced and filled because
+ // all objects allocated within this region are above TAMS (and thus are implicitly marked). In case this is an
+ // OLD region and concurrent preparation for mixed evacuations visits this region before the start of the next
+ // old-gen concurrent mark (i.e. this region is allocated following the start of old-gen concurrent mark but before
+ // concurrent preparations for mixed evacuations are completed), we mark this region as not requiring any
+ // coalesce-and-fill processing.
+ r->end_preemptible_coalesce_and_fill();
+ _heap->old_generation()->clear_cards_for(r);
+ }
+ _heap->generation_for(r->affiliation())->increment_affiliated_region_count();
- if (req.is_lab_alloc()) {
- size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
- if (size > free) {
- size = free;
+ #ifdef ASSERT
+ ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
+ assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom");
+ assert(ctx->is_bitmap_clear_range(ctx->top_bitmap(r), r->end()), "Bitmap above top_bitmap() must be clear");
+ #endif
+ log_debug(gc)("Using new region (" SIZE_FORMAT ") for %s (" PTR_FORMAT ").",
+ r->index(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(&req));
+ } else {
+ assert(r->is_affiliated(), "Region " SIZE_FORMAT " that is not new should be affiliated", r->index());
+ if (r->affiliation() != req.affiliation()) {
+ assert(_heap->mode()->is_generational(), "Request for %s from %s region should only happen in generational mode.",
+ req.affiliation_name(), r->affiliation_name());
+ return nullptr;
}
- if (size >= req.min_size()) {
- result = r->allocate(size, req.type());
- assert (result != nullptr, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size);
+ }
+
+ // req.size() is in words, r->free() is in bytes.
+ if (req.is_lab_alloc()) {
+ size_t adjusted_size = req.size();
+ size_t free = r->free(); // free represents bytes available within region r
+ if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
+ // This is a PLAB allocation
+ assert(_heap->mode()->is_generational(), "PLABs are only for generational mode");
+ assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, r->index()),
+ "PLABS must be allocated in old_collector_free regions");
+
+ // Need to ensure that PLABs are aligned on a multiple of card size
+ // Convert free from unaligned bytes to aligned number of words
+ size_t usable_free = get_usable_free_words(free);
+ if (adjusted_size > usable_free) {
+ adjusted_size = usable_free;
+ }
+ adjusted_size = align_down(adjusted_size, CardTable::card_size_in_words());
+ if (adjusted_size >= req.min_size()) {
+ result = allocate_aligned_plab(adjusted_size, req, r);
+ assert(result != nullptr, "allocate must succeed");
+ req.set_actual_size(adjusted_size);
+ } else {
+ // Otherwise, leave result == nullptr because the adjusted size is smaller than min size.
+ log_trace(gc, free)("Failed to shrink PLAB request (" SIZE_FORMAT ") in region " SIZE_FORMAT " to " SIZE_FORMAT
+ " because min_size() is " SIZE_FORMAT, req.size(), r->index(), adjusted_size, req.min_size());
+ }
+ } else {
+ // This is a GCLAB or a TLAB allocation
+ // Convert free from unaligned bytes to aligned number of words
+ free = align_down(free >> LogHeapWordSize, MinObjAlignment);
+ if (adjusted_size > free) {
+ adjusted_size = free;
+ }
+ if (adjusted_size >= req.min_size()) {
+ result = r->allocate(adjusted_size, req);
+ assert (result != nullptr, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, adjusted_size);
+ req.set_actual_size(adjusted_size);
+ } else {
+ log_trace(gc, free)("Failed to shrink TLAB or GCLAB request (" SIZE_FORMAT ") in region " SIZE_FORMAT " to " SIZE_FORMAT
+ " because min_size() is " SIZE_FORMAT, req.size(), r->index(), adjusted_size, req.min_size());
+ }
}
} else {
- result = r->allocate(size, req.type());
+ size_t size = req.size();
+ result = r->allocate(size, req);
+ if (result != nullptr) {
+ // Record actual allocation size
+ req.set_actual_size(size);
+ }
}
if (result != nullptr) {
// Allocation successful, bump stats:
if (req.is_mutator_alloc()) {
- increase_used(size * HeapWordSize);
- }
-
- // Record actual allocation size
- req.set_actual_size(size);
+ assert(req.is_young(), "Mutator allocations always come from young generation.");
+ _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, req.actual_size() * HeapWordSize);
+ } else {
+ assert(req.is_gc_alloc(), "Should be gc_alloc since req wasn't mutator alloc");
- if (req.is_gc_alloc()) {
+ // For GC allocations, we advance update_watermark because the objects relocated into this memory during
+ // evacuation are not updated during evacuation. For both young and old regions r, it is essential that all
+ // PLABs be made parsable at the end of evacuation. This is enabled by retiring all PLABs at the end of evacuation.
r->set_update_watermark(r->top());
+ if (r->is_old()) {
+ _partitions.increase_used(ShenandoahFreeSetPartitionId::OldCollector, req.actual_size() * HeapWordSize);
+ assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "old-gen allocations use PLAB or shared allocation");
+ // for plabs, we'll sort the difference between evac and promotion usage when we retire the plab
+ } else {
+ _partitions.increase_used(ShenandoahFreeSetPartitionId::Collector, req.actual_size() * HeapWordSize);
+ }
}
}
- if (result == nullptr || has_no_alloc_capacity(r)) {
- // Region cannot afford this or future allocations. Retire it.
- //
- // While this seems a bit harsh, especially in the case when this large allocation does not
- // fit, but the next small one would, we are risking to inflate scan times when lots of
- // almost-full regions precede the fully-empty region where we want allocate the entire TLAB.
- // TODO: Record first fully-empty region, and use that for large allocations
+ static const size_t min_capacity = (size_t) (ShenandoahHeapRegion::region_size_bytes() * (1.0 - 1.0 / ShenandoahEvacWaste));
+ size_t ac = alloc_capacity(r);
+
+ if (((result == nullptr) && (ac < min_capacity)) || (ac < PLAB::min_size() * HeapWordSize)) {
+ // Regardless of whether this allocation succeeded, if the remaining memory is less than PLAB::min_size(), retire this region.
+ // Note that retire_from_partition() increases used to account for waste.
+
+ // Also, if this allocation request failed and the memory consumed within this region times ShenandoahEvacWaste exceeds
+ // the region size, then retire the region so that subsequent searches can find available memory more quickly.
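+ // For example, assuming the default ShenandoahEvacWaste of 1.2, min_capacity is one sixth of the region size,
+ // so a region that is already more than five sixths consumed is retired rather than searched again.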
- // Record the remainder as allocation waste
+ size_t idx = r->index();
+ ShenandoahFreeSetPartitionId orig_partition;
if (req.is_mutator_alloc()) {
- size_t waste = r->free();
- if (waste > 0) {
- increase_used(waste);
- _heap->notify_mutator_alloc_words(waste >> LogHeapWordSize, true);
+ orig_partition = ShenandoahFreeSetPartitionId::Mutator;
+ } else if (req.type() == ShenandoahAllocRequest::_alloc_gclab) {
+ orig_partition = ShenandoahFreeSetPartitionId::Collector;
+ } else if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
+ orig_partition = ShenandoahFreeSetPartitionId::OldCollector;
+ } else {
+ assert(req.type() == ShenandoahAllocRequest::_alloc_shared_gc, "Unexpected allocation type");
+ if (req.is_old()) {
+ orig_partition = ShenandoahFreeSetPartitionId::OldCollector;
+ } else {
+ orig_partition = ShenandoahFreeSetPartitionId::Collector;
}
}
-
- size_t num = r->index();
- _collector_free_bitmap.clear_bit(num);
- _mutator_free_bitmap.clear_bit(num);
- // Touched the bounds? Need to update:
- if (touches_bounds(num)) {
- adjust_bounds();
- }
- assert_bounds();
+ _partitions.retire_from_partition(orig_partition, idx, r->used());
+ _partitions.assert_bounds();
}
return result;
}
- bool ShenandoahFreeSet::touches_bounds(size_t num) const {
- return num == _collector_leftmost || num == _collector_rightmost || num == _mutator_leftmost || num == _mutator_rightmost;
- }
-
- void ShenandoahFreeSet::recompute_bounds() {
- // Reset to the most pessimistic case:
- _mutator_rightmost = _max - 1;
- _mutator_leftmost = 0;
- _collector_rightmost = _max - 1;
- _collector_leftmost = 0;
-
- // ...and adjust from there
- adjust_bounds();
- }
-
- void ShenandoahFreeSet::adjust_bounds() {
- // Rewind both mutator bounds until the next bit.
- while (_mutator_leftmost < _max && !is_mutator_free(_mutator_leftmost)) {
- _mutator_leftmost++;
- }
- while (_mutator_rightmost > 0 && !is_mutator_free(_mutator_rightmost)) {
- _mutator_rightmost--;
- }
- // Rewind both collector bounds until the next bit.
- while (_collector_leftmost < _max && !is_collector_free(_collector_leftmost)) {
- _collector_leftmost++;
- }
- while (_collector_rightmost > 0 && !is_collector_free(_collector_rightmost)) {
- _collector_rightmost--;
- }
- }
-
HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
+ assert(req.is_mutator_alloc(), "All humongous allocations are performed by mutator");
shenandoah_assert_heaplocked();
size_t words_size = req.size();
- size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
+ idx_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
- // No regions left to satisfy allocation, bye.
- if (num > mutator_count()) {
+ assert(req.is_young(), "Humongous regions always allocated in YOUNG");
+ ShenandoahGeneration* generation = _heap->generation_for(req.affiliation());
+
+ // Check if there are enough regions left to satisfy allocation.
+ if (num > (idx_t) _partitions.count(ShenandoahFreeSetPartitionId::Mutator)) {
return nullptr;
}
+ idx_t start_range = _partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator);
+ idx_t end_range = _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator) + 1;
+ idx_t last_possible_start = end_range - num;
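+ // For example, if the empty Mutator interval spans regions [0, 99], then end_range is 100 and a cluster of
+ // num == 8 contiguous regions cannot start beyond region 92.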
+
// Find the continuous interval of $num regions, starting from $beg and ending in $end,
// inclusive. Contiguous allocations are biased to the beginning.
-
- size_t beg = _mutator_leftmost;
- size_t end = beg;
+ idx_t beg = _partitions.find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId::Mutator,
+ start_range, num);
+ if (beg > last_possible_start) {
+ // Hit the end, goodbye
+ return nullptr;
+ }
+ idx_t end = beg;
while (true) {
- if (end >= _max) {
- // Hit the end, goodbye
- return nullptr;
- }
-
- // If regions are not adjacent, then current [beg; end] is useless, and we may fast-forward.
- // If region is not completely free, the current [beg; end] is useless, and we may fast-forward.
- if (!is_mutator_free(end) || !can_allocate_from(_heap->get_region(end))) {
- end++;
- beg = end;
- continue;
+ // We've confirmed num contiguous regions belonging to Mutator partition, so no need to confirm membership.
+ // If region is not completely free, the current [beg; end] is useless, and we may fast-forward. If we can extend
+ // the existing range, we can exploit that certain regions are already known to be in the Mutator free set.
+ while (!can_allocate_from(_heap->get_region(end))) {
+ // region[end] is not empty, so we restart our search after region[end]
+ idx_t slide_delta = end + 1 - beg;
+ if (beg + slide_delta > last_possible_start) {
+ // no room to slide
+ return nullptr;
+ }
+ for (idx_t span_end = beg + num; slide_delta > 0; slide_delta--) {
+ if (!_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, span_end)) {
+ beg = _partitions.find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId::Mutator,
+ span_end + 1, num);
+ break;
+ } else {
+ beg++;
+ span_end++;
+ }
+ }
+ // Here, either beg identifies a range of num regions all of which are in the Mutator free set, or beg > last_possible_start
+ if (beg > last_possible_start) {
+ // Hit the end, goodbye
+ return nullptr;
+ }
+ end = beg;
}
if ((end - beg + 1) == num) {
// found the match
break;
}
end++;
}
size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
-
// Initialize regions:
- for (size_t i = beg; i <= end; i++) {
+ for (idx_t i = beg; i <= end; i++) {
ShenandoahHeapRegion* r = _heap->get_region(i);
try_recycle_trashed(r);
assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous");
assert(r->is_empty(), "Should be empty");
// Trailing region may be non-full; record the remainder there
size_t used_words;
if ((i == end) && (remainder != 0)) {
used_words = remainder;
} else {
used_words = ShenandoahHeapRegion::region_size_words();
}
+ r->set_affiliation(req.affiliation());
+ r->set_update_watermark(r->bottom());
r->set_top(r->bottom() + used_words);
-
- _mutator_free_bitmap.clear_bit(r->index());
}
-
- // While individual regions report their true use, all humongous regions are
- // marked used in the free set.
- increase_used(ShenandoahHeapRegion::region_size_bytes() * num);
-
+ generation->increase_affiliated_region_count(num);
if (remainder != 0) {
// Record this remainder as allocation waste
_heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);
}
- // Allocated at left/rightmost? Move the bounds appropriately.
- if (beg == _mutator_leftmost || end == _mutator_rightmost) {
- adjust_bounds();
- }
- assert_bounds();
+ // retire_range_from_partition() will adjust bounds on Mutator free set if appropriate
+ _partitions.retire_range_from_partition(ShenandoahFreeSetPartitionId::Mutator, beg, end);
+ size_t total_humongous_size = ShenandoahHeapRegion::region_size_bytes() * num;
+ _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, total_humongous_size);
+ _partitions.assert_bounds();
req.set_actual_size(words_size);
- return _heap->get_region(beg)->bottom();
- }
-
- bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) {
- return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
- }
-
- size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) {
- if (r->is_trash()) {
- // This would be recycled on allocation path
- return ShenandoahHeapRegion::region_size_bytes();
- } else {
- return r->free();
+ if (remainder != 0) {
+ req.set_waste(ShenandoahHeapRegion::region_size_words() - remainder);
}
+ return _heap->get_region(beg)->bottom();
}
- bool ShenandoahFreeSet::has_no_alloc_capacity(ShenandoahHeapRegion *r) {
- return alloc_capacity(r) == 0;
- }
-
- void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
+ void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion* r) {
if (r->is_trash()) {
- _heap->decrease_used(r->used());
r->recycle();
}
}
void ShenandoahFreeSet::recycle_trash() {
// lock is not reentrant, check we don't have it
shenandoah_assert_not_heaplocked();
-
+ size_t count = 0;
for (size_t i = 0; i < _heap->num_regions(); i++) {
ShenandoahHeapRegion* r = _heap->get_region(i);
if (r->is_trash()) {
- ShenandoahHeapLocker locker(_heap->lock());
- try_recycle_trashed(r);
+ _trash_regions[count++] = r;
}
- SpinPause(); // allow allocators to take the lock
+ }
+
+ size_t total_batches = 0;
+ jlong batch_start_time = 0;
+ jlong recycle_trash_start_time = os::javaTimeNanos(); // This value will be treated as the initial batch_start_time
+ jlong batch_end_time = recycle_trash_start_time;
+ // Process as many batches as can be processed within 10 us.
+ static constexpr jlong deadline_ns = 10000; // 10 us
+ size_t idx = 0;
+ jlong predicted_next_batch_end_time;
+ jlong batch_process_time_estimate = 0;
+ while (idx < count) {
+ if (idx > 0) {
+ os::naked_yield(); // Yield to allow allocators to take the lock, except on the first iteration
+ }
+ // Avoid another call to javaTimeNanos() if we already know time at which last batch ended
+ batch_start_time = batch_end_time;
+ const jlong deadline = batch_start_time + deadline_ns;
+
+ ShenandoahHeapLocker locker(_heap->lock());
+ do {
+ // Measurements on typical 2024 hardware suggest it typically requires between 1400 and 2000 ns to process a batch of
+ // 32 regions, assuming low contention with other threads. Sometimes this goes higher, when mutator threads
+ // are contending for CPU cores and/or the heap lock. On this hardware with a 10 us deadline, we expect 3-6 batches
+ // to be processed between yields most of the time.
+ //
+ // Note that the deadline is measured from the end of the previous batch. If yield() or acquisition of the heap lock
+ // takes a "long time", we will have less time to process regions, but we will always process at least one batch between
+ // yields. Yielding more frequently when there is heavy contention for the heap lock or for CPU cores is considered the
+ // right thing to do.
+ const size_t REGIONS_PER_BATCH = 32;
+ size_t max_idx = MIN2(count, idx + REGIONS_PER_BATCH);
+ while (idx < max_idx) {
+ try_recycle_trashed(_trash_regions[idx++]);
+ }
+ total_batches++;
+ batch_end_time = os::javaTimeNanos();
+ // Estimate includes historic combination of yield times and heap lock acquisition times.
+ batch_process_time_estimate = (batch_end_time - recycle_trash_start_time) / total_batches;
+ predicted_next_batch_end_time = batch_end_time + batch_process_time_estimate;
+ } while ((idx < count) && (predicted_next_batch_end_time < deadline));
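+ // For example, if the first three batches complete 6000 ns after recycle_trash_start_time, the per-batch estimate
+ // is 2000 ns, so a fourth batch is attempted only if it is predicted to finish before the 10 us deadline.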
}
}
- void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
+ void ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) {
size_t idx = r->index();
- assert(_mutator_free_bitmap.at(idx), "Should be in mutator view");
+ assert(_partitions.partition_id_matches(idx, ShenandoahFreeSetPartitionId::Mutator), "Should be in mutator view");
assert(can_allocate_from(r), "Should not be allocated");
- _mutator_free_bitmap.clear_bit(idx);
- _collector_free_bitmap.set_bit(idx);
- _collector_leftmost = MIN2(idx, _collector_leftmost);
- _collector_rightmost = MAX2(idx, _collector_rightmost);
+ ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
+ size_t region_capacity = alloc_capacity(r);
+ _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
+ ShenandoahFreeSetPartitionId::OldCollector, region_capacity);
+ _partitions.assert_bounds();
+ _heap->old_generation()->augment_evacuation_reserve(region_capacity);
+ bool transferred = gen_heap->generation_sizer()->transfer_to_old(1);
+ if (!transferred) {
+ log_warning(gc, free)("Forcing transfer of region " SIZE_FORMAT " to old reserve.", idx);
+ gen_heap->generation_sizer()->force_transfer_to_old(1);
+ }
+ // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
+ // to recycle trash before attempting to allocate anything in the region.
+ }
- _capacity -= alloc_capacity(r);
+ void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
+ size_t idx = r->index();
- if (touches_bounds(idx)) {
- adjust_bounds();
- }
- assert_bounds();
+ assert(_partitions.partition_id_matches(idx, ShenandoahFreeSetPartitionId::Mutator), "Should be in mutator view");
+ assert(can_allocate_from(r), "Should not be allocated");
+
+ size_t ac = alloc_capacity(r);
+ _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
+ ShenandoahFreeSetPartitionId::Collector, ac);
+ _partitions.assert_bounds();
+
+ // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
+ // to recycle trash before attempting to allocate anything in the region.
}
void ShenandoahFreeSet::clear() {
shenandoah_assert_heaplocked();
clear_internal();
}
void ShenandoahFreeSet::clear_internal() {
- _mutator_free_bitmap.clear();
- _collector_free_bitmap.clear();
- _mutator_leftmost = _max;
- _mutator_rightmost = 0;
- _collector_leftmost = _max;
- _collector_rightmost = 0;
- _capacity = 0;
- _used = 0;
+ _partitions.make_all_regions_unavailable();
+
+ _alloc_bias_weight = 0;
+ _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::Mutator, true);
+ _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::Collector, false);
+ _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::OldCollector, false);
}
- void ShenandoahFreeSet::rebuild() {
- shenandoah_assert_heaplocked();
- clear();
+ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regions, size_t &old_cset_regions,
+ size_t &first_old_region, size_t &last_old_region,
+ size_t &old_region_count) {
+ clear_internal();
- for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
+ first_old_region = _heap->num_regions();
+ last_old_region = 0;
+ old_region_count = 0;
+ old_cset_regions = 0;
+ young_cset_regions = 0;
+
+ size_t region_size_bytes = _partitions.region_size_bytes();
+ size_t max_regions = _partitions.max_regions();
+
+ size_t mutator_leftmost = max_regions;
+ size_t mutator_rightmost = 0;
+ size_t mutator_leftmost_empty = max_regions;
+ size_t mutator_rightmost_empty = 0;
+ size_t mutator_regions = 0;
+ size_t mutator_used = 0;
+
+ size_t old_collector_leftmost = max_regions;
+ size_t old_collector_rightmost = 0;
+ size_t old_collector_leftmost_empty = max_regions;
+ size_t old_collector_rightmost_empty = 0;
+ size_t old_collector_regions = 0;
+ size_t old_collector_used = 0;
+
+ size_t num_regions = _heap->num_regions();
+ for (size_t idx = 0; idx < num_regions; idx++) {
ShenandoahHeapRegion* region = _heap->get_region(idx);
+ if (region->is_trash()) {
+ // Trashed regions represent regions that had been in the collection set but have not yet been "cleaned up".
+ // The cset regions are not "trashed" until we have finished update refs.
+ if (region->is_old()) {
+ old_cset_regions++;
+ } else {
+ assert(region->is_young(), "Trashed region should be old or young");
+ young_cset_regions++;
+ }
+ } else if (region->is_old()) {
+ // count both humongous and regular regions, but don't count trash (cset) regions.
+ old_region_count++;
+ if (first_old_region > idx) {
+ first_old_region = idx;
+ }
+ last_old_region = idx;
+ }
if (region->is_alloc_allowed() || region->is_trash()) {
- assert(!region->is_cset(), "Shouldn't be adding those to the free set");
+ assert(!region->is_cset(), "Shouldn't be adding cset regions to the free set");
+
+ // Do not add regions that would almost surely fail allocation
+ size_t ac = alloc_capacity(region);
+ if (ac > PLAB::min_size() * HeapWordSize) {
+ if (region->is_trash() || !region->is_old()) {
+ // Both young and old collected regions (trashed) are placed into the Mutator set
+ _partitions.raw_assign_membership(idx, ShenandoahFreeSetPartitionId::Mutator);
+ if (idx < mutator_leftmost) {
+ mutator_leftmost = idx;
+ }
+ if (idx > mutator_rightmost) {
+ mutator_rightmost = idx;
+ }
+ if (ac == region_size_bytes) {
+ if (idx < mutator_leftmost_empty) {
+ mutator_leftmost_empty = idx;
+ }
+ if (idx > mutator_rightmost_empty) {
+ mutator_rightmost_empty = idx;
+ }
+ }
+ mutator_regions++;
+ mutator_used += (region_size_bytes - ac);
+ } else {
+ // !region->is_trash() && region->is_old()
+ _partitions.raw_assign_membership(idx, ShenandoahFreeSetPartitionId::OldCollector);
+ if (idx < old_collector_leftmost) {
+ old_collector_leftmost = idx;
+ }
+ if (idx > old_collector_rightmost) {
+ old_collector_rightmost = idx;
+ }
+ if (ac == region_size_bytes) {
+ if (idx < old_collector_leftmost_empty) {
+ old_collector_leftmost_empty = idx;
+ }
+ if (idx > old_collector_rightmost_empty) {
+ old_collector_rightmost_empty = idx;
+ }
+ }
+ old_collector_regions++;
+ old_collector_used += (region_size_bytes - ac);
+ }
+ }
+ }
+ }
+ log_debug(gc)(" At end of prep_to_rebuild, mutator_leftmost: " SIZE_FORMAT
+ ", mutator_rightmost: " SIZE_FORMAT
+ ", mutator_leftmost_empty: " SIZE_FORMAT
+ ", mutator_rightmost_empty: " SIZE_FORMAT
+ ", mutator_regions: " SIZE_FORMAT
+ ", mutator_used: " SIZE_FORMAT,
+ mutator_leftmost, mutator_rightmost, mutator_leftmost_empty, mutator_rightmost_empty,
+ mutator_regions, mutator_used);
+
+ log_debug(gc)(" old_collector_leftmost: " SIZE_FORMAT
+ ", old_collector_rightmost: " SIZE_FORMAT
+ ", old_collector_leftmost_empty: " SIZE_FORMAT
+ ", old_collector_rightmost_empty: " SIZE_FORMAT
+ ", old_collector_regions: " SIZE_FORMAT
+ ", old_collector_used: " SIZE_FORMAT,
+ old_collector_leftmost, old_collector_rightmost, old_collector_leftmost_empty, old_collector_rightmost_empty,
+ old_collector_regions, old_collector_used);
+
+ idx_t rightmost_idx = (mutator_leftmost == max_regions)? -1: (idx_t) mutator_rightmost;
+ idx_t rightmost_empty_idx = (mutator_leftmost_empty == max_regions)? -1: (idx_t) mutator_rightmost_empty;
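+ // A leftmost value of max_regions means no region qualified for the partition; the -1 sentinel keeps the
+ // rightmost bound below the leftmost bound so the interval is recorded as empty.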
+ _partitions.establish_mutator_intervals(mutator_leftmost, rightmost_idx, mutator_leftmost_empty, rightmost_empty_idx,
+ mutator_regions, mutator_used);
+ rightmost_idx = (old_collector_leftmost == max_regions)? -1: (idx_t) old_collector_rightmost;
+ rightmost_empty_idx = (old_collector_leftmost_empty == max_regions)? -1: (idx_t) old_collector_rightmost_empty;
+ _partitions.establish_old_collector_intervals(old_collector_leftmost, rightmost_idx, old_collector_leftmost_empty,
+ rightmost_empty_idx, old_collector_regions, old_collector_used);
+ log_debug(gc)(" After find_regions_with_alloc_capacity(), Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "],"
+ " Old Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
+ _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
+ _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
+ _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector),
+ _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector));
+ }
- // Do not add regions that would surely fail allocation
- if (has_no_alloc_capacity(region)) continue;
+ // Returns the number of regions transferred; adds the transferred bytes to the in-out argument bytes_transferred
+ size_t ShenandoahFreeSet::transfer_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId which_collector,
+ size_t max_xfer_regions,
+ size_t& bytes_transferred) {
+ shenandoah_assert_heaplocked();
+ size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
+ size_t transferred_regions = 0;
+ idx_t rightmost = _partitions.rightmost_empty(which_collector);
+ for (idx_t idx = _partitions.leftmost_empty(which_collector); (transferred_regions < max_xfer_regions) && (idx <= rightmost); ) {
+ assert(_partitions.in_free_set(which_collector, idx), "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, idx);
+ // Note: can_allocate_from() denotes that region is entirely empty
+ if (can_allocate_from(idx)) {
+ _partitions.move_from_partition_to_partition(idx, which_collector, ShenandoahFreeSetPartitionId::Mutator, region_size_bytes);
+ transferred_regions++;
+ bytes_transferred += region_size_bytes;
+ }
+ idx = _partitions.find_index_of_next_available_region(which_collector, idx + 1);
+ }
+ return transferred_regions;
+ }
- _capacity += alloc_capacity(region);
- assert(_used <= _capacity, "must not use more than we have");
+ // Returns the number of regions transferred; adds the transferred bytes to the in-out argument bytes_transferred
+ size_t ShenandoahFreeSet::transfer_non_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId collector_id,
+ size_t max_xfer_regions,
+ size_t& bytes_transferred) {
+ shenandoah_assert_heaplocked();
+ size_t transferred_regions = 0;
+ idx_t rightmost = _partitions.rightmost(collector_id);
+ for (idx_t idx = _partitions.leftmost(collector_id); (transferred_regions < max_xfer_regions) && (idx <= rightmost); ) {
+ assert(_partitions.in_free_set(collector_id, idx), "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, idx);
+ size_t ac = alloc_capacity(idx);
+ if (ac > 0) {
+ _partitions.move_from_partition_to_partition(idx, collector_id, ShenandoahFreeSetPartitionId::Mutator, ac);
+ transferred_regions++;
+ bytes_transferred += ac;
+ }
+ idx = _partitions.find_index_of_next_available_region(collector_id, idx + 1);
+ }
+ return transferred_regions;
+ }
- assert(!is_mutator_free(idx), "We are about to add it, it shouldn't be there already");
- _mutator_free_bitmap.set_bit(idx);
+ void ShenandoahFreeSet::move_regions_from_collector_to_mutator(size_t max_xfer_regions) {
+ size_t collector_xfer = 0;
+ size_t old_collector_xfer = 0;
+
+ // Process empty regions within the Collector free partition
+ if ((max_xfer_regions > 0) &&
+ (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Collector)
+ <= _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Collector))) {
+ ShenandoahHeapLocker locker(_heap->lock());
+ max_xfer_regions -=
+ transfer_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId::Collector, max_xfer_regions,
+ collector_xfer);
+ }
+
+ // Process empty regions within the OldCollector free partition
+ if ((max_xfer_regions > 0) &&
+ (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::OldCollector)
+ <= _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::OldCollector))) {
+ ShenandoahHeapLocker locker(_heap->lock());
+ size_t old_collector_regions =
+ transfer_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId::OldCollector, max_xfer_regions,
+ old_collector_xfer);
+ max_xfer_regions -= old_collector_regions;
+ if (old_collector_regions > 0) {
+ ShenandoahGenerationalHeap::cast(_heap)->generation_sizer()->transfer_to_young(old_collector_regions);
}
}
- // Evac reserve: reserve trailing space for evacuations
- size_t to_reserve = _heap->max_capacity() / 100 * ShenandoahEvacReserve;
- size_t reserved = 0;
+ // If there are any non-empty regions within Collector partition, we can also move them to the Mutator free partition
+ if ((max_xfer_regions > 0) && (_partitions.leftmost(ShenandoahFreeSetPartitionId::Collector)
+ <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector))) {
+ ShenandoahHeapLocker locker(_heap->lock());
+ max_xfer_regions -=
+ transfer_non_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId::Collector, max_xfer_regions,
+ collector_xfer);
+ }
- for (size_t idx = _heap->num_regions() - 1; idx > 0; idx--) {
- if (reserved >= to_reserve) break;
+ size_t total_xfer = collector_xfer + old_collector_xfer;
+ log_info(gc, ergo)("At start of update refs, moving " SIZE_FORMAT "%s to Mutator free set from Collector Reserve ("
+ SIZE_FORMAT "%s) and from Old Collector Reserve (" SIZE_FORMAT "%s)",
+ byte_size_in_proper_unit(total_xfer), proper_unit_for_byte_size(total_xfer),
+ byte_size_in_proper_unit(collector_xfer), proper_unit_for_byte_size(collector_xfer),
+ byte_size_in_proper_unit(old_collector_xfer), proper_unit_for_byte_size(old_collector_xfer));
+ }
- ShenandoahHeapRegion* region = _heap->get_region(idx);
- if (_mutator_free_bitmap.at(idx) && can_allocate_from(region)) {
- _mutator_free_bitmap.clear_bit(idx);
- _collector_free_bitmap.set_bit(idx);
- size_t ac = alloc_capacity(region);
- _capacity -= ac;
- reserved += ac;
+
+ // Overwrite arguments to represent the amount of memory in each generation that is about to be recycled
+ void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_cset_regions, size_t &old_cset_regions,
+ size_t &first_old_region, size_t &last_old_region, size_t &old_region_count) {
+ shenandoah_assert_heaplocked();
+ // This resets all state information, removing all regions from all sets.
+ clear();
+ log_debug(gc, free)("Rebuilding FreeSet");
+
+ // This places regions that have alloc_capacity into the old_collector set if they identify as is_old() or the
+ // mutator set otherwise. All trashed (cset) regions are affiliated young and placed in mutator set.
+ find_regions_with_alloc_capacity(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
+ }
+
+ void ShenandoahFreeSet::establish_generation_sizes(size_t young_region_count, size_t old_region_count) {
+ assert(young_region_count + old_region_count == ShenandoahHeap::heap()->num_regions(), "Sanity");
+ if (ShenandoahHeap::heap()->mode()->is_generational()) {
+ ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
+ ShenandoahOldGeneration* old_gen = heap->old_generation();
+ ShenandoahYoungGeneration* young_gen = heap->young_generation();
+ size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
+
+ size_t original_old_capacity = old_gen->max_capacity();
+ size_t new_old_capacity = old_region_count * region_size_bytes;
+ size_t new_young_capacity = young_region_count * region_size_bytes;
+ old_gen->set_capacity(new_old_capacity);
+ young_gen->set_capacity(new_young_capacity);
+
+ if (new_old_capacity > original_old_capacity) {
+ size_t region_count = (new_old_capacity - original_old_capacity) / region_size_bytes;
+ log_info(gc)("Transfer " SIZE_FORMAT " region(s) from %s to %s, yielding increased size: " PROPERFMT,
+ region_count, young_gen->name(), old_gen->name(), PROPERFMTARGS(new_old_capacity));
+ } else if (new_old_capacity < original_old_capacity) {
+ size_t region_count = (original_old_capacity - new_old_capacity) / region_size_bytes;
+ log_info(gc)("Transfer " SIZE_FORMAT " region(s) from %s to %s, yielding increased size: " PROPERFMT,
+ region_count, old_gen->name(), young_gen->name(), PROPERFMTARGS(new_young_capacity));
+ }
+ // This balances generations, so clear any pending request to balance.
+ old_gen->set_region_balance(0);
+ }
+ }
+
+ void ShenandoahFreeSet::finish_rebuild(size_t young_cset_regions, size_t old_cset_regions, size_t old_region_count,
+ bool have_evacuation_reserves) {
+ shenandoah_assert_heaplocked();
+ size_t young_reserve(0), old_reserve(0);
+
+ if (_heap->mode()->is_generational()) {
+ compute_young_and_old_reserves(young_cset_regions, old_cset_regions, have_evacuation_reserves,
+ young_reserve, old_reserve);
+ } else {
+ young_reserve = (_heap->max_capacity() / 100) * ShenandoahEvacReserve;
+ old_reserve = 0;
+ }
+
+ // Move some of the mutator regions in the Collector and OldCollector partitions in order to satisfy
+ // young_reserve and old_reserve.
+ reserve_regions(young_reserve, old_reserve, old_region_count);
+ size_t young_region_count = _heap->num_regions() - old_region_count;
+ establish_generation_sizes(young_region_count, old_region_count);
+ establish_old_collector_alloc_bias();
+ _partitions.assert_bounds();
+ log_status();
+ }
+
+ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions, size_t old_cset_regions,
+ bool have_evacuation_reserves,
+ size_t& young_reserve_result, size_t& old_reserve_result) const {
+ shenandoah_assert_generational();
+ const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
+
+ ShenandoahOldGeneration* const old_generation = _heap->old_generation();
+ size_t old_available = old_generation->available();
+ size_t old_unaffiliated_regions = old_generation->free_unaffiliated_regions();
+ ShenandoahYoungGeneration* const young_generation = _heap->young_generation();
+ size_t young_capacity = young_generation->max_capacity();
+ size_t young_unaffiliated_regions = young_generation->free_unaffiliated_regions();
+
+ // Add in the regions we anticipate to be freed by evacuation of the collection set
+ old_unaffiliated_regions += old_cset_regions;
+ young_unaffiliated_regions += young_cset_regions;
+
+ // Consult old-region balance to make adjustments to current generation capacities and availability.
+ // The generation region transfers take place after we rebuild.
+ const ssize_t old_region_balance = old_generation->get_region_balance();
+ if (old_region_balance != 0) {
+ #ifdef ASSERT
+ if (old_region_balance > 0) {
+ assert(old_region_balance <= checked_cast<ssize_t>(old_unaffiliated_regions), "Cannot transfer regions that are affiliated");
+ } else {
+ assert(0 - old_region_balance <= checked_cast<ssize_t>(young_unaffiliated_regions), "Cannot transfer regions that are affiliated");
}
+ #endif
+
+ ssize_t xfer_bytes = old_region_balance * checked_cast<ssize_t>(region_size_bytes);
+ old_available -= xfer_bytes;
+ old_unaffiliated_regions -= old_region_balance;
+ young_capacity += xfer_bytes;
+ young_unaffiliated_regions += old_region_balance;
+ }
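+ // For example, a positive old_region_balance of two regions (with 4 MB regions) deducts 8 MB from old_available
+ // and adds 8 MB to young_capacity, moving two unaffiliated regions from the old generation to the young generation.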
+
+ // All allocations taken from the old collector set are performed by GC, generally using PLABs for both
+ // promotions and evacuations. The partition between which old memory is reserved for evacuation and
+ // which is reserved for promotion is enforced using thread-local variables that prescribe intentions for
+ // each PLAB's available memory.
+ if (have_evacuation_reserves) {
+ // We are rebuilding at the end of final mark, having already established evacuation budgets for this GC pass.
+ const size_t promoted_reserve = old_generation->get_promoted_reserve();
+ const size_t old_evac_reserve = old_generation->get_evacuation_reserve();
+ young_reserve_result = young_generation->get_evacuation_reserve();
+ old_reserve_result = promoted_reserve + old_evac_reserve;
+ assert(old_reserve_result <= old_available,
+ "Cannot reserve (" SIZE_FORMAT " + " SIZE_FORMAT") more OLD than is available: " SIZE_FORMAT,
+ promoted_reserve, old_evac_reserve, old_available);
+ } else {
+ // We are rebuilding at end of GC, so we set aside budgets specified on command line (or defaults)
+ young_reserve_result = (young_capacity * ShenandoahEvacReserve) / 100;
+ // The auto-sizer has already made old-gen large enough to hold all anticipated evacuations and promotions.
+ // Affiliated old-gen regions are already in the OldCollector free set. Add in the relevant number of
+ // unaffiliated regions.
+ old_reserve_result = old_available;
}
- recompute_bounds();
- assert_bounds();
+ // Old available regions that have less than PLAB::min_size() of available memory are not placed into the OldCollector
+ // free set. Because of this, old_available may not have enough memory to represent the intended reserve. Adjust
+ // the reserve downward to account for this possibility. This loss is part of the reason why the original budget
+ // was adjusted with ShenandoahOldEvacWaste and ShenandoahOldPromoWaste multipliers.
+ if (old_reserve_result >
+ _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes) {
+ old_reserve_result =
+ _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes;
+ }
+
+ if (young_reserve_result > young_unaffiliated_regions * region_size_bytes) {
+ young_reserve_result = young_unaffiliated_regions * region_size_bytes;
+ }
+ }
+
+ // Having placed all regions that have allocation capacity into the mutator set if they identify as is_young()
+ // or into the old collector set if they identify as is_old(), move some of these regions from the mutator set
+ // into the collector set or old collector set in order to assure that the memory available for allocations within
+ // the collector set is at least to_reserve and the memory available for allocations within the old collector set
+ // is at least to_reserve_old.
+ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old, size_t &old_region_count) {
+ for (size_t i = _heap->num_regions(); i > 0; i--) {
+ size_t idx = i - 1;
+ ShenandoahHeapRegion* r = _heap->get_region(idx);
+ if (!_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx)) {
+ continue;
+ }
+
+ size_t ac = alloc_capacity(r);
+ assert (ac > 0, "Membership in free set implies has capacity");
+ assert (!r->is_old() || r->is_trash(), "Except for trash, mutator_is_free regions should not be affiliated OLD");
+
+ bool move_to_old_collector = _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector) < to_reserve_old;
+ bool move_to_collector = _partitions.available_in(ShenandoahFreeSetPartitionId::Collector) < to_reserve;
+
+ if (!move_to_collector && !move_to_old_collector) {
+ // We've satisfied both to_reserve and to_reserve_old
+ break;
+ }
+
+ if (move_to_old_collector) {
+ // We give priority to OldCollector partition because we desire to pack OldCollector regions into higher
+ // addresses than Collector regions. Presumably, OldCollector regions are more "stable" and less likely to
+ // be collected in the near future.
+ if (r->is_trash() || !r->is_affiliated()) {
+ // OLD regions that have available memory are already in the old_collector free set.
+ _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
+ ShenandoahFreeSetPartitionId::OldCollector, ac);
+ log_debug(gc)(" Shifting region " SIZE_FORMAT " from mutator_free to old_collector_free", idx);
+ log_debug(gc)(" Shifted Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "],"
+ " Old Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
+ _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
+ _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
+ _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector),
+ _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector));
+ old_region_count++;
+ continue;
+ }
+ }
+
+ if (move_to_collector) {
+ // Note: In a previous implementation, regions were only placed into the survivor space (collector_is_free) if
+ // they were entirely empty. This has the effect of causing new Mutator allocation to reside next to objects
+ // that have already survived at least one GC, mixing ephemeral with longer-lived objects in the same region.
+ // Any objects that have survived a GC are less likely to immediately become garbage, so a region that contains
+ // survivor objects is less likely to be selected for the collection set. This alternative implementation allows
+ // survivor regions to continue accumulating other survivor objects, and makes it more likely that ephemeral objects
+ // occupy regions comprised entirely of ephemeral objects. These regions are highly likely to be included in the next
+ // collection set, and they are easily evacuated because they have low density of live objects.
+ _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
+ ShenandoahFreeSetPartitionId::Collector, ac);
+ log_debug(gc)(" Shifting region " SIZE_FORMAT " from mutator_free to collector_free", idx);
+ log_debug(gc)(" Shifted Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "],"
+ " Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
+ _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
+ _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
+ _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector),
+ _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector));
+ }
+ }
+
+ if (LogTarget(Info, gc, free)::is_enabled()) {
+ size_t old_reserve = _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector);
+ if (old_reserve < to_reserve_old) {
+ log_info(gc, free)("Wanted " PROPERFMT " for old reserve, but only reserved: " PROPERFMT,
+ PROPERFMTARGS(to_reserve_old), PROPERFMTARGS(old_reserve));
+ }
+ size_t reserve = _partitions.available_in(ShenandoahFreeSetPartitionId::Collector);
+ if (reserve < to_reserve) {
+ log_debug(gc)("Wanted " PROPERFMT " for young reserve, but only reserved: " PROPERFMT,
+ PROPERFMTARGS(to_reserve), PROPERFMTARGS(reserve));
+ }
+ }
+ }
+
+ void ShenandoahFreeSet::establish_old_collector_alloc_bias() {
+ ShenandoahHeap* heap = ShenandoahHeap::heap();
+ shenandoah_assert_heaplocked();
+
+ idx_t left_idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector);
+ idx_t right_idx = _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector);
+ idx_t middle = (left_idx + right_idx) / 2;
+ size_t available_in_first_half = 0;
+ size_t available_in_second_half = 0;
+
+ for (idx_t index = left_idx; index < middle; index++) {
+ if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, index)) {
+ ShenandoahHeapRegion* r = heap->get_region((size_t) index);
+ available_in_first_half += r->free();
+ }
+ }
+ for (idx_t index = middle; index <= right_idx; index++) {
+ if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, index)) {
+ ShenandoahHeapRegion* r = heap->get_region(index);
+ available_in_second_half += r->free();
+ }
+ }
+
+ // We desire to first consume the sparsely distributed regions in order that the remaining regions are densely packed.
+ // Densely packing regions reduces the effort to search for a region that has sufficient memory to satisfy a new allocation
+ // request. Regions become sparsely distributed following a Full GC, which tends to slide all regions to the front of the
+ // heap rather than allowing survivor regions to remain at the high end of the heap where we intend for them to congregate.
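+ // For example, after a Full GC has packed old objects toward low addresses, more free memory typically remains
+ // in the upper half of the interval, so we bias left to right and consume the sparse low-address regions first.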
+ _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::OldCollector,
+ (available_in_second_half > available_in_first_half));
+ }
+
+ void ShenandoahFreeSet::log_status_under_lock() {
+ // Must not be heap locked, it acquires heap lock only when log is enabled
+ shenandoah_assert_not_heaplocked();
+ if (LogTarget(Info, gc, free)::is_enabled()
+ DEBUG_ONLY(|| LogTarget(Debug, gc, free)::is_enabled())) {
+ ShenandoahHeapLocker locker(_heap->lock());
+ log_status();
+ }
}
void ShenandoahFreeSet::log_status() {
shenandoah_assert_heaplocked();
- LogTarget(Info, gc, ergo) lt;
+ #ifdef ASSERT
+ // Dump of the FreeSet details is only enabled if assertions are enabled
+ if (LogTarget(Debug, gc, free)::is_enabled()) {
+ #define BUFFER_SIZE 80
+ size_t retired_old = 0;
+ size_t retired_old_humongous = 0;
+ size_t retired_young = 0;
+ size_t retired_young_humongous = 0;
+ size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
+ size_t retired_young_waste = 0;
+ size_t retired_old_waste = 0;
+ size_t consumed_collector = 0;
+ size_t consumed_old_collector = 0;
+ size_t consumed_mutator = 0;
+ size_t available_old = 0;
+ size_t available_young = 0;
+ size_t available_mutator = 0;
+ size_t available_collector = 0;
+ size_t available_old_collector = 0;
+
+ char buffer[BUFFER_SIZE];
+ for (uint i = 0; i < BUFFER_SIZE; i++) {
+ buffer[i] = '\0';
+ }
+
+ log_debug(gc)("FreeSet map legend:"
+ " M:mutator_free C:collector_free O:old_collector_free"
+ " H:humongous ~:retired old _:retired young");
+ log_debug(gc)(" mutator free range [" SIZE_FORMAT ".." SIZE_FORMAT "] allocating from %s, "
+ " collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "], "
+ "old collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "] allocates from %s",
+ _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
+ _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
+ _partitions.alloc_from_left_bias(ShenandoahFreeSetPartitionId::Mutator)? "left to right": "right to left",
+ _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector),
+ _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector),
+ _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector),
+ _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector),
+ _partitions.alloc_from_left_bias(ShenandoahFreeSetPartitionId::OldCollector)? "left to right": "right to left");
+
+ for (uint i = 0; i < _heap->num_regions(); i++) {
+ ShenandoahHeapRegion *r = _heap->get_region(i);
+ uint idx = i % 64;
+ if ((i != 0) && (idx == 0)) {
+ log_debug(gc)(" %6u: %s", i-64, buffer);
+ }
+ if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, i)) {
+ size_t capacity = alloc_capacity(r);
+ assert(!r->is_old() || r->is_trash(), "Old regions except trash regions should not be in mutator_free set");
+ available_mutator += capacity;
+ consumed_mutator += region_size_bytes - capacity;
+ buffer[idx] = (capacity == region_size_bytes)? 'M': 'm';
+ } else if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, i)) {
+ size_t capacity = alloc_capacity(r);
+ assert(!r->is_old() || r->is_trash(), "Old regions except trash regions should not be in collector_free set");
+ available_collector += capacity;
+ consumed_collector += region_size_bytes - capacity;
+ buffer[idx] = (capacity == region_size_bytes)? 'C': 'c';
+ } else if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, i)) {
+ size_t capacity = alloc_capacity(r);
+ available_old_collector += capacity;
+ consumed_old_collector += region_size_bytes - capacity;
+ buffer[idx] = (capacity == region_size_bytes)? 'O': 'o';
+ } else if (r->is_humongous()) {
+ if (r->is_old()) {
+ buffer[idx] = 'H';
+ retired_old_humongous += region_size_bytes;
+ } else {
+ buffer[idx] = 'h';
+ retired_young_humongous += region_size_bytes;
+ }
+ } else {
+ if (r->is_old()) {
+ buffer[idx] = '~';
+ retired_old_waste += alloc_capacity(r);
+ retired_old += region_size_bytes;
+ } else {
+ buffer[idx] = '_';
+ retired_young_waste += alloc_capacity(r);
+ retired_young += region_size_bytes;
+ }
+ }
+ }
+ uint remnant = _heap->num_regions() % 64;
+ if (remnant > 0) {
+ buffer[remnant] = '\0';
+ } else {
+ remnant = 64;
+ }
+ log_debug(gc)(" %6u: %s", (uint) (_heap->num_regions() - remnant), buffer);
+ }
+ #endif
+
+ LogTarget(Info, gc, free) lt;
if (lt.is_enabled()) {
ResourceMark rm;
LogStream ls(lt);
{
- size_t last_idx = 0;
+ idx_t last_idx = 0;
size_t max = 0;
size_t max_contig = 0;
size_t empty_contig = 0;
size_t total_used = 0;
size_t total_free = 0;
size_t total_free_ext = 0;
- for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
- if (is_mutator_free(idx)) {
+ for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator);
+ idx <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator); idx++) {
+ if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx)) {
ShenandoahHeapRegion *r = _heap->get_region(idx);
size_t free = alloc_capacity(r);
-
max = MAX2(max, free);
-
if (r->is_empty()) {
total_free_ext += free;
if (last_idx + 1 == idx) {
empty_contig++;
} else {
empty_contig = 1;
}
} else {
empty_contig = 0;
}
-
total_used += r->used();
total_free += free;
-
max_contig = MAX2(max_contig, empty_contig);
last_idx = idx;
}
}
size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
size_t free = capacity() - used();
+ // Regions may have been retired from the Mutator partition since the most recent rebuild, but retirement
+ // charges a region's remnant free memory as used, so the internally tracked used() and capacity() should
+ // still agree with the sums computed over the regions that remain in the Mutator partition.
+ assert(free == total_free, "Free memory should match");
ls.print("Free: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s regular, " SIZE_FORMAT "%s humongous, ",
byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
byte_size_in_proper_unit(max), proper_unit_for_byte_size(max),
byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous)
);
size_t frag_ext;
if (total_free_ext > 0) {
frag_ext = 100 - (100 * max_humongous / total_free_ext);
} else {
frag_ext = 0;
}
ls.print(SIZE_FORMAT "%% external, ", frag_ext);
size_t frag_int;
- if (mutator_count() > 0) {
- frag_int = (100 * (total_used / mutator_count()) / ShenandoahHeapRegion::region_size_bytes());
+ if (_partitions.count(ShenandoahFreeSetPartitionId::Mutator) > 0) {
+ frag_int = (100 * (total_used / _partitions.count(ShenandoahFreeSetPartitionId::Mutator))
+ / ShenandoahHeapRegion::region_size_bytes());
} else {
frag_int = 0;
}
ls.print(SIZE_FORMAT "%% internal; ", frag_int);
+ ls.print("Used: " SIZE_FORMAT "%s, Mutator Free: " SIZE_FORMAT,
+ byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used),
+ _partitions.count(ShenandoahFreeSetPartitionId::Mutator));
}
{
size_t max = 0;
size_t total_free = 0;
+ size_t total_used = 0;
- for (size_t idx = _collector_leftmost; idx <= _collector_rightmost; idx++) {
- if (is_collector_free(idx)) {
+ for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector);
+ idx <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector); idx++) {
+ if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, idx)) {
ShenandoahHeapRegion *r = _heap->get_region(idx);
size_t free = alloc_capacity(r);
max = MAX2(max, free);
total_free += free;
+ total_used += r->used();
}
}
+ ls.print(" Collector Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s; Used: " SIZE_FORMAT "%s",
+ byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
+ byte_size_in_proper_unit(max), proper_unit_for_byte_size(max),
+ byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used));
+ }
- ls.print_cr("Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s",
+ if (_heap->mode()->is_generational()) {
+ size_t max = 0;
+ size_t total_free = 0;
+ size_t total_used = 0;
+
+ for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector);
+ idx <= _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector); idx++) {
+ if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, idx)) {
+ ShenandoahHeapRegion *r = _heap->get_region(idx);
+ size_t free = alloc_capacity(r);
+ max = MAX2(max, free);
+ total_free += free;
+ total_used += r->used();
+ }
+ }
+ ls.print_cr(" Old Collector Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s; Used: " SIZE_FORMAT "%s",
byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
- byte_size_in_proper_unit(max), proper_unit_for_byte_size(max));
+ byte_size_in_proper_unit(max), proper_unit_for_byte_size(max),
+ byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used));
}
}
}
HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
shenandoah_assert_heaplocked();
- assert_bounds();
-
- if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
+ if (ShenandoahHeapRegion::requires_humongous(req.size())) {
switch (req.type()) {
case ShenandoahAllocRequest::_alloc_shared:
case ShenandoahAllocRequest::_alloc_shared_gc:
in_new_region = true;
return allocate_contiguous(req);
+ case ShenandoahAllocRequest::_alloc_plab:
case ShenandoahAllocRequest::_alloc_gclab:
case ShenandoahAllocRequest::_alloc_tlab:
in_new_region = false;
- assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
- req.size(), ShenandoahHeapRegion::humongous_threshold_words());
+ assert(false, "Trying to allocate TLAB in humongous region: " SIZE_FORMAT, req.size());
return nullptr;
default:
ShouldNotReachHere();
return nullptr;
}
} else {
return allocate_single(req, in_new_region);
}
}
- size_t ShenandoahFreeSet::unsafe_peek_free() const {
- // Deliberately not locked, this method is unsafe when free set is modified.
-
- for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
- if (index < _max && is_mutator_free(index)) {
- ShenandoahHeapRegion* r = _heap->get_region(index);
- if (r->free() >= MinTLABSize) {
- return r->free();
- }
- }
- }
-
- // It appears that no regions left
- return 0;
- }
-
void ShenandoahFreeSet::print_on(outputStream* out) const {
- out->print_cr("Mutator Free Set: " SIZE_FORMAT "", mutator_count());
- for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
- if (is_mutator_free(index)) {
- _heap->get_region(index)->print_on(out);
- }
+ out->print_cr("Mutator Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::Mutator));
+ idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
+ for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); index <= rightmost; ) {
+ assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, index),
+ "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
+ _heap->get_region(index)->print_on(out);
+ index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, index + 1);
+ }
+ out->print_cr("Collector Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::Collector));
+ rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector);
+ for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector); index <= rightmost; ) {
+ assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, index),
+ "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
+ _heap->get_region(index)->print_on(out);
+ index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Collector, index + 1);
}
- out->print_cr("Collector Free Set: " SIZE_FORMAT "", collector_count());
- for (size_t index = _collector_leftmost; index <= _collector_rightmost; index++) {
- if (is_collector_free(index)) {
- _heap->get_region(index)->print_on(out);
+ if (_heap->mode()->is_generational()) {
+ out->print_cr("Old Collector Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::OldCollector));
+ for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector);
+ index <= _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector); index++) {
+ if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, index)) {
+ _heap->get_region(index)->print_on(out);
+ }
}
}
}
- /*
- * Internal fragmentation metric: describes how fragmented the heap regions are.
- *
- * It is derived as:
- *
- * sum(used[i]^2, i=0..k)
- * IF = 1 - ------------------------------
- * C * sum(used[i], i=0..k)
- *
- * ...where k is the number of regions in computation, C is the region capacity, and
- * used[i] is the used space in the region.
- *
- * The non-linearity causes IF to be lower for the cases where the same total heap
- * used is densely packed. For example:
- * a) Heap is completely full => IF = 0
- * b) Heap is half full, first 50% regions are completely full => IF = 0
- * c) Heap is half full, each region is 50% full => IF = 1/2
- * d) Heap is quarter full, first 50% regions are completely full => IF = 0
- * e) Heap is quarter full, each region is 25% full => IF = 3/4
- * f) Heap has one small object per each region => IF =~ 1
- */
double ShenandoahFreeSet::internal_fragmentation() {
double squared = 0;
double linear = 0;
- int count = 0;
- for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
- if (is_mutator_free(index)) {
- ShenandoahHeapRegion* r = _heap->get_region(index);
- size_t used = r->used();
- squared += used * used;
- linear += used;
- count++;
- }
+ idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
+ for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); index <= rightmost; ) {
+ assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, index),
+ "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
+ ShenandoahHeapRegion* r = _heap->get_region(index);
+ size_t used = r->used();
+ squared += used * used;
+ linear += used;
+ index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, index + 1);
}
- if (count > 0) {
+ if (linear > 0) {
double s = squared / (ShenandoahHeapRegion::region_size_bytes() * linear);
return 1 - s;
} else {
return 0;
}
}
- /*
- * External fragmentation metric: describes how fragmented the heap is.
- *
- * It is derived as:
- *
- * EF = 1 - largest_contiguous_free / total_free
- *
- * For example:
- * a) Heap is completely empty => EF = 0
- * b) Heap is completely full => EF = 0
- * c) Heap is first-half full => EF = 1/2
- * d) Heap is half full, full and empty regions interleave => EF =~ 1
- */
double ShenandoahFreeSet::external_fragmentation() {
- size_t last_idx = 0;
+ idx_t last_idx = 0;
size_t max_contig = 0;
size_t empty_contig = 0;
size_t free = 0;
- for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
- if (is_mutator_free(index)) {
- ShenandoahHeapRegion* r = _heap->get_region(index);
- if (r->is_empty()) {
- free += ShenandoahHeapRegion::region_size_bytes();
- if (last_idx + 1 == index) {
- empty_contig++;
- } else {
- empty_contig = 1;
- }
+ idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
+ for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); index <= rightmost; ) {
+ assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, index),
+ "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
+ ShenandoahHeapRegion* r = _heap->get_region(index);
+ if (r->is_empty()) {
+ free += ShenandoahHeapRegion::region_size_bytes();
+ if (last_idx + 1 == index) {
+ empty_contig++;
} else {
- empty_contig = 0;
+ empty_contig = 1;
}
-
- max_contig = MAX2(max_contig, empty_contig);
- last_idx = index;
+ } else {
+ empty_contig = 0;
}
+ max_contig = MAX2(max_contig, empty_contig);
+ last_idx = index;
+ index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, index + 1);
}
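+ // Worked example: with ten empty regions whose longest contiguous run is four regions, external fragmentation
+ // is 1 - 4/10 == 0.6.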
if (free > 0) {
return 1 - (1.0 * max_contig * ShenandoahHeapRegion::region_size_bytes() / free);
} else {
return 0;
}
}
- #ifdef ASSERT
- void ShenandoahFreeSet::assert_bounds() const {
- // Performance invariants. Failing these would not break the free set, but performance
- // would suffer.
- assert (_mutator_leftmost <= _max, "leftmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _mutator_leftmost, _max);
- assert (_mutator_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _mutator_rightmost, _max);
-
- assert (_mutator_leftmost == _max || is_mutator_free(_mutator_leftmost), "leftmost region should be free: " SIZE_FORMAT, _mutator_leftmost);
- assert (_mutator_rightmost == 0 || is_mutator_free(_mutator_rightmost), "rightmost region should be free: " SIZE_FORMAT, _mutator_rightmost);
-
- size_t beg_off = _mutator_free_bitmap.find_first_set_bit(0);
- size_t end_off = _mutator_free_bitmap.find_first_set_bit(_mutator_rightmost + 1);
- assert (beg_off >= _mutator_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _mutator_leftmost);
- assert (end_off == _max, "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, _mutator_rightmost);
-
- assert (_collector_leftmost <= _max, "leftmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _collector_leftmost, _max);
- assert (_collector_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _collector_rightmost, _max);
-
- assert (_collector_leftmost == _max || is_collector_free(_collector_leftmost), "leftmost region should be free: " SIZE_FORMAT, _collector_leftmost);
- assert (_collector_rightmost == 0 || is_collector_free(_collector_rightmost), "rightmost region should be free: " SIZE_FORMAT, _collector_rightmost);
-
- beg_off = _collector_free_bitmap.find_first_set_bit(0);
- end_off = _collector_free_bitmap.find_first_set_bit(_collector_rightmost + 1);
- assert (beg_off >= _collector_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _collector_leftmost);
- assert (end_off == _max, "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, end_off, _collector_rightmost);
- }
- #endif