size_t ShenandoahGeneration::get_evacuation_reserve() const {
  return _evacuation_reserve;
}

void ShenandoahGeneration::augment_evacuation_reserve(size_t increment) {
  _evacuation_reserve += increment;
}
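// The evacuation reserve tracked above is the memory within this generation that has been set
// aside to receive objects copied during evacuation; augment_evacuation_reserve() lets callers
// grow that budget incrementally rather than recomputing it from scratch.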

void ShenandoahGeneration::log_status(const char *msg) const {
  typedef LogTarget(Info, gc, ergo) LogGcInfo;

  if (!LogGcInfo::is_enabled()) {
    return;
  }

  // We are not under a lock here, so read each of these once into a local to make sure that the
  // byte size in proper unit and the proper unit for that byte size are computed from the same
  // value and are therefore consistent.
  const size_t v_used = used();
  const size_t v_used_regions = used_regions_size();
  const size_t v_soft_max_capacity = soft_max_capacity();
  const size_t v_max_capacity = max_capacity();
  const size_t v_available = available();
  const size_t v_humongous_waste = get_humongous_waste();

  const LogGcInfo target;
  LogStream ls(target);
  ls.print("%s: ", msg);
  if (_type != NON_GEN) {
    ls.print("%s generation ", name());
  }

  ls.print_cr("used: " PROPERFMT ", used regions: " PROPERFMT ", humongous waste: " PROPERFMT
              ", soft capacity: " PROPERFMT ", max capacity: " PROPERFMT ", available: " PROPERFMT,
              PROPERFMTARGS(v_used), PROPERFMTARGS(v_used_regions), PROPERFMTARGS(v_humongous_waste),
              PROPERFMTARGS(v_soft_max_capacity), PROPERFMTARGS(v_max_capacity), PROPERFMTARGS(v_available));
}
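// A hypothetical example of the line log_status() emits under -Xlog:gc+ergo (message and values
// invented purely for illustration):
//   After mark: Young generation used: 210M, used regions: 224M, humongous waste: 0B,
//   soft capacity: 1024M, max capacity: 1024M, available: 800M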

template <bool PREPARE_FOR_CURRENT_CYCLE, bool FULL_GC>
void ShenandoahGeneration::reset_mark_bitmap() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  // ...

void ShenandoahGeneration::set_mark_incomplete() {
  _is_marking_complete.unset();
}

ShenandoahMarkingContext* ShenandoahGeneration::complete_marking_context() {
  assert(is_mark_complete(), "Marking must be completed.");
  return ShenandoahHeap::heap()->marking_context();
}
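// Note that the generation does not keep a marking context of its own: complete_marking_context()
// hands back the heap-wide context, and the assert guards that callers only consult it once this
// generation's marking has actually finished.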

void ShenandoahGeneration::cancel_marking() {
  log_info(gc)("Cancel marking: %s", name());
  if (is_concurrent_mark_in_progress()) {
    set_mark_incomplete();
  }
  _task_queues->clear();
  ref_processor()->abandon_partial_discovery();
  set_concurrent_mark_in_progress(false);
}
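// On cancellation the mark-complete flag is cleared only if concurrent marking was actually in
// progress, while the task queues and any partially discovered references are always dropped, so
// that the next marking cycle starts from a clean slate.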

ShenandoahGeneration::ShenandoahGeneration(ShenandoahGenerationType type,
                                           uint max_workers,
                                           size_t max_capacity,
                                           size_t soft_max_capacity) :
  _type(type),
  _task_queues(new ShenandoahObjToScanQueueSet(max_workers)),
  _ref_processor(new ShenandoahReferenceProcessor(MAX2(max_workers, 1U))),
  _affiliated_region_count(0), _humongous_waste(0), _evacuation_reserve(0),
  _used(0), _bytes_allocated_since_gc_start(0),
  _max_capacity(max_capacity), _soft_max_capacity(soft_max_capacity),
  _heuristics(nullptr)
{
  _is_marking_complete.set();
  assert(max_workers > 0, "At least one queue");
  for (uint i = 0; i < max_workers; ++i) {
    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
    _task_queues->register_queue(i, task_queue);
  }
}
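// One ShenandoahObjToScanQueue is registered per marking worker. A freshly constructed generation
// reports marking as complete, presumably because no marking cycle has run against it yet.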

ShenandoahGeneration::~ShenandoahGeneration() {
  for (uint i = 0; i < _task_queues->size(); ++i) {
    ShenandoahObjToScanQueue* q = _task_queues->queue(i);
    delete q;
  }
  delete _task_queues;
}

void ShenandoahGeneration::reserve_task_queues(uint workers) {
  _task_queues->reserve(workers);
}

// ...

  } else {
    result -= used_regions;
  }
  return result;
}

size_t ShenandoahGeneration::used_regions_size() const {
  return used_regions() * ShenandoahHeapRegion::region_size_bytes();
}
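// For example, with a hypothetical region size of 4M, a generation reporting 32 used regions
// would report 128M from used_regions_size().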

size_t ShenandoahGeneration::available() const {
  return available(max_capacity());
}

// For ShenandoahYoungGeneration, this includes the young available that may have been reserved for the Collector.
size_t ShenandoahGeneration::available_with_reserve() const {
  return available(max_capacity());
}

size_t ShenandoahGeneration::soft_available() const {
  return available(soft_max_capacity());
}

size_t ShenandoahGeneration::available(size_t capacity) const {
  size_t in_use = used() + get_humongous_waste();
  return in_use > capacity ? 0 : capacity - in_use;
}
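// Worked example: with capacity = 512M, used() = 400M and 8M of humongous waste, available(512M)
// yields 104M. If in_use ever exceeds the supplied capacity, the result saturates at zero rather
// than underflowing the unsigned subtraction.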

size_t ShenandoahGeneration::increase_capacity(size_t increment) {
  shenandoah_assert_heaplocked_or_safepoint();

  // We do not enforce that new capacity >= heap->max_size_for(this). The maximum generation size is treated as a rule of thumb
  // which may be violated during certain transitions, such as when we are forcing transfers for the purpose of promoting regions
  // in place.
  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
         (_max_capacity + increment <= ShenandoahHeap::heap()->max_capacity()), "Generation cannot be larger than heap size");
  assert(increment % ShenandoahHeapRegion::region_size_bytes() == 0, "Generation capacity must be multiple of region size");
  _max_capacity += increment;

  // This detects arithmetic wraparound on _used
  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||