165
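// Accessors for the evacuation reserve: bytes set aside in this generation to receive
// objects evacuated during a collection.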
166 size_t ShenandoahGeneration::get_evacuation_reserve() const {
167 return _evacuation_reserve;
168 }
169
170 void ShenandoahGeneration::augment_evacuation_reserve(size_t increment) {
171 _evacuation_reserve += increment;
172 }
173
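// Emits a one-line usage/capacity summary for this generation on the gc+ergo log at Info level.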
174 void ShenandoahGeneration::log_status(const char *msg) const {
175 typedef LogTarget(Info, gc, ergo) LogGcInfo;
176
177 if (!LogGcInfo::is_enabled()) {
178 return;
179 }
180
181 // Not under a lock here, so read each of these once so that the printed
182 // byte size and the unit it is reported in are derived from the same value.
183 const size_t v_used = used();
184 const size_t v_used_regions = used_regions_size();
185 const size_t v_soft_max_capacity = soft_max_capacity();
186 const size_t v_max_capacity = max_capacity();
187 const size_t v_available = available();
188 const size_t v_humongous_waste = get_humongous_waste();
189
190 const LogGcInfo target;
191 LogStream ls(target);
192 ls.print("%s: ", msg);
193 if (_type != NON_GEN) {
194 ls.print("%s generation ", name());
195 }
196
197 ls.print_cr("used: " PROPERFMT ", used regions: " PROPERFMT ", humongous waste: " PROPERFMT
198 ", soft capacity: " PROPERFMT ", max capacity: " PROPERFMT ", available: " PROPERFMT,
199 PROPERFMTARGS(v_used), PROPERFMTARGS(v_used_regions), PROPERFMTARGS(v_humongous_waste),
200 PROPERFMTARGS(v_soft_max_capacity), PROPERFMTARGS(v_max_capacity), PROPERFMTARGS(v_available));
201 }
202
203 template <bool PREPARE_FOR_CURRENT_CYCLE, bool FULL_GC>
204 void ShenandoahGeneration::reset_mark_bitmap() {
205 ShenandoahHeap* heap = ShenandoahHeap::heap();
780 _is_marking_complete.unset();
781 }
782
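// Returns the heap's marking context; only valid once marking is complete (asserted below).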
783 ShenandoahMarkingContext* ShenandoahGeneration::complete_marking_context() {
784 assert(is_mark_complete(), "Marking must be completed.");
785 return ShenandoahHeap::heap()->marking_context();
786 }
787
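// Abandons an in-progress mark for this generation: invalidates the mark-complete state if
// concurrent marking was running, clears the task queues, and abandons partial reference discovery.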
788 void ShenandoahGeneration::cancel_marking() {
789 log_info(gc)("Cancel marking: %s", name());
790 if (is_concurrent_mark_in_progress()) {
791 set_mark_incomplete();
792 }
793 _task_queues->clear();
794 ref_processor()->abandon_partial_discovery();
795 set_concurrent_mark_in_progress(false);
796 }
797
798 ShenandoahGeneration::ShenandoahGeneration(ShenandoahGenerationType type,
799 uint max_workers,
800 size_t max_capacity,
801 size_t soft_max_capacity) :
802 _type(type),
803 _task_queues(new ShenandoahObjToScanQueueSet(max_workers)),
804 _ref_processor(new ShenandoahReferenceProcessor(MAX2(max_workers, 1U))),
805 _affiliated_region_count(0), _humongous_waste(0), _evacuation_reserve(0),
806 _used(0), _bytes_allocated_since_gc_start(0),
807 _max_capacity(max_capacity), _soft_max_capacity(soft_max_capacity),
808 _heuristics(nullptr)
809 {
810 _is_marking_complete.set();
811 assert(max_workers > 0, "At least one queue");
812 for (uint i = 0; i < max_workers; ++i) {
813 ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
814 _task_queues->register_queue(i, task_queue);
815 }
816 }
817
818 ShenandoahGeneration::~ShenandoahGeneration() {
819 for (uint i = 0; i < _task_queues->size(); ++i) {
820 ShenandoahObjToScanQueue* q = _task_queues->queue(i);
821 delete q;
822 }
823 delete _task_queues;
824 }
825
826 void ShenandoahGeneration::reserve_task_queues(uint workers) {
827 _task_queues->reserve(workers);
933 } else {
934 result -= used_regions;
935 }
936 return result;
937 }
938
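// Bytes spanned by this generation's used regions (whole regions, not just the used bytes within them).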
939 size_t ShenandoahGeneration::used_regions_size() const {
940 return used_regions() * ShenandoahHeapRegion::region_size_bytes();
941 }
942
943 size_t ShenandoahGeneration::available() const {
944 return available(max_capacity());
945 }
946
947 // For ShenandoahYoungGeneration, this includes the young available memory that may have been reserved for the Collector.
948 size_t ShenandoahGeneration::available_with_reserve() const {
949 return available(max_capacity());
950 }
951
952 size_t ShenandoahGeneration::soft_available() const {
953 return available(soft_max_capacity());
954 }
955
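// Bytes still available against the given capacity; humongous waste counts as used and the result clamps at zero.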
956 size_t ShenandoahGeneration::available(size_t capacity) const {
957 size_t in_use = used() + get_humongous_waste();
958 return in_use > capacity ? 0 : capacity - in_use;
959 }
960
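// Grows this generation's maximum capacity by the given increment, which must be a multiple of the region size.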
961 size_t ShenandoahGeneration::increase_capacity(size_t increment) {
962 shenandoah_assert_heaplocked_or_safepoint();
963
964 // We do not enforce that new capacity >= heap->max_size_for(this). The maximum generation size is treated as a rule of thumb
965 // which may be violated during certain transitions, such as when we are forcing transfers for the purpose of promoting regions
966 // in place.
967 assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
968 (_max_capacity + increment <= ShenandoahHeap::heap()->max_capacity()), "Generation cannot be larger than heap size");
969 assert(increment % ShenandoahHeapRegion::region_size_bytes() == 0, "Generation capacity must be multiple of region size");
970 _max_capacity += increment;
971
972 // This detects arithmetic wraparound on _used
973 assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
165
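// Accessors for the evacuation reserve: bytes set aside in this generation to receive
// objects evacuated during a collection.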
166 size_t ShenandoahGeneration::get_evacuation_reserve() const {
167 return _evacuation_reserve;
168 }
169
170 void ShenandoahGeneration::augment_evacuation_reserve(size_t increment) {
171 _evacuation_reserve += increment;
172 }
173
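// Emits a one-line usage/capacity summary for this generation on the gc+ergo log at Info level.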
174 void ShenandoahGeneration::log_status(const char *msg) const {
175 typedef LogTarget(Info, gc, ergo) LogGcInfo;
176
177 if (!LogGcInfo::is_enabled()) {
178 return;
179 }
180
181 // Not under a lock here, so read each of these once so that the printed
182 // byte size and the unit it is reported in are derived from the same value.
183 const size_t v_used = used();
184 const size_t v_used_regions = used_regions_size();
185 const size_t v_soft_max_capacity = ShenandoahHeap::heap()->soft_max_capacity();
186 const size_t v_max_capacity = max_capacity();
187 const size_t v_available = available();
188 const size_t v_humongous_waste = get_humongous_waste();
189
190 const LogGcInfo target;
191 LogStream ls(target);
192 ls.print("%s: ", msg);
193 if (_type != NON_GEN) {
194 ls.print("%s generation ", name());
195 }
196
197 ls.print_cr("used: " PROPERFMT ", used regions: " PROPERFMT ", humongous waste: " PROPERFMT
198 ", soft capacity: " PROPERFMT ", max capacity: " PROPERFMT ", available: " PROPERFMT,
199 PROPERFMTARGS(v_used), PROPERFMTARGS(v_used_regions), PROPERFMTARGS(v_humongous_waste),
200 PROPERFMTARGS(v_soft_max_capacity), PROPERFMTARGS(v_max_capacity), PROPERFMTARGS(v_available));
201 }
202
203 template <bool PREPARE_FOR_CURRENT_CYCLE, bool FULL_GC>
204 void ShenandoahGeneration::reset_mark_bitmap() {
205 ShenandoahHeap* heap = ShenandoahHeap::heap();
780 _is_marking_complete.unset();
781 }
782
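// Returns the heap's marking context; only valid once marking is complete (asserted below).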
783 ShenandoahMarkingContext* ShenandoahGeneration::complete_marking_context() {
784 assert(is_mark_complete(), "Marking must be completed.");
785 return ShenandoahHeap::heap()->marking_context();
786 }
787
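// Abandons an in-progress mark for this generation: invalidates the mark-complete state if
// concurrent marking was running, clears the task queues, and abandons partial reference discovery.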
788 void ShenandoahGeneration::cancel_marking() {
789 log_info(gc)("Cancel marking: %s", name());
790 if (is_concurrent_mark_in_progress()) {
791 set_mark_incomplete();
792 }
793 _task_queues->clear();
794 ref_processor()->abandon_partial_discovery();
795 set_concurrent_mark_in_progress(false);
796 }
797
798 ShenandoahGeneration::ShenandoahGeneration(ShenandoahGenerationType type,
799 uint max_workers,
800 size_t max_capacity) :
801 _type(type),
802 _task_queues(new ShenandoahObjToScanQueueSet(max_workers)),
803 _ref_processor(new ShenandoahReferenceProcessor(MAX2(max_workers, 1U))),
804 _affiliated_region_count(0), _humongous_waste(0), _evacuation_reserve(0),
805 _used(0), _bytes_allocated_since_gc_start(0),
806 _max_capacity(max_capacity),
807 _heuristics(nullptr)
808 {
809 _is_marking_complete.set();
810 assert(max_workers > 0, "At least one queue");
811 for (uint i = 0; i < max_workers; ++i) {
812 ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
813 _task_queues->register_queue(i, task_queue);
814 }
815 }
816
817 ShenandoahGeneration::~ShenandoahGeneration() {
818 for (uint i = 0; i < _task_queues->size(); ++i) {
819 ShenandoahObjToScanQueue* q = _task_queues->queue(i);
820 delete q;
821 }
822 delete _task_queues;
823 }
824
825 void ShenandoahGeneration::reserve_task_queues(uint workers) {
826 _task_queues->reserve(workers);
932 } else {
933 result -= used_regions;
934 }
935 return result;
936 }
937
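// Bytes spanned by this generation's used regions (whole regions, not just the used bytes within them).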
938 size_t ShenandoahGeneration::used_regions_size() const {
939 return used_regions() * ShenandoahHeapRegion::region_size_bytes();
940 }
941
942 size_t ShenandoahGeneration::available() const {
943 return available(max_capacity());
944 }
945
946 // For ShenandoahYoungGeneration, this includes the young available memory that may have been reserved for the Collector.
947 size_t ShenandoahGeneration::available_with_reserve() const {
948 return available(max_capacity());
949 }
950
951 size_t ShenandoahGeneration::soft_available() const {
952 return available(ShenandoahHeap::heap()->soft_max_capacity());
953 }
954
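// Bytes still available against the given capacity; humongous waste counts as used and the result clamps at zero.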
955 size_t ShenandoahGeneration::available(size_t capacity) const {
956 size_t in_use = used() + get_humongous_waste();
957 return in_use > capacity ? 0 : capacity - in_use;
958 }
959
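// Grows this generation's maximum capacity by the given increment, which must be a multiple of the region size.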
960 size_t ShenandoahGeneration::increase_capacity(size_t increment) {
961 shenandoah_assert_heaplocked_or_safepoint();
962
963 // We do not enforce that new capacity >= heap->max_size_for(this). The maximum generation size is treated as a rule of thumb
964 // which may be violated during certain transitions, such as when we are forcing transfers for the purpose of promoting regions
965 // in place.
966 assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
967 (_max_capacity + increment <= ShenandoahHeap::heap()->max_capacity()), "Generation cannot be larger than heap size");
968 assert(increment % ShenandoahHeapRegion::region_size_bytes() == 0, "Generation capacity must be multiple of region size");
969 _max_capacity += increment;
970
971 // This detects arithmetic wraparound on _used
972 assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||