/*
 * Copyright (c) 2016, 2023, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "runtime/atomic.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"

ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedSpace space, char* heap_base) :
  _map_size(heap->num_regions()),
  _region_size_bytes_shift(ShenandoahHeapRegion::region_size_bytes_shift()),
  _map_space(space),
  _cset_map(_map_space.base() + ((uintx)heap_base >> _region_size_bytes_shift)),
  _biased_cset_map(_map_space.base()),
  _heap(heap),
  _has_old_regions(false),
  _garbage(0),
  _used(0),
  _live(0),
  _region_count(0),
  _old_garbage(0),
  _current_index(0) {

  // The collection set map is reserved to cover the entire heap *and* zero addresses.
  // This is needed to accept in-cset checks for both heap oops and nulls, freeing
  // high-performance code from checking for null first.
  //
  // Since heap_base can be far away, committing the entire map would waste memory.
  // Therefore, we only commit the parts that are needed to operate: the heap view,
  // and the zero page.
  //
  // Note: we could instead commit the entire map, and piggyback on the OS virtual
  // memory subsystem for mapping not-yet-written-to pages to a single physical
  // backing page, but this is not guaranteed, and would confuse NMT and other
  // memory accounting tools.

  MemTracker::record_virtual_memory_type(_map_space.base(), mtGC);

  size_t page_size = os::vm_page_size();

  if (!_map_space.special()) {
    // Commit entire pages that cover the heap cset map.
    char* bot_addr = align_down(_cset_map, page_size);
    char* top_addr = align_up(_cset_map + _map_size, page_size);
    os::commit_memory_or_exit(bot_addr, pointer_delta(top_addr, bot_addr, 1), false,
                              "Unable to commit collection set bitmap: heap");

    // Commit the zero page, if not yet covered by heap cset map.
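    // The zero page sits at _biased_cset_map, the base of the reserved space:
    // it is the page a null pointer indexes into after the biasing shift. If the
    // committed heap view already starts at the space base, the zero page was
    // committed by the call above, and the separate commit is skipped.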
    if (bot_addr != _biased_cset_map) {
      os::commit_memory_or_exit(_biased_cset_map, page_size, false,
                                "Unable to commit collection set bitmap: zero page");
    }
  }

  Copy::zero_to_bytes(_cset_map, _map_size);
  Copy::zero_to_bytes(_biased_cset_map, page_size);
}

void ShenandoahCollectionSet::add_region(ShenandoahHeapRegion* r) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Must be VMThread");
  assert(!is_in(r), "Already in collection set");
  assert(!r->is_humongous(), "Only add regular regions to the collection set");

  _cset_map[r->index()] = 1;

  if (r->affiliation() == YOUNG_GENERATION) {
    _young_region_count++;
    _young_bytes_to_evacuate += r->get_live_data_bytes();
    if (r->age() >= InitialTenuringThreshold) {
      _young_bytes_to_promote += r->get_live_data_bytes();
    }
  } else if (r->affiliation() == OLD_GENERATION) {
    _old_region_count++;
    _old_bytes_to_evacuate += r->get_live_data_bytes();
    _old_garbage += r->garbage();
  }

  _region_count++;
  _has_old_regions |= r->is_old();
  _garbage += r->garbage();
  _used += r->used();
  _live += r->get_live_data_bytes();
  // Update the region status too. The state transition is validated internally.
  r->make_cset();
}

void ShenandoahCollectionSet::clear() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  Copy::zero_to_bytes(_cset_map, _map_size);

#ifdef ASSERT
  for (size_t index = 0; index < _heap->num_regions(); index++) {
    assert(!_heap->get_region(index)->is_cset(), "should have been cleared before");
  }
#endif

  _garbage = 0;
  _old_garbage = 0;
  _used = 0;
  _live = 0;

  _region_count = 0;
  _current_index = 0;

  _young_region_count = 0;
  _young_bytes_to_evacuate = 0;
  _young_bytes_to_promote = 0;

  _old_region_count = 0;
  _old_bytes_to_evacuate = 0;

  _has_old_regions = false;
}

ShenandoahHeapRegion* ShenandoahCollectionSet::claim_next() {
  // This code is optimized for the case when the collection set contains only
  // a few regions. In that case, it is cheaper to check is_in before hitting
  // the (potentially contended) atomic index.

  size_t max = _heap->num_regions();
  size_t old = Atomic::load(&_current_index);

  for (size_t index = old; index < max; index++) {
    if (is_in(index)) {
      size_t cur = Atomic::cmpxchg(&_current_index, old, index + 1, memory_order_relaxed);
      assert(cur >= old, "Always move forward");
      if (cur == old) {
        // Successfully moved the claim index, this is our region.
        return _heap->get_region(index);
      } else {
        // Somebody else moved the claim index, restart from there.
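        // (cmpxchg returns the value it witnessed; on failure that is the claim
        //  index advanced by another thread, so resuming the scan from there
        //  skips regions that are already claimed.)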
        index = cur - 1; // adjust for loop post-increment
        old = cur;
      }
    }
  }
  return nullptr;
}

ShenandoahHeapRegion* ShenandoahCollectionSet::next() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Must be VMThread");

  size_t max = _heap->num_regions();
  for (size_t index = _current_index; index < max; index++) {
    if (is_in(index)) {
      _current_index = index + 1;
      return _heap->get_region(index);
    }
  }

  return nullptr;
}

void ShenandoahCollectionSet::print_on(outputStream* out) const {
  out->print_cr("Collection Set: Regions: "
                SIZE_FORMAT ", Garbage: " SIZE_FORMAT "%s, Live: " SIZE_FORMAT "%s, Used: " SIZE_FORMAT "%s", count(),
                byte_size_in_proper_unit(garbage()), proper_unit_for_byte_size(garbage()),
                byte_size_in_proper_unit(live()),    proper_unit_for_byte_size(live()),
                byte_size_in_proper_unit(used()),    proper_unit_for_byte_size(used()));

  debug_only(size_t regions = 0;)
  for (size_t index = 0; index < _heap->num_regions(); index++) {
    if (is_in(index)) {
      _heap->get_region(index)->print_on(out);
      debug_only(regions++;)
    }
  }
  assert(regions == count(), "Must match");
}
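
// The payoff of the biased map set up in the constructor is that the hot in-cset
// check is a single shift and byte load, valid for both heap oops and null.
// A minimal sketch of the accessor shape, assuming the byte-map encoding above
// (the checked-in accessors live in shenandoahCollectionSet.inline.hpp):
//
//   inline bool ShenandoahCollectionSet::is_in_loc(void* p) const {
//     uintx index = ((uintx) p) >> _region_size_bytes_shift;
//     // No need to subtract the heap start: _biased_cset_map is pre-biased.
//     return _biased_cset_map[index] == 1;
//   }
//
// A null pointer yields index 0, landing in the committed, zeroed "zero page",
// so the check answers "not in collection set" without a prior null test.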