/*
 * Copyright (c) 2016, 2019, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
22 * 23 */ 24 25 #include "precompiled.hpp" 26 27 #include "gc/shenandoah/shenandoahCollectionSet.hpp" 28 #include "gc/shenandoah/shenandoahHeap.inline.hpp" 29 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp" 30 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp" 31 #include "gc/shenandoah/shenandoahUtils.hpp" 32 #include "runtime/atomic.hpp" 33 #include "services/memTracker.hpp" 34 #include "utilities/copy.hpp" 35 36 ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedSpace space, char* heap_base) : 37 _map_size(heap->num_regions()), 38 _region_size_bytes_shift(ShenandoahHeapRegion::region_size_bytes_shift()), 39 _map_space(space), 40 _cset_map(_map_space.base() + ((uintx)heap_base >> _region_size_bytes_shift)), 41 _biased_cset_map(_map_space.base()), 42 _heap(heap), 43 _has_old_regions(false), 44 _garbage(0), 45 _used(0), 46 _region_count(0), 47 _current_index(0) { 48 49 // The collection set map is reserved to cover the entire heap *and* zero addresses. 50 // This is needed to accept in-cset checks for both heap oops and NULLs, freeing 51 // high-performance code from checking for NULL first. 52 // 53 // Since heap_base can be far away, committing the entire map would waste memory. 54 // Therefore, we only commit the parts that are needed to operate: the heap view, 55 // and the zero page. 56 // 57 // Note: we could instead commit the entire map, and piggyback on OS virtual memory 58 // subsystem for mapping not-yet-written-to pages to a single physical backing page, 59 // but this is not guaranteed, and would confuse NMT and other memory accounting tools. 60 61 MemTracker::record_virtual_memory_type(_map_space.base(), mtGC); 62 63 size_t page_size = (size_t)os::vm_page_size(); 64 65 if (!_map_space.special()) { 66 // Commit entire pages that cover the heap cset map. 
67 char* bot_addr = align_down(_cset_map, page_size); 68 char* top_addr = align_up(_cset_map + _map_size, page_size); 69 os::commit_memory_or_exit(bot_addr, pointer_delta(top_addr, bot_addr, 1), false, 70 "Unable to commit collection set bitmap: heap"); 71 72 // Commit the zero page, if not yet covered by heap cset map. 73 if (bot_addr != _biased_cset_map) { 74 os::commit_memory_or_exit(_biased_cset_map, page_size, false, 75 "Unable to commit collection set bitmap: zero page"); 76 } 77 } 78 79 Copy::zero_to_bytes(_cset_map, _map_size); 80 Copy::zero_to_bytes(_biased_cset_map, page_size); 81 } 82 83 void ShenandoahCollectionSet::add_region(ShenandoahHeapRegion* r) { 84 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); 85 assert(Thread::current()->is_VM_thread(), "Must be VMThread"); 86 assert(!is_in(r), "Already in collection set"); 87 assert(!r->is_humongous(), "Only add regular regions to the collection set"); 88 89 _cset_map[r->index()] = 1; 90 91 if (r->affiliation() == YOUNG_GENERATION) { 92 _young_region_count++; 93 _young_bytes_to_evacuate += r->get_live_data_bytes(); 94 } else if (r->affiliation() == OLD_GENERATION) { 95 _old_region_count++; 96 _old_bytes_to_evacuate += r->get_live_data_bytes(); 97 } 98 99 _region_count++; 100 _has_old_regions |= r->is_old(); 101 _garbage += r->garbage(); 102 _used += r->used(); 103 // Update the region status too. State transition would be checked internally. 
104 r->make_cset(); 105 } 106 107 void ShenandoahCollectionSet::clear() { 108 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); 109 Copy::zero_to_bytes(_cset_map, _map_size); 110 111 #ifdef ASSERT 112 for (size_t index = 0; index < _heap->num_regions(); index ++) { 113 assert (!_heap->get_region(index)->is_cset(), "should have been cleared before"); 114 } 115 #endif 116 117 _garbage = 0; 118 _used = 0; 119 120 _region_count = 0; 121 _current_index = 0; 122 123 _young_region_count = 0; 124 _old_region_count = 0; 125 _young_bytes_to_evacuate = 0; 126 _old_bytes_to_evacuate = 0; 127 128 _has_old_regions = false; 129 } 130 131 ShenandoahHeapRegion* ShenandoahCollectionSet::claim_next() { 132 // This code is optimized for the case when collection set contains only 133 // a few regions. In this case, it is more constructive to check for is_in 134 // before hitting the (potentially contended) atomic index. 135 136 size_t max = _heap->num_regions(); 137 size_t old = Atomic::load(&_current_index); 138 139 for (size_t index = old; index < max; index++) { 140 if (is_in(index)) { 141 size_t cur = Atomic::cmpxchg(&_current_index, old, index + 1, memory_order_relaxed); 142 assert(cur >= old, "Always move forward"); 143 if (cur == old) { 144 // Successfully moved the claim index, this is our region. 145 return _heap->get_region(index); 146 } else { 147 // Somebody else moved the claim index, restart from there. 
148 index = cur - 1; // adjust for loop post-increment 149 old = cur; 150 } 151 } 152 } 153 return NULL; 154 } 155 156 ShenandoahHeapRegion* ShenandoahCollectionSet::next() { 157 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); 158 assert(Thread::current()->is_VM_thread(), "Must be VMThread"); 159 160 size_t max = _heap->num_regions(); 161 for (size_t index = _current_index; index < max; index++) { 162 if (is_in(index)) { 163 _current_index = index + 1; 164 return _heap->get_region(index); 165 } 166 } 167 168 return NULL; 169 } 170 171 void ShenandoahCollectionSet::print_on(outputStream* out) const { 172 out->print_cr("Collection Set : " SIZE_FORMAT "", count()); 173 174 debug_only(size_t regions = 0;) 175 for (size_t index = 0; index < _heap->num_regions(); index ++) { 176 if (is_in(index)) { 177 _heap->get_region(index)->print_on(out); 178 debug_only(regions ++;) 179 } 180 } 181 assert(regions == count(), "Must match"); 182 }