1 /* 2 * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "gc/shared/cardTable.hpp" 27 #include "gc/shared/collectedHeap.hpp" 28 #include "gc/shared/gcLogPrecious.hpp" 29 #include "gc/shared/gc_globals.hpp" 30 #include "gc/shared/space.hpp" 31 #include "logging/log.hpp" 32 #include "memory/virtualspace.hpp" 33 #include "nmt/memTracker.hpp" 34 #include "runtime/init.hpp" 35 #include "runtime/java.hpp" 36 #include "runtime/os.hpp" 37 #include "utilities/align.hpp" 38 #if INCLUDE_PARALLELGC 39 #include "gc/parallel/objectStartArray.hpp" 40 #endif 41 42 uint CardTable::_card_shift = 0; 43 uint CardTable::_card_size = 0; 44 uint CardTable::_card_size_in_words = 0; 45 46 void CardTable::initialize_card_size() { 47 assert(UseG1GC || UseParallelGC || UseSerialGC || UseShenandoahGC, 48 "Initialize card size should only be called by card based collectors."); 49 50 _card_size = GCCardSizeInBytes; 51 _card_shift = log2i_exact(_card_size); 52 _card_size_in_words = _card_size / sizeof(HeapWord); 53 54 log_info_p(gc, init)("CardTable entry size: " UINT32_FORMAT, _card_size); 55 } 56 57 size_t CardTable::compute_byte_map_size(size_t num_bytes) { 58 assert(_page_size != 0, "uninitialized, check declaration order"); 59 const size_t granularity = os::vm_allocation_granularity(); 60 return align_up(num_bytes, MAX2(_page_size, granularity)); 61 } 62 63 CardTable::CardTable(MemRegion whole_heap) : 64 _whole_heap(whole_heap), 65 _page_size(os::vm_page_size()), 66 _byte_map_size(0), 67 _byte_map(nullptr), 68 _byte_map_base(nullptr) 69 { 70 assert((uintptr_t(_whole_heap.start()) & (_card_size - 1)) == 0, "heap must start at card boundary"); 71 assert((uintptr_t(_whole_heap.end()) & (_card_size - 1)) == 0, "heap must end at card boundary"); 72 } 73 74 void CardTable::initialize(void* region0_start, void* region1_start) { 75 size_t num_cards = cards_required(_whole_heap.word_size()); 76 77 size_t num_bytes = num_cards * sizeof(CardValue); 78 _byte_map_size = compute_byte_map_size(num_bytes); 79 80 HeapWord* low_bound = _whole_heap.start(); 81 HeapWord* high_bound = _whole_heap.end(); 82 83 const size_t rs_align = _page_size == os::vm_page_size() ? 0 : 84 MAX2(_page_size, os::vm_allocation_granularity()); 85 ReservedSpace heap_rs(_byte_map_size, rs_align, _page_size); 86 87 MemTracker::record_virtual_memory_tag((address)heap_rs.base(), mtGC); 88 89 os::trace_page_sizes("Card Table", num_bytes, num_bytes, 90 heap_rs.base(), heap_rs.size(), _page_size); 91 if (!heap_rs.is_reserved()) { 92 vm_exit_during_initialization("Could not reserve enough space for the " 93 "card marking array"); 94 } 95 96 // The assembler store_check code will do an unsigned shift of the oop, 97 // then add it to _byte_map_base, i.e. 98 // 99 // _byte_map = _byte_map_base + (uintptr_t(low_bound) >> card_shift) 100 _byte_map = (CardValue*) heap_rs.base(); 101 _byte_map_base = _byte_map - (uintptr_t(low_bound) >> _card_shift); 102 assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map"); 103 assert(byte_for(high_bound-1) <= &_byte_map[last_valid_index()], "Checking end of map"); 104 105 initialize_covered_region(region0_start, region1_start); 106 107 log_trace(gc, barrier)("CardTable::CardTable: "); 108 log_trace(gc, barrier)(" &_byte_map[0]: " PTR_FORMAT " &_byte_map[last_valid_index()]: " PTR_FORMAT, 109 p2i(&_byte_map[0]), p2i(&_byte_map[last_valid_index()])); 110 log_trace(gc, barrier)(" _byte_map_base: " PTR_FORMAT, p2i(_byte_map_base)); 111 } 112 113 MemRegion CardTable::committed_for(const MemRegion mr) const { 114 HeapWord* addr_l = (HeapWord*)align_down(byte_for(mr.start()), _page_size); 115 HeapWord* addr_r = mr.is_empty() 116 ? addr_l 117 : (HeapWord*)align_up(byte_after(mr.last()), _page_size); 118 119 if (mr.start() == _covered[0].start()) { 120 // In case the card for gen-boundary is not page-size aligned, the crossing page belongs to _covered[1]. 121 addr_r = MIN2(addr_r, (HeapWord*)align_down(byte_for(_covered[1].start()), _page_size)); 122 } 123 124 return MemRegion(addr_l, addr_r); 125 } 126 127 void CardTable::initialize_covered_region(void* region0_start, void* region1_start) { 128 assert(_whole_heap.start() == region0_start, "precondition"); 129 assert(region0_start < region1_start, "precondition"); 130 131 assert(_covered[0].start() == nullptr, "precondition"); 132 assert(_covered[1].start() == nullptr, "precondition"); 133 134 _covered[0] = MemRegion((HeapWord*)region0_start, (size_t)0); 135 _covered[1] = MemRegion((HeapWord*)region1_start, (size_t)0); 136 } 137 138 void CardTable::resize_covered_region(MemRegion new_region) { 139 assert(UseSerialGC || UseParallelGC, "only these two collectors"); 140 assert(_whole_heap.contains(new_region), 141 "attempt to cover area not in reserved area"); 142 assert(_covered[0].start() != nullptr, "precondition"); 143 assert(_covered[1].start() != nullptr, "precondition"); 144 145 int idx = new_region.start() == _whole_heap.start() ? 0 : 1; 146 147 // We don't allow changes to the start of a region, only the end. 148 assert(_covered[idx].start() == new_region.start(), "inv"); 149 150 MemRegion old_committed = committed_for(_covered[idx]); 151 152 _covered[idx] = new_region; 153 154 MemRegion new_committed = committed_for(new_region); 155 156 if (new_committed.word_size() == old_committed.word_size()) { 157 return; 158 } 159 160 if (new_committed.word_size() > old_committed.word_size()) { 161 // Expand. 162 MemRegion delta = MemRegion(old_committed.end(), 163 new_committed.word_size() - old_committed.word_size()); 164 165 os::commit_memory_or_exit((char*)delta.start(), 166 delta.byte_size(), 167 _page_size, 168 !ExecMem, 169 "card table expansion"); 170 171 memset(delta.start(), clean_card, delta.byte_size()); 172 } else { 173 // Shrink. 174 MemRegion delta = MemRegion(new_committed.end(), 175 old_committed.word_size() - new_committed.word_size()); 176 bool res = os::uncommit_memory((char*)delta.start(), 177 delta.byte_size()); 178 assert(res, "uncommit should succeed"); 179 } 180 181 log_trace(gc, barrier)("CardTable::resize_covered_region: "); 182 log_trace(gc, barrier)(" _covered[%d].start(): " PTR_FORMAT " _covered[%d].last(): " PTR_FORMAT, 183 idx, p2i(_covered[idx].start()), idx, p2i(_covered[idx].last())); 184 log_trace(gc, barrier)(" committed_start: " PTR_FORMAT " committed_last: " PTR_FORMAT, 185 p2i(new_committed.start()), p2i(new_committed.last())); 186 log_trace(gc, barrier)(" byte_for(start): " PTR_FORMAT " byte_for(last): " PTR_FORMAT, 187 p2i(byte_for(_covered[idx].start())), p2i(byte_for(_covered[idx].last()))); 188 log_trace(gc, barrier)(" addr_for(start): " PTR_FORMAT " addr_for(last): " PTR_FORMAT, 189 p2i(addr_for((CardValue*) new_committed.start())), p2i(addr_for((CardValue*) new_committed.last()))); 190 191 #ifdef ASSERT 192 // Touch the last card of the covered region to show that it 193 // is committed (or SEGV). 194 if (is_init_completed()) { 195 (void) (*(volatile CardValue*)byte_for(_covered[idx].last())); 196 } 197 #endif 198 } 199 200 // Note that these versions are precise! The scanning code has to handle the 201 // fact that the write barrier may be either precise or imprecise. 202 void CardTable::dirty_MemRegion(MemRegion mr) { 203 assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start"); 204 assert(align_up (mr.end(), HeapWordSize) == mr.end(), "Unaligned end" ); 205 assert(_covered[0].contains(mr) || _covered[1].contains(mr), "precondition"); 206 CardValue* cur = byte_for(mr.start()); 207 CardValue* last = byte_after(mr.last()); 208 memset(cur, dirty_card, pointer_delta(last, cur, sizeof(CardValue))); 209 } 210 211 void CardTable::clear_MemRegion(MemRegion mr) { 212 // Be conservative: only clean cards entirely contained within the 213 // region. 214 CardValue* cur; 215 if (mr.start() == _whole_heap.start()) { 216 cur = byte_for(mr.start()); 217 } else { 218 assert(mr.start() > _whole_heap.start(), "mr is not covered."); 219 cur = byte_after(mr.start() - 1); 220 } 221 CardValue* last = byte_after(mr.last()); 222 memset(cur, clean_card, pointer_delta(last, cur, sizeof(CardValue))); 223 } 224 225 uintx CardTable::ct_max_alignment_constraint() { 226 // Calculate maximum alignment using GCCardSizeInBytes as card_size hasn't been set yet 227 return GCCardSizeInBytes * os::vm_page_size(); 228 } 229 230 #ifndef PRODUCT 231 void CardTable::verify_region(MemRegion mr, CardValue val, bool val_equals) { 232 CardValue* start = byte_for(mr.start()); 233 CardValue* end = byte_for(mr.last()); 234 bool failures = false; 235 for (CardValue* curr = start; curr <= end; ++curr) { 236 CardValue curr_val = *curr; 237 bool failed = (val_equals) ? (curr_val != val) : (curr_val == val); 238 if (failed) { 239 if (!failures) { 240 log_error(gc, verify)("== CT verification failed: [" PTR_FORMAT "," PTR_FORMAT "]", p2i(start), p2i(end)); 241 log_error(gc, verify)("== %sexpecting value: %d", (val_equals) ? "" : "not ", val); 242 failures = true; 243 } 244 log_error(gc, verify)("== card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], val: %d", 245 p2i(curr), p2i(addr_for(curr)), 246 p2i((HeapWord*) (((size_t) addr_for(curr)) + _card_size)), 247 (int) curr_val); 248 } 249 } 250 guarantee(!failures, "there should not have been any failures"); 251 } 252 253 void CardTable::verify_not_dirty_region(MemRegion mr) { 254 verify_region(mr, dirty_card, false /* val_equals */); 255 } 256 257 void CardTable::verify_dirty_region(MemRegion mr) { 258 verify_region(mr, dirty_card, true /* val_equals */); 259 } 260 #endif 261 262 void CardTable::print_on(outputStream* st) const { 263 st->print_cr("Card table byte_map: [" PTR_FORMAT "," PTR_FORMAT "] _byte_map_base: " PTR_FORMAT, 264 p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(_byte_map_base)); 265 }