/*
 * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/blockOffsetTable.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/space.inline.hpp"
#include "logging/log.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/powerOfTwo.hpp"
#if INCLUDE_PARALLELGC
#include "gc/parallel/objectStartArray.hpp"
#endif

uint CardTable::_card_shift = 0;
uint CardTable::_card_size = 0;
uint CardTable::_card_size_in_words = 0;

void CardTable::initialize_card_size() {
  assert(UseG1GC || UseParallelGC || UseSerialGC,
         "Initialize card size should only be called by card based collectors.");

  _card_size = GCCardSizeInBytes;
  _card_shift = log2i_exact(_card_size);
  _card_size_in_words = _card_size / sizeof(HeapWord);
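
  // For example (assuming the default GCCardSizeInBytes of 512 on a 64-bit
  // VM with 8-byte HeapWords): _card_size == 512, _card_shift == 9 and
  // _card_size_in_words == 64, i.e. one card table byte per 512 bytes of heap.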

  // Set blockOffsetTable size based on card table entry size
  BOTConstants::initialize_bot_size(_card_shift);

#if INCLUDE_PARALLELGC
  // Set ObjectStartArray block size based on card table entry size
  ObjectStartArray::initialize_block_size(_card_shift);
#endif

  log_info_p(gc, init)("CardTable entry size: " UINT32_FORMAT, _card_size);
}

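// Returns the number of bytes to reserve for the card table: one byte per
// card, including the final guard card, aligned up so the mapping can be
// managed in whole pages (or allocation-granularity units, whichever is
// larger). For example, a 1 GiB heap with 512-byte cards needs 2M card bytes
// plus the guard card, rounded up to the commit granularity.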
size_t CardTable::compute_byte_map_size() {
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_up(_guard_index + 1, MAX2(_page_size, granularity));
}

CardTable::CardTable(MemRegion whole_heap) :
  _whole_heap(whole_heap),
  _guard_index(0),
  _last_valid_index(0),
  _page_size(os::vm_page_size()),
  _byte_map_size(0),
  _byte_map(NULL),
  _byte_map_base(NULL),
  _cur_covered_regions(0),
  _covered(MemRegion::create_array(_max_covered_regions, mtGC)),
  _committed(MemRegion::create_array(_max_covered_regions, mtGC)),
  _guard_region()
{
  assert((uintptr_t(_whole_heap.start()) & (_card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(_whole_heap.end()) & (_card_size - 1)) == 0, "heap must end at card boundary");
}

CardTable::~CardTable() {
  MemRegion::destroy_array(_covered, _max_covered_regions);
  MemRegion::destroy_array(_committed, _max_covered_regions);
}

void CardTable::initialize() {
  _guard_index = cards_required(_whole_heap.word_size()) - 1;
  _last_valid_index = _guard_index - 1;

  _byte_map_size = compute_byte_map_size();

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();

  _cur_covered_regions = 0;

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, _page_size);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("Card Table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to _byte_map_base, i.e.
  //
  //   _byte_map = _byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (CardValue*) heap_rs.base();
  _byte_map_base = _byte_map - (uintptr_t(low_bound) >> _card_shift);
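
  // Illustrative example (addresses are hypothetical): with _card_shift == 9
  // and a heap starting at 0x700000000, the write barrier can dirty the card
  // for any heap address p with just
  //
  //   _byte_map_base[uintptr_t(p) >> 9] = dirty_card;
  //
  // because the offset for the heap base is folded into _byte_map_base once,
  // here, instead of being recomputed at every store.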
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

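  // Commit the page holding the last (guard) card and mark that card with a
  // sentinel value; verify_guard() checks that the sentinel is still intact,
  // which catches stores through the barrier that index past the covered heap.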
  CardValue* guard_card = &_byte_map[_guard_index];
  HeapWord* guard_page = align_down((HeapWord*)guard_card, _page_size);
  _guard_region = MemRegion(guard_page, _page_size);
  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                            !ExecMem, "card table last card");
  *guard_card = last_card;

  log_trace(gc, barrier)("CardTable::CardTable: ");
  log_trace(gc, barrier)(" &_byte_map[0]: " INTPTR_FORMAT " &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                         p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
  log_trace(gc, barrier)(" _byte_map_base: " INTPTR_FORMAT, p2i(_byte_map_base));
}

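// Returns the index of the covered region whose start address is "base". If
// no such region exists yet, a new zero-sized entry is created for it,
// shifting later entries up so that _covered[] stays sorted by start address.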
int CardTable::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  CardValue* ct_start = byte_for(base);
  HeapWord* ct_start_aligned = align_down((HeapWord*)ct_start, _page_size);
  _committed[res].set_start(ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

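// Returns the highest end() among the committed regions before index "ind",
// or NULL if there are none. Used to detect page overlap with lower regions
// when resizing.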
HeapWord* CardTable::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

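// Returns the portion of "mr" that is committed on behalf of region "self"
// alone, i.e. with every other region's committed range (and the guard page)
// subtracted. Only this unique portion may safely be uncommitted.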
MemRegion CardTable::committed_unique_to_self(int self, MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}

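// Grows or shrinks the card table coverage for the covered region starting at
// new_region.start(): commits any additional card table pages a grow needs,
// uncommits pages that a shrink frees up (but only those used by no other
// region), and cleans the card entries for the newly covered words. The start
// of a region never moves; only its end changes.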
void CardTable::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    HeapWord* new_end = (HeapWord*) byte_after(new_region.last());
    HeapWord* new_end_aligned = align_up(new_end, _page_size);
    assert(new_end_aligned >= new_end, "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = ind + 1; ri < _cur_covered_regions; ri++) {
      if (new_end_aligned > _committed[ri].start()) {
        assert(new_end_aligned <= _committed[ri].end(),
               "An earlier committed region can't cover a later committed region");
        // Any region containing the new end
        // should start at or beyond the region found (ind)
        // for the new end (committed regions are not expected to
        // be proper subsets of other committed regions).
        assert(_committed[ri].start() >= _committed[ind].start(),
               "New end of committed region is inconsistent");
        new_end_aligned = _committed[ri].start();
        // new_end_aligned can be equal to the start of its
        // committed region (i.e., of "ind") if a second
        // region following "ind" also starts at the same location
        // as "ind".
        assert(new_end_aligned >= _committed[ind].start(),
               "New end of committed region is before start");
        debug_only(collided = true;)
        // Should only collide with 1 region
        break;
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
             "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), _page_size,
                                !ExecMem, "card table expansion");
      // Use new_end_aligned (as opposed to new_end_for_commit) because
      // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        if (!os::uncommit_memory((char*)uncommit_region.start(),
                                 uncommit_region.byte_size())) {
          assert(false, "Card table contraction failed");
          // The call failed so don't change the end of the
          // committed region. This is better than taking the
          // VM down.
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

    // The default of 0 is not necessarily clean cards.
    CardValue* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) < _guard_index,
           "The guard card will be overwritten");
    // The commented-out line below would clean only the newly expanded
    // region, not the aligned-up expanded region that was actually committed.
    // CardValue* const end = byte_after(new_region.last());
    CardValue* const end = (CardValue*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
           "Expect to be beyond new region unless impacting another region");
    // do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(CardValue)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());

  log_trace(gc, barrier)("CardTable::resize_covered_region: ");
  log_trace(gc, barrier)(" _covered[%d].start(): " INTPTR_FORMAT " _covered[%d].last(): " INTPTR_FORMAT,
                         ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
  log_trace(gc, barrier)(" _committed[%d].start(): " INTPTR_FORMAT " _committed[%d].last(): " INTPTR_FORMAT,
                         ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
  log_trace(gc, barrier)(" byte_for(start): " INTPTR_FORMAT " byte_for(last): " INTPTR_FORMAT,
                         p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last())));
  log_trace(gc, barrier)(" addr_for(start): " INTPTR_FORMAT " addr_for(last): " INTPTR_FORMAT,
                         p2i(addr_for((CardValue*) _committed[ind].start())),
                         p2i(addr_for((CardValue*) _committed[ind].last())));

  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only((void) (*byte_for(_covered[ind].last()));)
  debug_only(verify_guard();)
}

// Note that these versions are precise! The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.
void CardTable::dirty_MemRegion(MemRegion mr) {
  assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert(align_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  CardValue* cur  = byte_for(mr.start());
  CardValue* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTable::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  CardValue* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  CardValue* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(CardValue)));
}

void CardTable::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

void CardTable::dirty(MemRegion mr) {
  CardValue* first = byte_for(mr.start());
  CardValue* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

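// Scans the cards covering "mr" (clipped to the covered regions) and returns
// the first maximal run of dirty cards as a MemRegion, optionally resetting
// those card entries to "reset_val" along the way. If no dirty card is found,
// the empty region [mr.end(), mr.end()) is returned. A caller can therefore
// claim and process one dirty range at a time until the result is empty.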
MemRegion CardTable::dirty_card_range_after_reset(MemRegion mr,
                                                  bool reset,
                                                  int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      CardValue* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards * _card_size_in_words);
          if (reset) {
            for (size_t i = 0; i < dirty_cards; i++) {
              cur_entry[i] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

uintx CardTable::ct_max_alignment_constraint() {
  // Calculate maximum alignment using GCCardSizeInBytes as card_size hasn't been set yet
  return GCCardSizeInBytes * os::vm_page_size();
}

void CardTable::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTable::invalidate(MemRegion mr) {
  assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert(align_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTable::verify() {
  verify_guard();
}

#ifndef PRODUCT
void CardTable::verify_region(MemRegion mr, CardValue val, bool val_equals) {
  CardValue* start = byte_for(mr.start());
  CardValue* end   = byte_for(mr.last());
  bool failures = false;
  for (CardValue* curr = start; curr <= end; ++curr) {
    CardValue curr_val = *curr;
    bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
    if (failed) {
      if (!failures) {
        log_error(gc, verify)("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i(start), p2i(end));
        log_error(gc, verify)("== %sexpecting value: %d", (val_equals) ? "" : "not ", val);
        failures = true;
      }
      log_error(gc, verify)("== card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], val: %d",
                            p2i(curr), p2i(addr_for(curr)),
                            p2i((HeapWord*) (((size_t) addr_for(curr)) + _card_size)),
                            (int) curr_val);
    }
  }
  guarantee(!failures, "there should not have been any failures");
}

void CardTable::verify_not_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, false /* val_equals */);
}

void CardTable::verify_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, true /* val_equals */);
}
#endif

void CardTable::print_on(outputStream* st) const {
  st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] _byte_map_base: " INTPTR_FORMAT,
               p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(_byte_map_base));
}
--- EOF ---