/*
 * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/space.inline.hpp"
#include "logging/log.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#if INCLUDE_PARALLELGC
#include "gc/parallel/objectStartArray.hpp"
#endif

uint CardTable::_card_shift = 0;
uint CardTable::_card_size = 0;
uint CardTable::_card_size_in_words = 0;

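// Derive the card-size constants from GCCardSizeInBytes and propagate the
// resulting shift to the data structures that must agree with it.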
void CardTable::initialize_card_size() {
  assert(UseG1GC || UseParallelGC || UseSerialGC,
         "Initialize card size should only be called by card based collectors.");

  _card_size = GCCardSizeInBytes;
  _card_shift = log2i_exact(_card_size);
  _card_size_in_words = _card_size / sizeof(HeapWord);

  // Set blockOffsetTable size based on card table entry size
  BOTConstants::initialize_bot_size(_card_shift);

#if INCLUDE_PARALLELGC
  // Set ObjectStartArray block size based on card table entry size
  ObjectStartArray::initialize_block_size(_card_shift);
#endif

  log_info_p(gc, init)("CardTable entry size: " UINT32_FORMAT, _card_size);
}

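// Round the byte map size up so it occupies whole pages, using the larger of
// the page size and the platform's allocation granularity.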
size_t CardTable::compute_byte_map_size(size_t num_bytes) {
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_up(num_bytes, MAX2(_page_size, granularity));
}

CardTable::CardTable(MemRegion whole_heap) :
  _whole_heap(whole_heap),
  _page_size(os::vm_page_size()),
  _byte_map_size(0),
  _byte_map(NULL),
  _byte_map_base(NULL),
  _cur_covered_regions(0),
  _covered(MemRegion::create_array(_max_covered_regions, mtGC)),
  _committed(MemRegion::create_array(_max_covered_regions, mtGC)),
  _guard_region()
{
  assert((uintptr_t(_whole_heap.start()) & (_card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(_whole_heap.end()) & (_card_size - 1)) == 0, "heap must end at card boundary");
}

CardTable::~CardTable() {
  MemRegion::destroy_array(_covered, _max_covered_regions);
  MemRegion::destroy_array(_committed, _max_covered_regions);
}

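// Reserve the backing store for the byte map (one byte per card plus a
// trailing guard card) and bias _byte_map_base so that byte_for() reduces to
// a shift and an add.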
void CardTable::initialize() {
  size_t num_cards = cards_required(_whole_heap.word_size());

  // each card takes 1 byte; + 1 for the guard card
  size_t num_bytes = num_cards + 1;
  _byte_map_size = compute_byte_map_size(num_bytes);

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();

  _cur_covered_regions = 0;

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, _page_size);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("Card Table", num_bytes, num_bytes,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to _byte_map_base, i.e.
  //
  //   _byte_map = _byte_map_base + (uintptr_t(low_bound) >> card_shift)
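  //
  // E.g., with 512-byte cards (card_shift == 9) and a heap starting at
  // 0x80000000, _byte_map_base is _byte_map - 0x400000, so
  // byte_for(low_bound) lands exactly on &_byte_map[0].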
  _byte_map = (CardValue*) heap_rs.base();
  _byte_map_base = _byte_map - (uintptr_t(low_bound) >> _card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[last_valid_index()], "Checking end of map");

  CardValue* guard_card = &_byte_map[num_cards];
  assert(is_aligned(guard_card, _page_size), "must be on its own OS page");
  _guard_region = MemRegion((HeapWord*)guard_card, _page_size);

  log_trace(gc, barrier)("CardTable::CardTable: ");
  log_trace(gc, barrier)("    &_byte_map[0]: " PTR_FORMAT "  &_byte_map[last_valid_index()]: " PTR_FORMAT,
                  p2i(&_byte_map[0]), p2i(&_byte_map[last_valid_index()]));
  log_trace(gc, barrier)("    _byte_map_base: " PTR_FORMAT, p2i(_byte_map_base));
}

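// Return the index of the covered region whose start is "base", inserting a
// new zero-sized entry (kept in sorted order) if no such region exists yet.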
int CardTable::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  CardValue* ct_start = byte_for(base);
  HeapWord* ct_start_aligned = align_down((HeapWord*)ct_start, _page_size);
  _committed[res].set_start(ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}

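// Return the highest committed end() among the regions with index < ind,
// or NULL if there are none.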
HeapWord* CardTable::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

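// Return the part of "mr" that is committed solely on behalf of region
// "self", i.e. "mr" minus every other region's committed memory.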
MemRegion CardTable::committed_unique_to_self(int self, MemRegion mr) const {
  assert(mr.intersection(_guard_region).is_empty(), "precondition");
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  return result;
}

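// Grow or shrink the committed card table storage backing "new_region".
// Only the end of a covered region ever moves; any newly exposed card table
// entries are reset to clean_card.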
void CardTable::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "just checking");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
    HeapWord* new_end = (HeapWord*) byte_after(new_region.last());
    HeapWord* new_end_aligned = align_up(new_end, _page_size);
    assert(new_end_aligned >= new_end, "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = ind + 1; ri < _cur_covered_regions; ri++) {
      if (new_end_aligned > _committed[ri].start()) {
        assert(new_end_aligned <= _committed[ri].end(),
               "An earlier committed region can't cover a later committed region");
        // Any region containing the new end
        // should start at or beyond the region found (ind)
        // for the new end (committed regions are not expected to
        // be proper subsets of other committed regions).
        assert(_committed[ri].start() >= _committed[ind].start(),
               "New end of committed region is inconsistent");
        new_end_aligned = _committed[ri].start();
        // new_end_aligned can be equal to the start of its
        // committed region (i.e., of "ind") if a second
        // region following "ind" also starts at the same location
        // as "ind".
        assert(new_end_aligned >= _committed[ind].start(),
               "New end of committed region is before start");
        debug_only(collided = true;)
        // Should only collide with 1 region
        break;
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
             "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used for assertion checking below and recalls the fact
    // that the would-be end of the new committed region would have
    // penetrated the guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), _page_size,
                                !ExecMem, "card table expansion");
    // Use new_end_aligned (as opposed to new_end_for_commit) because
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        if (!os::uncommit_memory((char*)uncommit_region.start(),
                                 uncommit_region.byte_size())) {
          assert(false, "Card table contraction failed");
          // The call failed, so don't change the end of the
          // committed region. This is better than taking the
          // VM down.
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

    // The default of 0 is not necessarily clean cards.
    CardValue* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) <= last_valid_index(),
           "The guard card will be overwritten");
    // The commented-out line below would clean only the newly expanded
    // region, not the aligned-up expanded region:
    // CardValue* const end = byte_after(new_region.last());
    CardValue* const end = (CardValue*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
           "Expect to be beyond new region unless impacting another region");
    // do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(CardValue)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());

  log_trace(gc, barrier)("CardTable::resize_covered_region: ");
  log_trace(gc, barrier)("    _covered[%d].start(): " PTR_FORMAT " _covered[%d].last(): " PTR_FORMAT,
                         ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
  log_trace(gc, barrier)("    _committed[%d].start(): " PTR_FORMAT "  _committed[%d].last(): " PTR_FORMAT,
                         ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
  log_trace(gc, barrier)("    byte_for(start): " PTR_FORMAT "  byte_for(last): " PTR_FORMAT,
                         p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last())));
  log_trace(gc, barrier)("    addr_for(start): " PTR_FORMAT "  addr_for(last): " PTR_FORMAT,
                         p2i(addr_for((CardValue*) _committed[ind].start())), p2i(addr_for((CardValue*) _committed[ind].last())));

  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only((void) (*byte_for(_covered[ind].last()));)
}

// Note that these versions are precise!  The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.
void CardTable::dirty_MemRegion(MemRegion mr) {
  assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert(align_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  CardValue* cur  = byte_for(mr.start());
  CardValue* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}

void CardTable::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  CardValue* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  CardValue* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(CardValue)));
}

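// Clean the cards covering the intersection of "mr" with each covered region.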
void CardTable::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

uintx CardTable::ct_max_alignment_constraint() {
  // Calculate maximum alignment using GCCardSizeInBytes as card_size hasn't been set yet
  return GCCardSizeInBytes * os::vm_page_size();
}

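// Dirty the cards covering the intersection of "mr" with each covered region.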
void CardTable::invalidate(MemRegion mr) {
  assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert(align_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

#ifndef PRODUCT
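// Check that every card covering "mr" does (val_equals) or does not
// (!val_equals) hold the value "val", logging each offending card before
// failing.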
void CardTable::verify_region(MemRegion mr, CardValue val, bool val_equals) {
  CardValue* start    = byte_for(mr.start());
  CardValue* end      = byte_for(mr.last());
  bool failures = false;
  for (CardValue* curr = start; curr <= end; ++curr) {
    CardValue curr_val = *curr;
    bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
    if (failed) {
      if (!failures) {
        log_error(gc, verify)("== CT verification failed: [" PTR_FORMAT "," PTR_FORMAT "]", p2i(start), p2i(end));
        log_error(gc, verify)("==   %sexpecting value: %d", (val_equals) ? "" : "not ", val);
        failures = true;
      }
      log_error(gc, verify)("==   card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], val: %d",
                            p2i(curr), p2i(addr_for(curr)),
                            p2i((HeapWord*) (((size_t) addr_for(curr)) + _card_size)),
                            (int) curr_val);
    }
  }
  guarantee(!failures, "there should not have been any failures");
}

void CardTable::verify_not_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, false /* val_equals */);
}

void CardTable::verify_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, true /* val_equals */);
}
#endif

void CardTable::print_on(outputStream* st) const {
  st->print_cr("Card table byte_map: [" PTR_FORMAT "," PTR_FORMAT "] _byte_map_base: " PTR_FORMAT,
               p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(_byte_map_base));
}