1 /*
  2  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "gc/shenandoah/shenandoahCardTable.hpp"
 27 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 28 #include "gc/shenandoah/shenandoahUtils.hpp"
 29 #include "runtime/init.hpp"
 30 #include "nmt/memTracker.hpp"
 31 
void ShenandoahCardTable::initialize() {
  size_t num_cards = cards_required(_whole_heap.word_size());

  // Each card takes 1 byte in the table; + 1 for the guard card past the
  // end of the covered range.
  size_t num_bytes = num_cards + 1;
  const size_t granularity = os::vm_allocation_granularity();
  // Round the map size up to a whole number of pages / allocation-granularity
  // units so each table can be reserved and committed on its own.
  _byte_map_size = align_up(num_bytes, MAX2(_page_size, granularity));

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();

  // TODO: Why rs_align is 0 on page_size == os::vm_page_size?
  // ReservedSpace constructor would assert rs_align >= os::vm_page_size().
  const size_t rs_align = _page_size == os::vm_page_size() ? 0 : MAX2(_page_size, granularity);

  // Reserve and commit backing memory for the write table (see the
  // two-argument initialize() overload below).
  ReservedSpace write_space(_byte_map_size, rs_align, _page_size);
  initialize(write_space);

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to _byte_map_base, i.e.
  //
  //   _byte_map = _byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (CardValue*) write_space.base();
  // _byte_map_base is biased so that shifting a heap address right by the
  // card shift and adding it yields the card entry directly.
  _byte_map_base = _byte_map - (uintptr_t(low_bound) >> _card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[last_valid_index()], "Checking end of map");

  // The write table aliases the superclass's _byte_map; it is the table
  // mutator barriers mark into.
  _write_byte_map = _byte_map;
  _write_byte_map_base = _byte_map_base;

  // Reserve and commit a second, identically sized table that serves as the
  // stable "read" copy while the remembered set is scanned.
  ReservedSpace read_space(_byte_map_size, rs_align, _page_size);
  initialize(read_space);

  _read_byte_map = (CardValue*) read_space.base();
  // NOTE(review): this line uses the card_shift() accessor while the write
  // table above reads the _card_shift field — presumably equivalent; confirm
  // against CardTable and unify.
  _read_byte_map_base = _read_byte_map - (uintptr_t(low_bound) >> card_shift());
  assert(read_byte_for(low_bound) == &_read_byte_map[0], "Checking start of map");
  assert(read_byte_for(high_bound-1) <= &_read_byte_map[last_valid_index()], "Checking end of map");

  // Both tables cover the entire heap range.
  _covered[0] = _whole_heap;

  log_trace(gc, barrier)("ShenandoahCardTable::ShenandoahCardTable:");
  log_trace(gc, barrier)("    &_write_byte_map[0]: " INTPTR_FORMAT "  &_write_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                         p2i(&_write_byte_map[0]), p2i(&_write_byte_map[last_valid_index()]));
  log_trace(gc, barrier)("    _write_byte_map_base: " INTPTR_FORMAT, p2i(_write_byte_map_base));
  log_trace(gc, barrier)("    &_read_byte_map[0]: " INTPTR_FORMAT "  &_read_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                  p2i(&_read_byte_map[0]), p2i(&_read_byte_map[last_valid_index()]));
  log_trace(gc, barrier)("    _read_byte_map_base: " INTPTR_FORMAT, p2i(_read_byte_map_base));

  // TODO: As currently implemented, we do not swap pointers between _read_byte_map and _write_byte_map
  // because the mutator write barrier hard codes the address of the _write_byte_map_base.  Instead,
  // the current implementation simply copies contents of _write_byte_map onto _read_byte_map and cleans
  // the entirety of _write_byte_map at the init_mark safepoint.
  //
  // Alternatively, we may switch to a SATB-based write barrier and replace the direct card-marking
  // remembered set with something entirely different.
}
 88 
 89 void ShenandoahCardTable::initialize(const ReservedSpace& card_table) {
 90   MemTracker::record_virtual_memory_type((address)card_table.base(), mtGC);
 91 
 92   os::trace_page_sizes("Card Table", _byte_map_size, _byte_map_size,
 93                        card_table.base(), card_table.size(), _page_size);
 94   if (!card_table.is_reserved()) {
 95     vm_exit_during_initialization("Could not reserve enough space for the card marking array");
 96   }
 97   os::commit_memory_or_exit(card_table.base(), _byte_map_size, card_table.alignment(), false,
 98                             "Cannot commit memory for card table");
 99 }
100 
101 bool ShenandoahCardTable::is_in_young(const void* obj) const {
102   return ShenandoahHeap::heap()->is_in_young(obj);
103 }
104 
105 CardValue* ShenandoahCardTable::read_byte_for(const void* p) {
106     CardValue* result = &_read_byte_map_base[uintptr_t(p) >> _card_shift];
107     assert(result >= _read_byte_map && result < _read_byte_map + _byte_map_size,
108            "out of bounds accessor for card marking array");
109     return result;
110 }
111 
112 size_t ShenandoahCardTable::last_valid_index() {
113   return CardTable::last_valid_index();
114 }
115 
// TODO: This service is not currently used because we are not able to swap _read_byte_map_base and
// _write_byte_map_base pointers.  If we were able to do so, we would invoke clear_read_table "immediately"
// following the end of concurrent remembered set scanning, so that the read card table would be ready
// to serve as the new write card table the next time the pointer values were swapped.
//
// In the current implementation, the write table is cleared immediately after its contents are copied to
// the read table, obviating the need for this service.
123 void ShenandoahCardTable::clear_read_table() {
124   for (size_t i = 0; i < _byte_map_size; i++) {
125     _read_byte_map[i] = clean_card;
126   }
127 }
128 
// TODO: This service is not currently used because the mutator write barrier implementation hard codes the
// location of _write_byte_map_base.  If we change the mutator's write barrier implementation, then we
// may use this service to exchange the roles of the read card table and the write card table.
132 void ShenandoahCardTable::swap_card_tables() {
133   shenandoah_assert_safepoint();
134 
135   CardValue* save_value = _read_byte_map;
136   _read_byte_map = _write_byte_map;
137   _write_byte_map = save_value;
138 
139   save_value = _read_byte_map_base;
140   _read_byte_map_base = _write_byte_map_base;
141   _write_byte_map_base = save_value;
142 
143   // update the superclass instance variables
144   _byte_map = _write_byte_map;
145   _byte_map_base = _write_byte_map_base;
146 }