1 /*
  2  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahCardTable.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "runtime/init.hpp"
#include "services/memTracker.hpp"

#include <cstring>
 31 
 32 void ShenandoahCardTable::initialize() {
 33   size_t num_cards = cards_required(_whole_heap.word_size());
 34 
 35   // each card takes 1 byte; + 1 for the guard card
 36   size_t num_bytes = num_cards + 1;
 37   const size_t granularity = os::vm_allocation_granularity();
 38   _byte_map_size = align_up(num_bytes, MAX2(_page_size, granularity));
 39 
 40   HeapWord* low_bound  = _whole_heap.start();
 41   HeapWord* high_bound = _whole_heap.end();
 42 
 43   // TODO: Why rs_align is 0 on page_size == os::vm_page_size?
 44   // ReservedSpace constructor would assert rs_align >= os::vm_page_size().
 45   const size_t rs_align = _page_size == os::vm_page_size() ? 0 : MAX2(_page_size, granularity);
 46 
 47   ReservedSpace write_space(_byte_map_size, rs_align, _page_size);
 48   initialize(write_space);
 49 
 50   // The assembler store_check code will do an unsigned shift of the oop,
 51   // then add it to _byte_map_base, i.e.
 52   //
 53   //   _byte_map = _byte_map_base + (uintptr_t(low_bound) >> card_shift)
 54   _byte_map = (CardValue*) write_space.base();
 55   _byte_map_base = _byte_map - (uintptr_t(low_bound) >> _card_shift);
 56   assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
 57   assert(byte_for(high_bound-1) <= &_byte_map[last_valid_index()], "Checking end of map");
 58 
 59   CardValue* guard_card = &_byte_map[num_cards];
 60   assert(is_aligned(guard_card, _page_size), "must be on its own OS page");
 61   _guard_region = MemRegion((HeapWord*)guard_card, _page_size);
 62 
 63   _write_byte_map = _byte_map;
 64   _write_byte_map_base = _byte_map_base;
 65 
 66   ReservedSpace read_space(_byte_map_size, rs_align, _page_size);
 67   initialize(read_space);
 68 
 69   _read_byte_map = (CardValue*) read_space.base();
 70   _read_byte_map_base = _read_byte_map - (uintptr_t(low_bound) >> card_shift());
 71   assert(read_byte_for(low_bound) == &_read_byte_map[0], "Checking start of map");
 72   assert(read_byte_for(high_bound-1) <= &_read_byte_map[last_valid_index()], "Checking end of map");
 73 
 74   _covered[0] = _whole_heap;
 75 
 76   log_trace(gc, barrier)("ShenandoahCardTable::ShenandoahCardTable:");
 77   log_trace(gc, barrier)("    &_write_byte_map[0]: " INTPTR_FORMAT "  &_write_byte_map[_last_valid_index]: " INTPTR_FORMAT,
 78                          p2i(&_write_byte_map[0]), p2i(&_write_byte_map[last_valid_index()]));
 79   log_trace(gc, barrier)("    _write_byte_map_base: " INTPTR_FORMAT, p2i(_write_byte_map_base));
 80   log_trace(gc, barrier)("    &_read_byte_map[0]: " INTPTR_FORMAT "  &_read_byte_map[_last_valid_index]: " INTPTR_FORMAT,
 81                   p2i(&_read_byte_map[0]), p2i(&_read_byte_map[last_valid_index()]));
 82   log_trace(gc, barrier)("    _read_byte_map_base: " INTPTR_FORMAT, p2i(_read_byte_map_base));
 83 
 84   // TODO: As currently implemented, we do not swap pointers between _read_byte_map and _write_byte_map
 85   // because the mutator write barrier hard codes the address of the _write_byte_map_base.  Instead,
 86   // the current implementation simply copies contents of _write_byte_map onto _read_byte_map and cleans
 87   // the entirety of _write_byte_map at the init_mark safepoint.
 88   //
 89   // If we choose to modify the mutator write barrier so that we can swap _read_byte_map_base and
 90   // _write_byte_map_base pointers, we may also have to figure out certain details about how the
 91   // _guard_region is implemented so that we can replicate the read and write versions of this region.
 92   //
 93   // Alternatively, we may switch to a SATB-based write barrier and replace the direct card-marking
 94   // remembered set with something entirely different.
 95 }
 96 
 97 void ShenandoahCardTable::initialize(const ReservedSpace& card_table) {
 98   MemTracker::record_virtual_memory_type((address)card_table.base(), mtGC);
 99 
100   os::trace_page_sizes("Card Table", _byte_map_size, _byte_map_size,
101                        _page_size, card_table.base(), card_table.size());
102   if (!card_table.is_reserved()) {
103     vm_exit_during_initialization("Could not reserve enough space for the card marking array");
104   }
105   os::commit_memory_or_exit(card_table.base(), _byte_map_size, card_table.alignment(), false,
106                             "Cannot commit memory for card table");
107 }
108 
109 bool ShenandoahCardTable::is_in_young(const void* obj) const {
110   return ShenandoahHeap::heap()->is_in_young(obj);
111 }
112 
113 CardValue* ShenandoahCardTable::read_byte_for(const void* p) {
114     CardValue* result = &_read_byte_map_base[uintptr_t(p) >> _card_shift];
115     assert(result >= _read_byte_map && result < _read_byte_map + _byte_map_size,
116            "out of bounds accessor for card marking array");
117     return result;
118 }
119 
// Index of the last usable (non-guard) card; simply exposes the
// CardTable base-class computation unchanged.
size_t ShenandoahCardTable::last_valid_index() {
  return CardTable::last_valid_index();
}
123 
124 // TODO: This service is not currently used because we are not able to swap _read_byte_map_base and
125 // _write_byte_map_base pointers.  If we were able to do so, we would invoke clear_read_table "immediately"
126 // following the end of concurrent remembered set scanning so that this read card table would be ready
127 // to serve as the new write card table at the time these pointer values were next swapped.
128 //
129 // In the current implementation, the write-table is cleared immediately after its contents is copied to
130 // the read table, obviating the need for this service.
131 void ShenandoahCardTable::clear_read_table() {
132   for (size_t i = 0; i < _byte_map_size; i++) {
133     _read_byte_map[i] = clean_card;
134   }
135 }
136 
137 // TODO: This service is not currently used because the mutator write barrier implementation hard codes the
138 // location of the _write_byte_may_base.  If we change the mutator's write barrier implementation, then we
139 // may use this service to exchange the roles of the read-card-table and write-card-table.
140 void ShenandoahCardTable::swap_card_tables() {
141   shenandoah_assert_safepoint();
142 
143   CardValue* save_value = _read_byte_map;
144   _read_byte_map = _write_byte_map;
145   _write_byte_map = save_value;
146 
147   save_value = _read_byte_map_base;
148   _read_byte_map_base = _write_byte_map_base;
149   _write_byte_map_base = save_value;
150 
151   // update the superclass instance variables
152   _byte_map = _write_byte_map;
153   _byte_map_base = _write_byte_map_base;
154 }