/*
 * Copyright (c) 2021, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/slidingForwarding.hpp"
#include "utilities/fastHash.hpp"
#include "utilities/ostream.hpp"
#include "utilities/powerOfTwo.hpp"

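// Alternative forwarding for sliding (compacting) GCs: instead of storing a full
// forwardee pointer, a forwarding is encoded relative to one of NUM_TARGET_REGIONS
// per-region target base addresses plus an offset of at most NUM_OFFSET_BITS bits.
// Forwardings that cannot be encoded this way go into the FallbackTable below.
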
// We cannot use 0, because that may already be a valid base address in zero-based heaps.
// 0x1 is safe because heap base addresses must be aligned by a much larger alignment.
HeapWord* const SlidingForwarding::UNUSED_BASE = reinterpret_cast<HeapWord*>(0x1);

HeapWord* SlidingForwarding::_heap_start = nullptr;
size_t SlidingForwarding::_region_size_words = 0;
size_t SlidingForwarding::_heap_start_region_bias = 0;
size_t SlidingForwarding::_num_regions = 0;
uint SlidingForwarding::_region_size_bytes_shift = 0;
uintptr_t SlidingForwarding::_region_mask = 0;
HeapWord** SlidingForwarding::_biased_bases[SlidingForwarding::NUM_TARGET_REGIONS] = { nullptr, nullptr };
HeapWord** SlidingForwarding::_bases_table = nullptr;
SlidingForwarding::FallbackTable* SlidingForwarding::_fallback_table = nullptr;

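// Compute the region geometry for this heap. With Serial GC and a sufficiently
// small, suitably aligned heap, the whole heap is treated as a single region so
// that every offset fits into NUM_OFFSET_BITS; otherwise the heap is covered by
// fixed-size regions of region_size_words each.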
void SlidingForwarding::initialize(MemRegion heap, size_t region_size_words) {
#ifdef _LP64
  if (UseAltGCForwarding) {
    _heap_start = heap.start();

    // If the heap is small enough to fit directly into the available offset bits,
    // and we are running Serial GC, we can treat the whole heap as a single region
    // if it happens to be aligned to allow biasing.
    size_t rounded_heap_size = round_up_power_of_2(heap.byte_size());

    if (UseSerialGC && (heap.word_size() <= (1 << NUM_OFFSET_BITS)) &&
        is_aligned((uintptr_t)_heap_start, rounded_heap_size)) {
      _num_regions = 1;
      _region_size_words = heap.word_size();
      _region_size_bytes_shift = log2i_exact(rounded_heap_size);
    } else {
      _num_regions = align_up(pointer_delta(heap.end(), heap.start()), region_size_words) / region_size_words;
      _region_size_words = region_size_words;
      _region_size_bytes_shift = log2i_exact(_region_size_words) + LogHeapWordSize;
    }
    _heap_start_region_bias = (uintptr_t)_heap_start >> _region_size_bytes_shift;
    _region_mask = ~((uintptr_t(1) << _region_size_bytes_shift) - 1);

    guarantee((_heap_start_region_bias << _region_size_bytes_shift) == (uintptr_t)_heap_start,
              "must be aligned: _heap_start_region_bias: " SIZE_FORMAT ", _region_size_bytes_shift: %u, _heap_start: " PTR_FORMAT,
              _heap_start_region_bias, _region_size_bytes_shift, p2i(_heap_start));

    assert(_region_size_words >= 1, "regions must be at least a word large");
    assert(_bases_table == nullptr, "should not be initialized yet");
    assert(_fallback_table == nullptr, "should not be initialized yet");
  }
#endif
}

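// Allocate and reset the per-region target-base table at the start of a
// compaction (one slot per source region for each of the NUM_TARGET_REGIONS
// alternatives). The table is biased by the heap start region index so it can
// be indexed directly with a region number derived from an address.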
void SlidingForwarding::begin() {
#ifdef _LP64
  if (UseAltGCForwarding) {
    assert(_bases_table == nullptr, "should not be initialized yet");
    assert(_fallback_table == nullptr, "should not be initialized yet");

    size_t max = _num_regions * NUM_TARGET_REGIONS;
    _bases_table = NEW_C_HEAP_ARRAY(HeapWord*, max, mtGC);
    HeapWord** biased_start = _bases_table - _heap_start_region_bias;
    _biased_bases[0] = biased_start;
    _biased_bases[1] = biased_start + _num_regions;
    for (size_t i = 0; i < max; i++) {
      _bases_table[i] = UNUSED_BASE;
    }
  }
#endif
}

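// Release the forwarding tables after compaction is complete.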
void SlidingForwarding::end() {
#ifdef _LP64
  if (UseAltGCForwarding) {
    assert(_bases_table != nullptr, "should be initialized");
    FREE_C_HEAP_ARRAY(HeapWord*, _bases_table);
    _bases_table = nullptr;
    delete _fallback_table;
    _fallback_table = nullptr;
  }
#endif
}

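// Record a forwarding that cannot be encoded compactly (e.g. when the forwardee
// falls outside the target regions already registered for the source region).
// The fallback table is created lazily on first use.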
void SlidingForwarding::fallback_forward_to(HeapWord* from, HeapWord* to) {
  if (_fallback_table == nullptr) {
    _fallback_table = new FallbackTable();
  }
  _fallback_table->forward_to(from, to);
}

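// Look up a forwarding that was previously recorded in the fallback table.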
HeapWord* SlidingForwarding::fallback_forwardee(HeapWord* from) {
  assert(_fallback_table != nullptr, "fallback table must be present");
  return _fallback_table->forwardee(from);
}

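// The fallback table is a small chained hash table: TABLE_SIZE embedded bucket
// heads, with collisions handled by C-heap-allocated entries linked off each head.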
SlidingForwarding::FallbackTable::FallbackTable() {
  for (uint i = 0; i < TABLE_SIZE; i++) {
    _table[i]._next = nullptr;
    _table[i]._from = nullptr;
    _table[i]._to   = nullptr;
  }
}

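// Free the chained entries; the bucket heads themselves are part of the
// embedded _table array and are not freed individually.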
SlidingForwarding::FallbackTable::~FallbackTable() {
  for (uint i = 0; i < TABLE_SIZE; i++) {
    FallbackTableEntry* entry = _table[i]._next;
    while (entry != nullptr) {
      FallbackTableEntry* next = entry->_next;
      FREE_C_HEAP_OBJ(entry);
      entry = next;
    }
  }
}

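// Map a 'from' address to its bucket by hashing it and taking the upper
// log2(TABLE_SIZE) bits of the 64-bit hash.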
size_t SlidingForwarding::FallbackTable::home_index(HeapWord* from) {
  uint64_t val = reinterpret_cast<uint64_t>(from);
  uint64_t hash = FastHash::get_hash64(val, UCONST64(0xAAAAAAAAAAAAAAAA));
  return hash >> (64 - log2i_exact(TABLE_SIZE));
}

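// Insert a new from->to forwarding. The newest entry always lives in the bucket
// head: the previous head contents are copied into a freshly allocated node that
// is linked in right after the head.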
void SlidingForwarding::FallbackTable::forward_to(HeapWord* from, HeapWord* to) {
  size_t idx = home_index(from);
  FallbackTableEntry* head = &_table[idx];
#ifdef ASSERT
  // Search the chain starting at idx to catch duplicate insertions.
  for (FallbackTableEntry* entry = head; entry != nullptr; entry = entry->_next) {
    assert(entry->_from != from, "Don't re-forward entries into the fallback-table");
  }
#endif
  // Preserve the current head in a new chained entry, then store the new
  // forwarding in the head itself.
  FallbackTableEntry* new_entry = NEW_C_HEAP_OBJ(FallbackTableEntry, mtGC);
  *new_entry = *head;
  head->_next = new_entry;
  head->_from = from;
  head->_to   = to;
}

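// Walk the chain at the home bucket and return the recorded forwardee,
// or nullptr if no entry for 'from' exists.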
HeapWord* SlidingForwarding::FallbackTable::forwardee(HeapWord* from) const {
  size_t idx = home_index(from);
  const FallbackTableEntry* entry = &_table[idx];
  while (entry != nullptr) {
    if (entry->_from == from) {
      return entry->_to;
    }
    entry = entry->_next;
  }
  return nullptr;
}