/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/shared/fullGCForwarding.hpp"
#include "logging/log.hpp"
#include "nmt/memTag.hpp"
#include "utilities/ostream.hpp"
#include "utilities/concurrentHashTable.inline.hpp"
#include "utilities/fastHash.hpp"
#include "utilities/powerOfTwo.hpp"

static uintx hash(HeapWord* const& addr) {
  uint64_t val = reinterpret_cast<uint64_t>(addr);
  uint32_t hash = FastHash::get_hash32((uint32_t)val, (uint32_t)(val >> 32));
  return hash;
}

struct ForwardingEntry {
  HeapWord* _from;
  HeapWord* _to;
  ForwardingEntry(HeapWord* from, HeapWord* to) : _from(from), _to(to) {}
};

struct FallbackTableConfig {
  using Value = ForwardingEntry;
  static uintx get_hash(Value const& entry, bool* is_dead) {
    return hash(entry._from);
  }
  static void* allocate_node(void* context, size_t size, Value const& value) {
    return AllocateHeap(size, mtGC);
  }
  static void free_node(void* context, void* memory, Value const& value) {
    FreeHeap(memory);
  }
};

class FallbackTable : public ConcurrentHashTable<FallbackTableConfig, mtGC> {

};

class FallbackTableLookup : public StackObj {
  ForwardingEntry const _entry;
public:
  explicit FallbackTableLookup(HeapWord* from) : _entry(from, nullptr) {}
  uintx get_hash() const {
    return hash(_entry._from);
  }
  bool equals(ForwardingEntry* value) {
    return _entry._from == value->_from;
  }
  bool is_dead(ForwardingEntry* value) { return false; }
};

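// Static state for the compact forwarding scheme. _bases_table holds one base pointer per
// block of BLOCK_SIZE_WORDS heap words and is initialized to UNUSED_BASE in begin();
// _biased_bases is the same array biased by the block index of the heap start, so that it
// can be indexed directly with (uintptr_t(addr) >> BLOCK_SIZE_BYTES_SHIFT). The fast path
// that encodes a forwarding relative to its block base is not part of this file (it
// presumably lives in fullGCForwarding.inline.hpp); forwardings that cannot be encoded
// that way are routed through the FallbackTable above.
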
// We cannot use 0, because that may already be a valid base address in zero-based heaps.
// 0x1 is safe because heap base addresses must be aligned to a much larger granularity.
HeapWord* const FullGCForwarding::UNUSED_BASE = reinterpret_cast<HeapWord*>(0x1);

HeapWord* FullGCForwarding::_heap_start = nullptr;
size_t FullGCForwarding::_heap_start_region_bias = 0;
size_t FullGCForwarding::_num_regions = 0;
uintptr_t FullGCForwarding::_region_mask = 0;
HeapWord** FullGCForwarding::_biased_bases = nullptr;
HeapWord** FullGCForwarding::_bases_table = nullptr;
FallbackTable* FullGCForwarding::_fallback_table = nullptr;
#ifndef PRODUCT
volatile uint64_t FullGCForwarding::_num_forwardings = 0;
volatile uint64_t FullGCForwarding::_num_fallback_forwardings = 0;
#endif

void FullGCForwarding::initialize(MemRegion heap) {
#ifdef _LP64
  _heap_start = heap.start();

  size_t rounded_heap_size = round_up_power_of_2(heap.byte_size());

  _num_regions = (rounded_heap_size / BytesPerWord) / BLOCK_SIZE_WORDS;

  _heap_start_region_bias = (uintptr_t)_heap_start >> BLOCK_SIZE_BYTES_SHIFT;
  _region_mask = ~((uintptr_t(1) << BLOCK_SIZE_BYTES_SHIFT) - 1);

  assert(_bases_table == nullptr, "should not be initialized yet");
  assert(_fallback_table == nullptr, "should not be initialized yet");
#endif
}

void FullGCForwarding::begin() {
#ifdef _LP64
  assert(_bases_table == nullptr, "should not be initialized yet");
  assert(_fallback_table == nullptr, "should not be initialized yet");

  _fallback_table = new FallbackTable();

#ifndef PRODUCT
  _num_forwardings = 0;
  _num_fallback_forwardings = 0;
#endif

  size_t max = _num_regions;
  _bases_table = NEW_C_HEAP_ARRAY(HeapWord*, max, mtGC);
  HeapWord** biased_start = _bases_table - _heap_start_region_bias;
  _biased_bases = biased_start;
  for (size_t i = 0; i < max; i++) {
    _bases_table[i] = UNUSED_BASE;
  }
#endif
}

void FullGCForwarding::end() {
#ifndef PRODUCT
  log_info(gc)("Total forwardings: " UINT64_FORMAT ", fallback forwardings: " UINT64_FORMAT
               ", ratio: %f, memory used by fallback table: %zu%s, memory used by bases table: %zu%s",
               _num_forwardings, _num_fallback_forwardings, (float)_num_forwardings/(float)_num_fallback_forwardings,
               byte_size_in_proper_unit(_fallback_table->get_mem_size(Thread::current())),
               proper_unit_for_byte_size(_fallback_table->get_mem_size(Thread::current())),
               byte_size_in_proper_unit(sizeof(HeapWord*) * _num_regions),
               proper_unit_for_byte_size(sizeof(HeapWord*) * _num_regions));
#endif
#ifdef _LP64
  assert(_bases_table != nullptr, "should be initialized");
  FREE_C_HEAP_ARRAY(HeapWord*, _bases_table);
  _bases_table = nullptr;
  delete _fallback_table;
  _fallback_table = nullptr;
#endif
}

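// Fallback path: record the forwarding from 'from' to 'to' in the hash table. This is
// presumably only taken for the rare forwardings that the compact, per-block encoding
// cannot represent (the fast path is expected to live in fullGCForwarding.inline.hpp,
// not in this file). Concurrent callers are supported by the ConcurrentHashTable; if an
// entry for 'from' already exists, its target is overwritten with the new 'to'.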
void FullGCForwarding::fallback_forward_to(HeapWord* from, HeapWord* to) {
  assert(to != nullptr, "no null forwarding");
  assert(_fallback_table != nullptr, "should be initialized");
  FallbackTableLookup lookup_f(from);
  ForwardingEntry entry(from, to);
  auto found_f = [&](ForwardingEntry* found) {
    // If a duplicate has been found, overwrite it with the new value.
    // This is also called when a new entry has been successfully inserted.
    if (found->_to != to) {
      found->_to = to;
    }
  };
  Thread* current_thread = Thread::current();
  bool grow;
  bool added = _fallback_table->insert_get(current_thread, lookup_f, entry, found_f, &grow);
  NOT_PRODUCT(Atomic::inc(&_num_fallback_forwardings);)
#ifdef ASSERT
  assert(fallback_forwardee(from) != nullptr, "must have entered forwarding");
  assert(fallback_forwardee(from) == to, "forwarding must be correct, added: %s, from: " PTR_FORMAT ", to: " PTR_FORMAT ", fwd: " PTR_FORMAT, BOOL_TO_STR(added), p2i(from), p2i(to), p2i(fallback_forwardee(from)));
#endif
  if (grow) {
    _fallback_table->grow(current_thread);
    tty->print_cr("grow fallback table to size: %zu bytes",
                  _fallback_table->get_mem_size(current_thread));
  }
}

// Look up the forwarding for 'from' in the fallback table. An entry must exist.
HeapWord* FullGCForwarding::fallback_forwardee(HeapWord* from) {
  assert(_fallback_table != nullptr, "fallback table must be present");
  HeapWord* result = nullptr;
  FallbackTableLookup lookup_f(from);
  auto found_f = [&](ForwardingEntry* found) {
    result = found->_to;
  };
  bool found = _fallback_table->get(Thread::current(), lookup_f, found_f);
  assert(found, "something must have been found");
  assert(result != nullptr, "must have found forwarding");
  return result;
}