/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef GC_SHARED_FULLGCFORWARDING_INLINE_HPP
#define GC_SHARED_FULLGCFORWARDING_INLINE_HPP

#include "gc/shared/fullGCForwarding.hpp"

#include "oops/oop.inline.hpp"
#include "utilities/globalDefinitions.hpp"

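// On 64-bit platforms the forwardee is encoded directly in the low
// _num_low_bits bits of from's mark word: the word offset of 'to' from
// _heap_base, shifted by Shift and combined with markWord::marked_value.
// 32-bit platforms keep using the regular forwarding pointer in the header.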
void FullGCForwarding::forward_to(oop from, oop to) {
#ifdef _LP64
  uintptr_t encoded = pointer_delta(cast_from_oop<HeapWord*>(to), _heap_base) << Shift;
  assert(encoded <= static_cast<uintptr_t>(right_n_bits(_num_low_bits)), "encoded forwardee must fit");
  uintptr_t mark = from->mark().value();
  mark &= ~right_n_bits(_num_low_bits);
  mark |= (encoded | markWord::marked_value);
  from->set_mark(markWord(mark));
#else
  from->forward_to(to);
#endif
}

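// Reverse of the encoding above: mask out the low _num_low_bits bits of the
// mark word, shift the offset back and add it to _heap_base.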
oop FullGCForwarding::forwardee(oop from) {
#ifdef _LP64
  uintptr_t mark = from->mark().value();
  HeapWord* decoded = _heap_base + ((mark & right_n_bits(_num_low_bits)) >> Shift);
  return cast_to_oop(decoded);
#else
  return from->forwardee();
#endif
}

bool FullGCForwarding::is_forwarded(oop obj) {
  return obj->mark().is_forwarded();
}

#endif // GC_SHARED_FULLGCFORWARDING_INLINE_HPP
/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#ifndef SHARE_GC_SHARED_FULLGCFORWARDING_INLINE_HPP
#define SHARE_GC_SHARED_FULLGCFORWARDING_INLINE_HPP

#include "gc/shared/fullGCForwarding.hpp"

#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "nmt/memTag.hpp"
#include "oops/markWord.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "utilities/concurrentHashTable.inline.hpp"
#include "utilities/fastHash.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"

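// Forwarding information is stored in the low bits of the object's mark word.
// The heap is divided into fixed-size blocks; for each block that contains at
// least one forwarded object, _biased_bases records the target base address of
// the first forwarding out of that block. A forwarding is then encoded as the
// word offset of the forwardee from that base. Offsets that do not fit into
// the available bits are marked with FALLBACK_PATTERN and kept in a concurrent
// hash table (FallbackTable) instead.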
// We cannot use 0, because that may already be a valid base address in zero-based heaps.
// 0x1 is safe because heap base addresses must be aligned to a much larger alignment.
template <int BITS>
HeapWord* const FullGCForwardingImpl<BITS>::UNUSED_BASE = reinterpret_cast<HeapWord*>(0x1);

template <int BITS>
HeapWord* FullGCForwardingImpl<BITS>::_heap_start = nullptr;
template <int BITS>
size_t FullGCForwardingImpl<BITS>::_heap_start_region_bias = 0;
template <int BITS>
size_t FullGCForwardingImpl<BITS>::_num_regions = 0;
template <int BITS>
uintptr_t FullGCForwardingImpl<BITS>::_region_mask = 0;
template <int BITS>
HeapWord** FullGCForwardingImpl<BITS>::_biased_bases = nullptr;
template <int BITS>
HeapWord** FullGCForwardingImpl<BITS>::_bases_table = nullptr;
template <int BITS>
size_t FullGCForwardingImpl<BITS>::_fallback_table_log2_start_size = 9; // 512 entries.
template <int BITS>
FallbackTable* FullGCForwardingImpl<BITS>::_fallback_table = nullptr;
#ifndef PRODUCT
template <int BITS>
volatile uint64_t FullGCForwardingImpl<BITS>::_num_forwardings = 0;
template <int BITS>
volatile uint64_t FullGCForwardingImpl<BITS>::_num_fallback_forwardings = 0;
#endif

template <int BITS>
bool FullGCForwardingImpl<BITS>::is_forwarded(oop obj) {
  return obj->is_forwarded();
}

template <int BITS>
size_t FullGCForwardingImpl<BITS>::biased_region_index_containing(HeapWord* addr) {
  return reinterpret_cast<uintptr_t>(addr) >> BLOCK_SIZE_BYTES_SHIFT;
}

template <int BITS>
bool FullGCForwardingImpl<BITS>::is_fallback(uintptr_t encoded) {
  return (encoded & OFFSET_MASK) == FALLBACK_PATTERN_IN_PLACE;
}

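// Encode the forwarding of 'from' to 'to' as the word offset of 'to' from the
// target base of from's block. The base is claimed with a CAS on first use, so
// competing GC threads agree on a single base per block. If the offset does
// not fit, FALLBACK_PATTERN is encoded instead and the caller must record the
// forwarding in the fallback table.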
template <int BITS>
uintptr_t FullGCForwardingImpl<BITS>::encode_forwarding(HeapWord* from, HeapWord* to) {
  size_t from_block_idx = biased_region_index_containing(from);

  HeapWord* to_region_base = _biased_bases[from_block_idx];
  if (to_region_base == UNUSED_BASE) {
    HeapWord* prev = Atomic::cmpxchg(&_biased_bases[from_block_idx], UNUSED_BASE, to);
    if (prev == UNUSED_BASE) {
      to_region_base = to;
    } else {
      to_region_base = prev;
    }
  }
  // Avoid pointer_delta() on purpose: using an unsigned subtraction,
  // we get an underflow when to < to_region_base, which means
  // we can use a single comparison instead of:
  // if (to_region_base > to || (to - to_region_base) > MAX_OFFSET) { .. }
  size_t offset = static_cast<size_t>(to - to_region_base);
  if (offset > MAX_OFFSET) {
    offset = FALLBACK_PATTERN;
  }
  uintptr_t encoded = (offset << OFFSET_BITS_SHIFT) | markWord::marked_value;

  assert(is_fallback(encoded) || to == decode_forwarding(from, encoded),
         "must be reversible: " PTR_FORMAT " -> " PTR_FORMAT ", reversed: " PTR_FORMAT
         ", encoded: " INTPTR_FORMAT ", to_region_base: " PTR_FORMAT ", from_block_idx: %zu",
         p2i(from), p2i(to), p2i(decode_forwarding(from, encoded)), encoded, p2i(to_region_base), from_block_idx);
  assert((encoded & ~AVAILABLE_BITS_MASK) == 0, "must encode to available bits");
  return encoded;
}

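// Decode an in-place (non-fallback) forwarding: add the encoded offset to the
// target base recorded for from's block.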
template <int BITS>
HeapWord* FullGCForwardingImpl<BITS>::decode_forwarding(HeapWord* from, uintptr_t encoded) {
  assert(!is_fallback(encoded),
         "must not be fallback-forwarded, encoded: " INTPTR_FORMAT ", OFFSET_MASK: " INTPTR_FORMAT
         ", FALLBACK_PATTERN_IN_PLACE: " INTPTR_FORMAT, encoded, OFFSET_MASK, FALLBACK_PATTERN_IN_PLACE);
  assert((encoded & ~AVAILABLE_BITS_MASK) == 0, "must decode from available bits, encoded: " INTPTR_FORMAT, encoded);
  uintptr_t offset = (encoded >> OFFSET_BITS_SHIFT);

  size_t from_idx = biased_region_index_containing(from);
  HeapWord* base = _biased_bases[from_idx];
  assert(base != UNUSED_BASE, "must not be unused base: encoded: " INTPTR_FORMAT, encoded);
  HeapWord* decoded = base + offset;
  assert(decoded >= _heap_start,
         "Address must be above heap start. encoded: " INTPTR_FORMAT ", base: " PTR_FORMAT,
         encoded, p2i(base));

  return decoded;
}

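// Install the encoded forwarding in the low bits of from's mark word,
// preserving the upper bits. Forwardings that could not be encoded in place
// are additionally recorded in the fallback table.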
template <int BITS>
void FullGCForwardingImpl<BITS>::forward_to_impl(oop from, oop to) {
  assert(_bases_table != nullptr, "call begin() before forwarding");

  markWord from_header = from->mark();
  HeapWord* from_hw = cast_from_oop<HeapWord*>(from);
  HeapWord* to_hw = cast_from_oop<HeapWord*>(to);
  uintptr_t encoded = encode_forwarding(from_hw, to_hw);
  markWord new_header = markWord((from_header.value() & ~OFFSET_MASK) | encoded);
  from->set_mark(new_header);

  if (is_fallback(encoded)) {
    fallback_forward_to(from_hw, to_hw);
  }
  NOT_PRODUCT(Atomic::inc(&_num_forwardings);)
}

template <int BITS>
void FullGCForwardingImpl<BITS>::forward_to(oop obj, oop fwd) {
  assert(fwd != nullptr, "no null forwarding");
#ifdef _LP64
  assert(_bases_table != nullptr, "expect sliding forwarding initialized");
  forward_to_impl(obj, fwd);
  // assert(forwardee(obj) == fwd, "must be forwarded to correct forwardee, obj: " PTR_FORMAT ", forwardee(obj): " PTR_FORMAT ", fwd: " PTR_FORMAT ", mark: " INTPTR_FORMAT ", num-regions: %lu, base: " PTR_FORMAT ", OFFSET_MASK: " INTPTR_FORMAT ", encoded: " PTR_FORMAT ", biased-base: " PTR_FORMAT ", heap-start: " PTR_FORMAT, p2i(obj), p2i(forwardee(obj)), p2i(fwd), obj->mark().value(), _num_regions, p2i(_bases_table[0]), OFFSET_MASK, encode_forwarding(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(fwd)), p2i(_biased_bases[biased_region_index_containing(cast_from_oop<HeapWord*>(obj))]), p2i(_heap_start));
#else
  obj->forward_to(fwd);
#endif
}

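// Look up the forwardee: consult the fallback table if the mark word carries
// the fallback pattern, otherwise decode the offset stored in the mark word.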
template <int BITS>
oop FullGCForwardingImpl<BITS>::forwardee_impl(oop from) {
  assert(_bases_table != nullptr, "call begin() before asking for forwarding");

  markWord header = from->mark();
  HeapWord* from_hw = cast_from_oop<HeapWord*>(from);
  if (is_fallback(header.value())) {
    HeapWord* to = fallback_forwardee(from_hw);
    return cast_to_oop(to);
  }
  uintptr_t encoded = header.value() & OFFSET_MASK;
  HeapWord* to = decode_forwarding(from_hw, encoded);
  return cast_to_oop(to);
}

template <int BITS>
oop FullGCForwardingImpl<BITS>::forwardee(oop obj) {
#ifdef _LP64
  assert(_bases_table != nullptr, "expect sliding forwarding initialized");
  return forwardee_impl(obj);
#else
  return obj->forwardee();
#endif
}

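// Fallback support: forwardings whose offsets do not fit into the mark word
// are kept in a ConcurrentHashTable keyed by the from-address.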
static uintx hash(HeapWord* const& addr) {
  uint64_t val = reinterpret_cast<uint64_t>(addr);
  uint32_t hash = FastHash::get_hash32(static_cast<uint32_t>(val), static_cast<uint32_t>(val >> 32));
  return hash;
}

struct ForwardingEntry {
  HeapWord* _from;
  HeapWord* _to;
  ForwardingEntry(HeapWord* from, HeapWord* to) : _from(from), _to(to) {}
};

struct FallbackTableConfig {
  using Value = ForwardingEntry;
  static uintx get_hash(Value const& entry, bool* is_dead) {
    return hash(entry._from);
  }
  static void* allocate_node(void* context, size_t size, Value const& value) {
    return AllocateHeap(size, mtGC);
  }
  static void free_node(void* context, void* memory, Value const& value) {
    FreeHeap(memory);
  }
};

class FallbackTable : public ConcurrentHashTable<FallbackTableConfig, mtGC> {
public:
  explicit FallbackTable(size_t log2size) : ConcurrentHashTable(log2size) {}
};

class FallbackTableLookup : public StackObj {
  ForwardingEntry const _entry;
public:
  explicit FallbackTableLookup(HeapWord* from) : _entry(from, nullptr) {}
  uintx get_hash() const {
    return hash(_entry._from);
  }
  bool equals(const ForwardingEntry* value) const {
    return _entry._from == value->_from;
  }
  static bool is_dead(ForwardingEntry* value) { return false; }
};

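// Record the heap geometry. The bases table itself is allocated in begin()
// and released in end(), so the tables only consume memory during a full GC.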
template <int BITS>
void FullGCForwardingImpl<BITS>::initialize(MemRegion heap) {
#ifdef _LP64
  _heap_start = heap.start();

  size_t rounded_heap_size = MAX2(round_up_power_of_2(heap.byte_size()) / BytesPerWord, BLOCK_SIZE_WORDS);

  _num_regions = rounded_heap_size / BLOCK_SIZE_WORDS;

  _heap_start_region_bias = reinterpret_cast<uintptr_t>(_heap_start) >> BLOCK_SIZE_BYTES_SHIFT;
  _region_mask = ~((static_cast<uintptr_t>(1) << BLOCK_SIZE_BYTES_SHIFT) - 1);

  assert(_bases_table == nullptr, "should not be initialized yet");
  assert(_fallback_table == nullptr, "should not be initialized yet");
#endif
}

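// Called at the start of a full GC: allocate the per-block bases table and
// reset the statistics. With a single block, the heap start is used as the
// block base directly (see the comment in the branch below).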
template <int BITS>
void FullGCForwardingImpl<BITS>::begin() {
#ifdef _LP64
  assert(_bases_table == nullptr, "should not be initialized yet");
  assert(_fallback_table == nullptr, "should not be initialized yet");

  _fallback_table = nullptr;

#ifndef PRODUCT
  _num_forwardings = 0;
  _num_fallback_forwardings = 0;
#endif

  size_t max = _num_regions;
  _bases_table = NEW_C_HEAP_ARRAY(HeapWord*, max, mtGC);
  HeapWord** biased_start = _bases_table - _heap_start_region_bias;
  _biased_bases = biased_start;
  if (max == 1) {
    // Optimize the case when the block-size >= heap-size.
    // In this case we can use the heap-start as block-start,
    // and avoid the risk that competing GC threads set a higher
    // address as block-start, which would lead to unnecessary
    // fallback-usage.
    _bases_table[0] = _heap_start;
  } else {
    for (size_t i = 0; i < max; i++) {
      _bases_table[i] = UNUSED_BASE;
    }
  }
#endif
}

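// Called at the end of a full GC: log forwarding statistics (non-product
// builds only) and release the bases table and the fallback table.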
template <int BITS>
void FullGCForwardingImpl<BITS>::end() {
#ifndef PRODUCT
  size_t fallback_table_size = _fallback_table != nullptr ? _fallback_table->get_mem_size(Thread::current()) : 0;
  log_info(gc)("Total forwardings: " UINT64_FORMAT ", fallback forwardings: " UINT64_FORMAT
               ", ratio: %f, memory used by fallback table: %zu%s, memory used by bases table: %zu%s",
               _num_forwardings, _num_fallback_forwardings, static_cast<float>(_num_forwardings) / static_cast<float>(_num_fallback_forwardings),
               byte_size_in_proper_unit(fallback_table_size),
               proper_unit_for_byte_size(fallback_table_size),
               byte_size_in_proper_unit(sizeof(HeapWord*) * _num_regions),
               proper_unit_for_byte_size(sizeof(HeapWord*) * _num_regions));
#endif
#ifdef _LP64
  assert(_bases_table != nullptr, "should be initialized");
  FREE_C_HEAP_ARRAY(HeapWord*, _bases_table);
  _bases_table = nullptr;
  if (_fallback_table != nullptr) {
    delete _fallback_table;
    _fallback_table = nullptr;
  }
#endif
}

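// Lazily create the fallback table; a CAS ensures that only one of the
// competing GC threads installs its table.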
template <int BITS>
void FullGCForwardingImpl<BITS>::maybe_init_fallback_table() {
  if (_fallback_table == nullptr) {
    FallbackTable* fallback_table = new FallbackTable(_fallback_table_log2_start_size);
    FallbackTable* prev = Atomic::cmpxchg(&_fallback_table, static_cast<FallbackTable*>(nullptr), fallback_table);
    if (prev != nullptr) {
      // Another thread won, discard our table.
      delete fallback_table;
    }
  }
}

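// Record a forwarding in the fallback table, creating the table on first use.
// An existing entry for 'from' is overwritten; the table is grown when the
// insertion requests it.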
template <int BITS>
void FullGCForwardingImpl<BITS>::fallback_forward_to(HeapWord* from, HeapWord* to) {
  assert(to != nullptr, "no null forwarding");
  maybe_init_fallback_table();
  assert(_fallback_table != nullptr, "should be initialized");
  FallbackTableLookup lookup_f(from);
  ForwardingEntry entry(from, to);
  auto found_f = [&](ForwardingEntry* found) {
    // If a duplicate has been found, overwrite it with the new value.
    // This is also called when a new entry has been successfully inserted.
    if (found->_to != to) {
      found->_to = to;
    }
  };
  Thread* current_thread = Thread::current();
  bool grow;
  bool added = _fallback_table->insert_get(current_thread, lookup_f, entry, found_f, &grow);
  NOT_PRODUCT(Atomic::inc(&_num_fallback_forwardings);)
#ifdef ASSERT
  assert(fallback_forwardee(from) != nullptr, "must have entered forwarding");
  assert(fallback_forwardee(from) == to,
         "forwarding must be correct, added: %s, from: " PTR_FORMAT ", to: " PTR_FORMAT ", fwd: " PTR_FORMAT,
         BOOL_TO_STR(added), p2i(from), p2i(to), p2i(fallback_forwardee(from)));
#endif
  if (grow) {
    _fallback_table->grow(current_thread);
    log_debug(gc)("grow fallback table to size: %zu bytes", _fallback_table->get_mem_size(current_thread));
  }
}

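// Look up a forwarding that was recorded in the fallback table.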
template <int BITS>
HeapWord* FullGCForwardingImpl<BITS>::fallback_forwardee(HeapWord* from) {
  assert(_fallback_table != nullptr, "fallback table must be present");
  HeapWord* result = nullptr;
  FallbackTableLookup lookup_f(from);
  auto found_f = [&](const ForwardingEntry* found) {
    result = found->_to;
  };
  bool found = _fallback_table->get(Thread::current(), lookup_f, found_f);
  assert(found, "something must have been found");
  assert(result != nullptr, "must have found forwarding");
  return result;
}

#endif // SHARE_GC_SHARED_FULLGCFORWARDING_INLINE_HPP