
src/hotspot/share/gc/shared/fullGCForwarding.inline.hpp

/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef GC_SHARED_FULLGCFORWARDING_INLINE_HPP
#define GC_SHARED_FULLGCFORWARDING_INLINE_HPP

#include "gc/shared/fullGCForwarding.hpp"

#include "oops/oop.inline.hpp"
#include "utilities/globalDefinitions.hpp"

void FullGCForwarding::forward_to(oop from, oop to) {
#ifdef _LP64
  uintptr_t encoded = pointer_delta(cast_from_oop<HeapWord*>(to), _heap_base) << Shift;
  assert(encoded <= static_cast<uintptr_t>(right_n_bits(_num_low_bits)), "encoded forwardee must fit");
  uintptr_t mark = from->mark().value();
  mark &= ~right_n_bits(_num_low_bits);
  mark |= (encoded | markWord::marked_value);
  from->set_mark(markWord(mark));
#else
  from->forward_to(to);
#endif
}

oop FullGCForwarding::forwardee(oop from) {
#ifdef _LP64
  uintptr_t mark = from->mark().value();
  HeapWord* decoded = _heap_base + ((mark & right_n_bits(_num_low_bits)) >> Shift);
  return cast_to_oop(decoded);
#else
  return from->forwardee();
#endif
}

bool FullGCForwarding::is_forwarded(oop obj) {
  return obj->mark().is_forwarded();
}

#endif // GC_SHARED_FULLGCFORWARDING_INLINE_HPP
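The pre-patch scheme above encodes the forwardee as a word offset from a single heap base, shifted into the low bits of the mark word. A minimal standalone model of that round trip, with assumed values for Shift and the low-bit count (the real constants live in fullGCForwarding.hpp):

#include <cassert>
#include <cstdint>

int main() {
  const int      kShift      = 3;                              // assumed value
  const int      kNumLowBits = 40;                             // assumed value
  const uint64_t kLowMask    = (uint64_t(1) << kNumLowBits) - 1;
  const uint64_t kMarked     = 3;                              // models markWord::marked_value

  uint64_t heap_base = uint64_t(1) << 32;      // hypothetical heap start, in words
  uint64_t to        = heap_base + 12345;      // hypothetical forwardee, in words
  uint64_t from_mark = uint64_t(0xABCD) << 48; // some pre-existing upper mark bits

  // forward_to: stash (to - heap_base) << Shift in the low mark bits,
  // together with the marked_value pattern.
  uint64_t encoded = (to - heap_base) << kShift;
  assert(encoded <= kLowMask);
  uint64_t mark = (from_mark & ~kLowMask) | encoded | kMarked;

  // forwardee: mask the low bits and shift back; since kShift >= 2,
  // the marked_value bits are shifted away.
  uint64_t decoded = heap_base + ((mark & kLowMask) >> kShift);
  assert(decoded == to);
  return 0;
}

The patched version below keeps the 32-bit path unchanged, but replaces this single-base encoding with per-block bases plus a hash-table fallback, so forwardees no longer have to fit in one global offset range.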

/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#ifndef SHARE_GC_SHARED_FULLGCFORWARDING_INLINE_HPP
#define SHARE_GC_SHARED_FULLGCFORWARDING_INLINE_HPP

#include "gc/shared/fullGCForwarding.hpp"

#include "logging/log.hpp"
#include "nmt/memTag.hpp"
#include "oops/markWord.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/concurrentHashTable.inline.hpp"
#include "utilities/fastHash.hpp"
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"

// We cannot use 0 as the unused-base marker, because 0 may already be a valid
// base address in zero-based heaps. 0x1 is safe because heap base addresses
// must be aligned much more coarsely than that.
template <int BITS>
HeapWord* const FullGCForwardingImpl<BITS>::UNUSED_BASE = reinterpret_cast<HeapWord*>(0x1);

template <int BITS>
HeapWord* FullGCForwardingImpl<BITS>::_heap_start = nullptr;
template <int BITS>
size_t FullGCForwardingImpl<BITS>::_heap_start_region_bias = 0;
template <int BITS>
size_t FullGCForwardingImpl<BITS>::_num_regions = 0;
template <int BITS>
uintptr_t FullGCForwardingImpl<BITS>::_region_mask = 0;
template <int BITS>
HeapWord** FullGCForwardingImpl<BITS>::_biased_bases = nullptr;
template <int BITS>
HeapWord** FullGCForwardingImpl<BITS>::_bases_table = nullptr;
template <int BITS>
size_t FullGCForwardingImpl<BITS>::_fallback_table_log2_start_size = 9; // 512 entries.
template <int BITS>
FallbackTable* FullGCForwardingImpl<BITS>::_fallback_table = nullptr;
#ifndef PRODUCT
template <int BITS>
volatile uint64_t FullGCForwardingImpl<BITS>::_num_forwardings = 0;
template <int BITS>
volatile uint64_t FullGCForwardingImpl<BITS>::_num_fallback_forwardings = 0;
#endif

template <int BITS>
bool FullGCForwardingImpl<BITS>::is_forwarded(oop obj) {
  return obj->is_forwarded();
}

template <int BITS>
size_t FullGCForwardingImpl<BITS>::biased_region_index_containing(HeapWord* addr) {
  return reinterpret_cast<uintptr_t>(addr) >> BLOCK_SIZE_BYTES_SHIFT;
}
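A worked example of why the biasing pays off, using made-up numbers (the real BLOCK_SIZE_BYTES_SHIFT is derived from BITS): suppose blocks were 1 MiB (BLOCK_SIZE_BYTES_SHIFT = 20) and the heap started at 0x80000000. Then

  _heap_start_region_bias = 0x80000000 >> 20 = 0x800
  _biased_bases           = _bases_table - 0x800

so for any address addr in the heap,

  _biased_bases[addr >> 20] == _bases_table[(addr >> 20) - 0x800]
                            == _bases_table[(addr - heap_start) >> 20]

i.e. the raw shifted address indexes the table directly, saving a subtraction on every encode and decode.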

template <int BITS>
bool FullGCForwardingImpl<BITS>::is_fallback(uintptr_t encoded) {
  return (encoded & OFFSET_MASK) == FALLBACK_PATTERN_IN_PLACE;
}

template <int BITS>
uintptr_t FullGCForwardingImpl<BITS>::encode_forwarding(HeapWord* from, HeapWord* to) {
  size_t from_block_idx = biased_region_index_containing(from);

  HeapWord* to_region_base = _biased_bases[from_block_idx];
  if (to_region_base == UNUSED_BASE) {
    // Claim the block base; if another thread wins the race, use its base.
    HeapWord* prev = AtomicAccess::cmpxchg(&_biased_bases[from_block_idx], UNUSED_BASE, to);
    if (prev == UNUSED_BASE) {
      to_region_base = to;
    } else {
      to_region_base = prev;
    }
  }
  // Avoid pointer_delta() on purpose: using an unsigned subtraction,
  // we get an underflow when to < to_region_base, which means
  // we can use a single comparison instead of:
  // if (to_region_base > to || (to - to_region_base) > MAX_OFFSET) { .. }
  size_t offset = static_cast<size_t>(to - to_region_base);
  if (offset > MAX_OFFSET) {
    offset = FALLBACK_PATTERN;
  }
  uintptr_t encoded = (offset << OFFSET_BITS_SHIFT) | markWord::marked_value;

  assert(is_fallback(encoded) || to == decode_forwarding(from, encoded),
         "must be reversible: " PTR_FORMAT " -> " PTR_FORMAT ", reversed: " PTR_FORMAT
         ", encoded: " INTPTR_FORMAT ", to_region_base: " PTR_FORMAT ", from_block_idx: %zu",
         p2i(from), p2i(to), p2i(decode_forwarding(from, encoded)), encoded, p2i(to_region_base), from_block_idx);
  assert((encoded & ~AVAILABLE_BITS_MASK) == 0, "must encode to available bits");
  return encoded;
}
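To see the unsigned-subtraction trick concretely (addresses made up): if to_region_base is 0x2000 and to is 0x1000, i.e. to lies below the recorded block base, then

  offset = 0x1000 - 0x2000 = 0xFFFFFFFFFFFFF000   (unsigned wrap-around)

which exceeds any legal MAX_OFFSET, so the single offset > MAX_OFFSET comparison routes both the below-base case and the too-far-above-base case to the fallback table.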

template <int BITS>
HeapWord* FullGCForwardingImpl<BITS>::decode_forwarding(HeapWord* from, uintptr_t encoded) {
  assert(!is_fallback(encoded),
         "must not be fallback-forwarded, encoded: " INTPTR_FORMAT ", OFFSET_MASK: " INTPTR_FORMAT
         ", FALLBACK_PATTERN_IN_PLACE: " INTPTR_FORMAT, encoded, OFFSET_MASK, FALLBACK_PATTERN_IN_PLACE);
  assert((encoded & ~AVAILABLE_BITS_MASK) == 0, "must decode from available bits, encoded: " INTPTR_FORMAT, encoded);
  uintptr_t offset = (encoded >> OFFSET_BITS_SHIFT);

  size_t from_idx = biased_region_index_containing(from);
  HeapWord* base = _biased_bases[from_idx];
  assert(base != UNUSED_BASE, "must not be unused base: encoded: " INTPTR_FORMAT, encoded);
  HeapWord* decoded = base + offset;
  assert(decoded >= _heap_start,
         "Address must be above heap start. encoded: " INTPTR_FORMAT ", base: " PTR_FORMAT,
         encoded, p2i(base));

  return decoded;
}
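A standalone model of the per-block encode/decode pair above, with assumed constants (the real OFFSET_BITS_SHIFT and offset width are derived from BITS in fullGCForwarding.hpp):

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t kMarked          = 3;  // models markWord::marked_value
  const int      kOffsetBitsShift = 2;  // assumed: offset sits above the two lock bits
  const uint64_t kMaxOffset       = (uint64_t(1) << 40) - 1; // assumed offset width

  uint64_t block_base = 0x5000;  // base recorded for this block (first forwardee seen)
  uint64_t to         = 0x5040;  // another forwardee in the same block

  // encode_forwarding: offset from the block base, shifted above the lock bits.
  uint64_t offset = to - block_base;
  assert(offset <= kMaxOffset);
  uint64_t encoded = (offset << kOffsetBitsShift) | kMarked;

  // decode_forwarding: shift the offset back down and rebase.
  uint64_t decoded = block_base + (encoded >> kOffsetBitsShift);
  assert(decoded == to);
  return 0;
}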

template <int BITS>
void FullGCForwardingImpl<BITS>::forward_to_impl(oop from, oop to) {
  assert(_bases_table != nullptr, "call begin() before forwarding");

  markWord from_header = from->mark();
  HeapWord* from_hw = cast_from_oop<HeapWord*>(from);
  HeapWord* to_hw   = cast_from_oop<HeapWord*>(to);
  uintptr_t encoded = encode_forwarding(from_hw, to_hw);
  markWord new_header = markWord((from_header.value() & ~OFFSET_MASK) | encoded);
  from->set_mark(new_header);

  if (is_fallback(encoded)) {
    fallback_forward_to(from_hw, to_hw);
  }
  NOT_PRODUCT(AtomicAccess::inc(&_num_forwardings);)
}

template <int BITS>
void FullGCForwardingImpl<BITS>::forward_to(oop obj, oop fwd) {
  assert(fwd != nullptr, "no null forwarding");
#ifdef _LP64
  assert(_bases_table != nullptr, "expect sliding forwarding initialized");
  forward_to_impl(obj, fwd);
#else
  obj->forward_to(fwd);
#endif
}

template <int BITS>
oop FullGCForwardingImpl<BITS>::forwardee_impl(oop from) {
  assert(_bases_table != nullptr, "call begin() before asking for forwarding");

  markWord header = from->mark();
  HeapWord* from_hw = cast_from_oop<HeapWord*>(from);
  if (is_fallback(header.value())) {
    HeapWord* to = fallback_forwardee(from_hw);
    return cast_to_oop(to);
  }
  uintptr_t encoded = header.value() & OFFSET_MASK;
  HeapWord* to = decode_forwarding(from_hw, encoded);
  return cast_to_oop(to);
}

template <int BITS>
oop FullGCForwardingImpl<BITS>::forwardee(oop obj) {
#ifdef _LP64
  assert(_bases_table != nullptr, "expect sliding forwarding initialized");
  return forwardee_impl(obj);
#else
  return obj->forwardee();
#endif
}
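Read together with the asserts above ("call begin() before forwarding", "expect sliding forwarding initialized"), the intended lifecycle appears to be the following; this ordering is inferred from the code rather than stated by it:

  1. initialize(heap)     once, when the heap is set up;
  2. begin()              before each full-GC forwarding pass;
  3. forward_to(obj, fwd) for every live object, then is_forwarded(obj)
     and forwardee(obj) while pointers are adjusted;
  4. end()                after compaction, releasing the bases table and
     any fallback table.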

static uintx hash(HeapWord* const& addr) {
  uint64_t val = reinterpret_cast<uint64_t>(addr);
  uint32_t hash = FastHash::get_hash32(static_cast<uint32_t>(val), static_cast<uint32_t>(val >> 32));
  return hash;
}

struct ForwardingEntry {
  HeapWord* _from;
  HeapWord* _to;
  ForwardingEntry(HeapWord* from, HeapWord* to) : _from(from), _to(to) {}
};

struct FallbackTableConfig {
  using Value = ForwardingEntry;
  static uintx get_hash(Value const& entry, bool* is_dead) {
    return hash(entry._from);
  }
  static void* allocate_node(void* context, size_t size, Value const& value) {
    return AllocateHeap(size, mtGC);
  }
  static void free_node(void* context, void* memory, Value const& value) {
    FreeHeap(memory);
  }
};

class FallbackTable : public ConcurrentHashTable<FallbackTableConfig, mtGC> {
public:
  explicit FallbackTable(size_t log2size) : ConcurrentHashTable(log2size) {}
};

class FallbackTableLookup : public StackObj {
  ForwardingEntry const _entry;
public:
  explicit FallbackTableLookup(HeapWord* from) : _entry(from, nullptr) {}
  uintx get_hash() const {
    return hash(_entry._from);
  }
  bool equals(const ForwardingEntry* value) const {
    return _entry._from == value->_from;
  }
  static bool is_dead(ForwardingEntry* value) { return false; }
};

template <int BITS>
void FullGCForwardingImpl<BITS>::initialize(MemRegion heap) {
#ifdef _LP64
  _heap_start = heap.start();

  size_t rounded_heap_size = MAX2(round_up_power_of_2(heap.byte_size()) / BytesPerWord, BLOCK_SIZE_WORDS);

  _num_regions = rounded_heap_size / BLOCK_SIZE_WORDS;

  _heap_start_region_bias = reinterpret_cast<uintptr_t>(_heap_start) >> BLOCK_SIZE_BYTES_SHIFT;
  _region_mask = ~((static_cast<uintptr_t>(1) << BLOCK_SIZE_BYTES_SHIFT) - 1);

  assert(_bases_table == nullptr, "should not be initialized yet");
  assert(_fallback_table == nullptr, "should not be initialized yet");
#endif
}
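A worked sizing example (block size assumed; the real BLOCK_SIZE_WORDS is derived from BITS): with a 1 GiB heap and BLOCK_SIZE_WORDS = 2^17, i.e. 1 MiB blocks on a 64-bit VM,

  rounded_heap_size = round_up_power_of_2(2^30 bytes) / 8 = 2^27 words
  _num_regions      = 2^27 / 2^17                         = 1024 blocks

so begin() will allocate a 1024-entry (8 KiB) _bases_table, one forwarding base per 1 MiB of heap.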

template <int BITS>
void FullGCForwardingImpl<BITS>::begin() {
#ifdef _LP64
  assert(_bases_table == nullptr, "should not be initialized yet");
  assert(_fallback_table == nullptr, "should not be initialized yet");

  _fallback_table = nullptr;

#ifndef PRODUCT
  _num_forwardings = 0;
  _num_fallback_forwardings = 0;
#endif

  size_t max = _num_regions;
  _bases_table = NEW_C_HEAP_ARRAY(HeapWord*, max, mtGC);
  HeapWord** biased_start = _bases_table - _heap_start_region_bias;
  _biased_bases = biased_start;
  if (max == 1) {
    // Optimize the case when the block size >= heap size: we can use
    // the heap start as the block base, and avoid the risk that
    // competing GC threads install a higher address as the block base,
    // which would force unnecessary fallback usage.
    _bases_table[0] = _heap_start;
  } else {
    for (size_t i = 0; i < max; i++) {
      _bases_table[i] = UNUSED_BASE;
    }
  }
#endif
}

template <int BITS>
void FullGCForwardingImpl<BITS>::end() {
#ifndef PRODUCT
  size_t fallback_table_size = _fallback_table != nullptr ? _fallback_table->get_mem_size(Thread::current()) : 0;
  log_info(gc)("Total forwardings: " UINT64_FORMAT ", fallback forwardings: " UINT64_FORMAT
               ", ratio: %f, memory used by fallback table: %zu%s, memory used by bases table: %zu%s",
               _num_forwardings, _num_fallback_forwardings,
               static_cast<float>(_num_forwardings) / static_cast<float>(_num_fallback_forwardings),
               byte_size_in_proper_unit(fallback_table_size),
               proper_unit_for_byte_size(fallback_table_size),
               byte_size_in_proper_unit(sizeof(HeapWord*) * _num_regions),
               proper_unit_for_byte_size(sizeof(HeapWord*) * _num_regions));
#endif
#ifdef _LP64
  assert(_bases_table != nullptr, "should be initialized");
  FREE_C_HEAP_ARRAY(HeapWord*, _bases_table);
  _bases_table = nullptr;
  if (_fallback_table != nullptr) {
    delete _fallback_table;
    _fallback_table = nullptr;
  }
#endif
}

template <int BITS>
void FullGCForwardingImpl<BITS>::maybe_init_fallback_table() {
  if (_fallback_table == nullptr) {
    FallbackTable* fallback_table = new FallbackTable(_fallback_table_log2_start_size);
    FallbackTable* prev = AtomicAccess::cmpxchg(&_fallback_table, static_cast<FallbackTable*>(nullptr), fallback_table);
    if (prev != nullptr) {
      // Another thread won, discard our table.
      delete fallback_table;
    }
  }
}

template <int BITS>
void FullGCForwardingImpl<BITS>::fallback_forward_to(HeapWord* from, HeapWord* to) {
  assert(to != nullptr, "no null forwarding");
  maybe_init_fallback_table();
  assert(_fallback_table != nullptr, "should be initialized");
  FallbackTableLookup lookup_f(from);
  ForwardingEntry entry(from, to);
  auto found_f = [&](ForwardingEntry* found) {
    // If a duplicate is found, overwrite it with the new value.
    // This is also called when a new entry is successfully inserted.
    if (found->_to != to) {
      found->_to = to;
    }
  };
  Thread* current_thread = Thread::current();
  bool grow;
  bool added = _fallback_table->insert_get(current_thread, lookup_f, entry, found_f, &grow);
  NOT_PRODUCT(AtomicAccess::inc(&_num_fallback_forwardings);)
#ifdef ASSERT
  assert(fallback_forwardee(from) != nullptr, "must have entered forwarding");
  assert(fallback_forwardee(from) == to,
         "forwarding must be correct, added: %s, from: " PTR_FORMAT ", to: " PTR_FORMAT ", fwd: " PTR_FORMAT,
         BOOL_TO_STR(added), p2i(from), p2i(to), p2i(fallback_forwardee(from)));
#endif
  if (grow) {
    _fallback_table->grow(current_thread);
    log_debug(gc)("grow fallback table to size: %zu bytes", _fallback_table->get_mem_size(current_thread));
  }
}

template <int BITS>
HeapWord* FullGCForwardingImpl<BITS>::fallback_forwardee(HeapWord* from) {
  assert(_fallback_table != nullptr, "fallback table must be present");
  HeapWord* result = nullptr;
  FallbackTableLookup lookup_f(from);
  auto found_f = [&](const ForwardingEntry* found) {
    result = found->_to;
  };
  bool found = _fallback_table->get(Thread::current(), lookup_f, found_f);
  assert(found, "something must have been found");
  assert(result != nullptr, "must have found forwarding");
  return result;
}

#endif // SHARE_GC_SHARED_FULLGCFORWARDING_INLINE_HPP