/*
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_FULLGCFORWARDING_HPP
#define SHARE_GC_SHARED_FULLGCFORWARDING_HPP

#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "oops/markWord.hpp"
#include "oops/oopsHierarchy.hpp"

class FallbackTable;
class Mutex;

/**
 * FullGCForwarding is a method for storing forwarding information in a compressed form in the object header.
 * It has been specifically designed for sliding compacting GCs and compact object headers. With compact object
 * headers, we store the compressed class pointer in the header, which would be overwritten by full forwarding
 * pointers if we allowed the legacy forwarding code to act. This would lose the class information for the object,
 * which is required later in the GC cycle to iterate the reference fields and get the object size for copying.
 *
 * FullGCForwarding requires only small side tables and guarantees constant-time access and modification.
 *
 * The key advantages of sliding compaction for encoding efficiency are:
 * - It forwards objects linearly, starting at the heap bottom and moving up to the top, sliding
 *   live objects towards the bottom of the heap. (The reality in parallel or regionalized GCs is a bit more
 *   complex, but conceptually it is the same.)
 * - Objects starting in any one block can only be forwarded to a memory region that is not larger than
 *   a block. (There are exceptions to this rule which are discussed below.)
 *
 * This is an intuitive property: when we slide a block full of compacted data, it cannot take up more
 * memory afterwards than it did before.
 * This property allows us to use a side table to record the base address of the target memory region for
 * each block. The table holds N entries for N blocks. For each block, it gives the base
 * address of the target region, or a special placeholder if not used.
 *
 * This encoding efficiency allows us to store the forwarding information in the object header _together_ with the
 * compressed class pointer.
 *
 * The idea is to use a pointer compression scheme very similar to the one that is used for compressed oops.
 * We divide the heap into a number of equal-sized blocks. Each block spans a maximum of 2^NUM_OFFSET_BITS words.
 * We maintain a side-table of target-base-addresses, with one address entry per block.
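 *
 * As a rough illustration only (not the actual implementation; 'heap_start' and 'bases_table' are
 * hypothetical stand-ins for the fields declared below), mapping a "from" address to its block and
 * side-table entry could look like this:
 *
 *   // block index of 'from', relative to the heap start (blocks span 2^NUM_OFFSET_BITS words)
 *   size_t block_index = pointer_delta(from, heap_start) >> NUM_OFFSET_BITS;
 *   // base address of the target region for this block; a placeholder value if no
 *   // forwarding has been recorded for this block yet
 *   HeapWord* target_base = bases_table[block_index];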
 *
 * When recording the sliding forwarding, the mark word would look roughly like this:
 *
 *   32                               0
 *    [.....................OOOOOOOOOTT]
 *                                     ^------ tag-bits, indicate 'forwarded'
 *                            ^-------- in-region offset
 *        ^----------------- protected area, *not touched* by this code, useful for
 *                           compressed class pointer with compact object headers
 *
 * Adding a forwarding then generally works as follows (a rough code sketch follows at the end of this comment):
 * 1. Compute the index of the block of the "from" address.
 * 2. Load the target-base-offset of the from-block from the side-table.
 * 3. If the base-offset is not yet set, set it to the to-address of the forwarding.
 *    (In other words, the first forwarding of a block determines the target base-offset.)
 * 4. Compute the offset of the to-address in the target region.
 * 5. Store the offset in the object header.
 *
 * Similarly, looking up the target address, given an original object address, generally works as follows:
 * 1. Compute the index of the block of the "from" address.
 * 2. Load the target-base-offset of the from-block from the side-table.
 * 3. Extract the offset from the object header.
 * 4. Compute the "to" address from the target base and the offset.
 *
 * We reserve one special value for the offset:
 * - 111111111: Indicates an exceptional forwarding (see below), for which a fallback hash-table
 *   is used to look up the target address.
 *
 * In order to support this, we need to make a change to the above algorithm:
 * - Forwardings that would use offsets >= 111111111 (i.e. the last slot) must also use the
 *   fallback-table. We expect this to be relatively rare, for two reasons:
 *   1. It only affects 1 out of 512 possible offsets, in other words, 1/512th of all cases under
 *      a uniform distribution.
 *   2. Forwardings are not uniformly distributed, because normally we 'skip' unreachable objects,
 *      thus compacting the block. Forwardings tend to cluster at the beginning of the target region,
 *      and become less likely towards the end of the possible encodable target address range.
 *   In reality it will therefore be much less frequent than 1/512.
 *
 * There are several conditions under which the above algorithm breaks, because the assumption that
 * 'objects from each block can only get forwarded to a region of block-size' is violated:
 * - G1 last-ditch serial compaction: there, objects from a single region can be forwarded to multiple
 *   regions, possibly more than two. G1 serial compaction is not very common - it is the last-last-ditch GC
 *   that is used when the JVM is scrambling to squeeze more space out of the heap, and at that point,
 *   ultimate performance is no longer the main concern.
 * - When forwarding hits a space (or G1/Shenandoah region) boundary, later objects of a block
 *   need to be forwarded to a different address range than earlier objects in the same block.
 *   This is rare.
 * - With compact identity hash-code, objects can grow, and in the worst case use up more memory in
 *   the target block than we can address. We expect that to be rare.
 *
 * To deal with that, we initialize a fallback-hashtable for storing those extra forwardings, and use a special
 * offset pattern (0b11...1) to indicate that the forwardee is not encoded but should be looked up in the hashtable.
 * This implies that this particular offset (the last word of a block) cannot be used directly as a forwarding
 * offset, but also has to be handled by the fallback-table.
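 *
 * Putting the pieces together, encoding and decoding could look roughly like the sketch below.
 * This is illustrative only, not the actual implementation: 'block_index()', 'forwarded_tag' and
 * 'biased_bases' are hypothetical stand-ins, and the real code additionally deals with fallback-table
 * lookups and with concurrent updates of the base table.
 *
 *   uintptr_t encode(HeapWord* from, HeapWord* to) {
 *     HeapWord** base_entry = &biased_bases[block_index(from)];
 *     if (*base_entry == UNUSED_BASE) {
 *       *base_entry = to;                                 // first forwarding of the block fixes the base
 *     }
 *     size_t offset = pointer_delta(to, *base_entry);     // offset in words within the target region
 *     if (offset > MAX_OFFSET) {
 *       return FALLBACK_PATTERN_IN_PLACE | forwarded_tag; // too far away: record in the fallback-table
 *     }
 *     return (offset << OFFSET_BITS_SHIFT) | forwarded_tag;
 *   }
 *
 *   HeapWord* decode(HeapWord* from, uintptr_t encoded) {
 *     // not valid for the fallback pattern, which must be looked up in the fallback-table instead
 *     size_t offset = (encoded & OFFSET_MASK) >> OFFSET_BITS_SHIFT;
 *     return biased_bases[block_index(from)] + offset;
 *   }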
 */
class FullGCForwarding : public AllStatic {
private:
  static constexpr int AVAILABLE_LOW_BITS = 11;
  static constexpr int AVAILABLE_BITS_MASK = right_n_bits(AVAILABLE_LOW_BITS);
  // The offset bits start after the lock-bits, which are currently used by Serial GC
  // for marking objects. Could be 1 for Serial GC when being clever with the bits,
  // and 0 for all other GCs.
  static constexpr int OFFSET_BITS_SHIFT = markWord::lock_shift + markWord::lock_bits;

  // How many bits we use for the offset
  static constexpr int NUM_OFFSET_BITS = AVAILABLE_LOW_BITS - OFFSET_BITS_SHIFT;
  static constexpr size_t BLOCK_SIZE_WORDS = 1 << NUM_OFFSET_BITS;
  static constexpr int BLOCK_SIZE_BYTES_SHIFT = NUM_OFFSET_BITS + LogHeapWordSize;
  static constexpr size_t MAX_OFFSET = BLOCK_SIZE_WORDS - 2;
  static constexpr uintptr_t OFFSET_MASK = right_n_bits(NUM_OFFSET_BITS) << OFFSET_BITS_SHIFT;

  // This offset bit-pattern indicates that the actual mapping is handled by the
  // fallback-table. This also implies that this cannot be used as a valid offset,
  // and we must also use the fallback-table for mappings to the last word of a
  // block.
  static constexpr uintptr_t FALLBACK_PATTERN = right_n_bits(NUM_OFFSET_BITS);
  static constexpr uintptr_t FALLBACK_PATTERN_IN_PLACE = FALLBACK_PATTERN << OFFSET_BITS_SHIFT;

  // Indicates an unused base address in the target base table.
  static HeapWord* const UNUSED_BASE;

  static HeapWord* _heap_start;

  static size_t _heap_start_region_bias;
  static size_t _num_regions;
  static uintptr_t _region_mask;

  // The target base table memory.
  static HeapWord** _bases_table;
  // Entries into the target base tables, biased to the start of the heap.
  static HeapWord** _biased_bases;

  static FallbackTable* _fallback_table;

#ifndef PRODUCT
  static volatile uint64_t _num_forwardings;
  static volatile uint64_t _num_fallback_forwardings;
#endif

  static inline size_t biased_region_index_containing(HeapWord* addr);

  static inline bool is_fallback(uintptr_t encoded);
  static inline uintptr_t encode_forwarding(HeapWord* from, HeapWord* to);
  static inline HeapWord* decode_forwarding(HeapWord* from, uintptr_t encoded);

  static void fallback_forward_to(HeapWord* from, HeapWord* to);
  static HeapWord* fallback_forwardee(HeapWord* from);

  static inline void forward_to_impl(oop from, oop to);
  static inline oop forwardee_impl(oop from);

public:
  static void initialize(MemRegion heap);

  static void begin();
  static void end();

  static inline bool is_forwarded(oop obj);
  static inline bool is_not_forwarded(oop obj);

  static inline void forward_to(oop from, oop to);
  static inline oop forwardee(oop from);
};

#endif // SHARE_GC_SHARED_FULLGCFORWARDING_HPP