 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/fullGCForwarding.hpp"
#include "memory/memRegion.hpp"
#include "runtime/globals_extension.hpp"

HeapWord* FullGCForwarding::_heap_base = nullptr;
int FullGCForwarding::_num_low_bits = 0;

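// Check at VM-init time whether the requested maximum heap size fits into the
// narrow forwarding encoding used with compact object headers; if it does not,
// compact object headers are disabled ergonomically.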
void FullGCForwarding::initialize_flags(size_t max_heap_size) {
#ifdef _LP64
  size_t max_narrow_heap_size = right_n_bits(NumLowBitsNarrow - Shift);
  if (UseCompactObjectHeaders && max_heap_size > max_narrow_heap_size * HeapWordSize) {
    warning("Compact object headers require a java heap size smaller than " SIZE_FORMAT
            "%s (given: " SIZE_FORMAT "%s). Disabling compact object headers.",
            byte_size_in_proper_unit(max_narrow_heap_size * HeapWordSize),
            proper_unit_for_byte_size(max_narrow_heap_size * HeapWordSize),
            byte_size_in_proper_unit(max_heap_size),
            proper_unit_for_byte_size(max_heap_size));
    FLAG_SET_ERGO(UseCompactObjectHeaders, false);
  }
#endif
}

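// Record the heap base and select how many low bits are used to encode a
// forwarding pointer: the narrow encoding with compact object headers, the
// wide encoding otherwise.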
void FullGCForwarding::initialize(MemRegion heap) {
#ifdef _LP64
  _heap_base = heap.start();
  if (UseCompactObjectHeaders) {
    _num_low_bits = NumLowBitsNarrow;
  } else {
    _num_low_bits = NumLowBitsWide;
  }
#endif
}
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/fullGCForwarding.hpp"
#include "logging/log.hpp"
#include "nmt/memTag.hpp"
#include "utilities/ostream.hpp"
#include "utilities/concurrentHashTable.inline.hpp"
#include "utilities/fastHash.hpp"
#include "utilities/powerOfTwo.hpp"

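// Hash a from-address by mixing the low and high 32-bit halves of the pointer.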
static uintx hash(HeapWord* const& addr) {
  uint64_t val = reinterpret_cast<uint64_t>(addr);
  uint32_t hash = FastHash::get_hash32((uint32_t)val, (uint32_t)(val >> 32));
  return hash;
}

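// A single from -> to forwarding recorded in the fallback table.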
struct ForwardingEntry {
  HeapWord* _from;
  HeapWord* _to;
  ForwardingEntry(HeapWord* from, HeapWord* to) : _from(from), _to(to) {}
};

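// ConcurrentHashTable configuration: entries are hashed by their from-address
// and nodes are allocated from the C heap, tagged as GC memory.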
struct FallbackTableConfig {
  using Value = ForwardingEntry;
  static uintx get_hash(Value const& entry, bool* is_dead) {
    return hash(entry._from);
  }
  static void* allocate_node(void* context, size_t size, Value const& value) {
    return AllocateHeap(size, mtGC);
  }
  static void free_node(void* context, void* memory, Value const& value) {
    FreeHeap(memory);
  }
};

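// Thin wrapper that gives the table type a name; all behavior comes from
// ConcurrentHashTable itself.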
class FallbackTable : public ConcurrentHashTable<FallbackTableConfig, mtGC> {};

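// Lookup functor: wraps a from-address so it can be hashed and compared
// against the entries stored in the fallback table.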
class FallbackTableLookup : public StackObj {
  ForwardingEntry const _entry;
public:
  explicit FallbackTableLookup(HeapWord* from) : _entry(from, nullptr) {}
  uintx get_hash() const {
    return hash(_entry._from);
  }
  bool equals(ForwardingEntry* value) {
    return _entry._from == value->_from;
  }
  bool is_dead(ForwardingEntry* value) { return false; }
};

// We cannot use 0, because that may already be a valid base address in zero-based heaps.
// 0x1 is safe because heap base addresses must be aligned to a much larger alignment.
HeapWord* const FullGCForwarding::UNUSED_BASE = reinterpret_cast<HeapWord*>(0x1);

HeapWord* FullGCForwarding::_heap_start = nullptr;
size_t FullGCForwarding::_heap_start_region_bias = 0;
size_t FullGCForwarding::_num_regions = 0;
uintptr_t FullGCForwarding::_region_mask = 0;
HeapWord** FullGCForwarding::_biased_bases = nullptr;
HeapWord** FullGCForwarding::_bases_table = nullptr;
FallbackTable* FullGCForwarding::_fallback_table = nullptr;
#ifndef PRODUCT
volatile uint64_t FullGCForwarding::_num_forwardings = 0;
volatile uint64_t FullGCForwarding::_num_fallback_forwardings = 0;
#endif

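// Compute the table geometry for this heap: one base slot per block of
// BLOCK_SIZE_WORDS words, and a bias derived from the heap start so that a
// heap address can be mapped to its slot with a single shift.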
void FullGCForwarding::initialize(MemRegion heap) {
#ifdef _LP64
  _heap_start = heap.start();

  size_t rounded_heap_size = round_up_power_of_2(heap.byte_size());

  _num_regions = (rounded_heap_size / BytesPerWord) / BLOCK_SIZE_WORDS;

  _heap_start_region_bias = (uintptr_t)_heap_start >> BLOCK_SIZE_BYTES_SHIFT;
  _region_mask = ~((uintptr_t(1) << BLOCK_SIZE_BYTES_SHIFT) - 1);

  assert(_bases_table == nullptr, "should not be initialized yet");
  assert(_fallback_table == nullptr, "should not be initialized yet");
#endif
}

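// Per-GC setup: allocate the per-block bases table, establish its biased view,
// mark all slots unused and create a fresh fallback table.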
void FullGCForwarding::begin() {
#ifdef _LP64
  assert(_bases_table == nullptr, "should not be initialized yet");
  assert(_fallback_table == nullptr, "should not be initialized yet");

  _fallback_table = new FallbackTable();

#ifndef PRODUCT
  _num_forwardings = 0;
  _num_fallback_forwardings = 0;
#endif

  size_t max = _num_regions;
  _bases_table = NEW_C_HEAP_ARRAY(HeapWord*, max, mtGC);
  HeapWord** biased_start = _bases_table - _heap_start_region_bias;
  _biased_bases = biased_start;
  for (size_t i = 0; i < max; i++) {
    _bases_table[i] = UNUSED_BASE;
  }
#endif
}

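// Per-GC teardown: log forwarding statistics in non-product builds, then
// release the bases table and the fallback table.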
void FullGCForwarding::end() {
#ifndef PRODUCT
  log_info(gc)("Total forwardings: " UINT64_FORMAT ", fallback forwardings: " UINT64_FORMAT
               ", ratio: %f, memory used by fallback table: " SIZE_FORMAT "%s, memory used by bases table: " SIZE_FORMAT "%s",
               _num_forwardings, _num_fallback_forwardings, (float)_num_forwardings/(float)_num_fallback_forwardings,
               byte_size_in_proper_unit(_fallback_table->get_mem_size(Thread::current())),
               proper_unit_for_byte_size(_fallback_table->get_mem_size(Thread::current())),
               byte_size_in_proper_unit(sizeof(HeapWord*) * _num_regions),
               proper_unit_for_byte_size(sizeof(HeapWord*) * _num_regions));
#endif
#ifdef _LP64
  assert(_bases_table != nullptr, "should be initialized");
  FREE_C_HEAP_ARRAY(HeapWord*, _bases_table);
  _bases_table = nullptr;
  delete _fallback_table;
  _fallback_table = nullptr;
#endif
}

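// Record a forwarding in the fallback hash table. An existing entry for the
// same from-address is overwritten; the table is grown when the insertion
// requests it.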
void FullGCForwarding::fallback_forward_to(HeapWord* from, HeapWord* to) {
  assert(to != nullptr, "no null forwarding");
  assert(_fallback_table != nullptr, "should be initialized");
  FallbackTableLookup lookup_f(from);
  ForwardingEntry entry(from, to);
  auto found_f = [&](ForwardingEntry* found) {
    // If a duplicate has been found, override it with the new value.
    // This is also called when a new entry has been successfully inserted.
    if (found->_to != to) {
      found->_to = to;
    }
  };
  Thread* current_thread = Thread::current();
  bool grow;
  bool added = _fallback_table->insert_get(current_thread, lookup_f, entry, found_f, &grow);
  NOT_PRODUCT(Atomic::inc(&_num_fallback_forwardings);)
#ifdef ASSERT
  assert(fallback_forwardee(from) != nullptr, "must have entered forwarding");
  assert(fallback_forwardee(from) == to, "forwarding must be correct, added: %s, from: " PTR_FORMAT ", to: " PTR_FORMAT ", fwd: " PTR_FORMAT, BOOL_TO_STR(added), p2i(from), p2i(to), p2i(fallback_forwardee(from)));
#endif
  if (grow) {
    _fallback_table->grow(current_thread);
    tty->print_cr("grow fallback table to size: " SIZE_FORMAT " bytes",
                  _fallback_table->get_mem_size(current_thread));
  }
}

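// Look up the forwarding for 'from' in the fallback table. The entry must exist.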
HeapWord* FullGCForwarding::fallback_forwardee(HeapWord* from) {
  assert(_fallback_table != nullptr, "fallback table must be present");
  HeapWord* result = nullptr;
  FallbackTableLookup lookup_f(from);
  auto found_f = [&](ForwardingEntry* found) {
    result = found->_to;
  };
  bool found = _fallback_table->get(Thread::current(), lookup_f, found_f);
  assert(found, "something must have been found");
  assert(result != nullptr, "must have found forwarding");
  return result;
}