/*
 * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"

int CompressedKlassPointers::_narrow_klass_pointer_bits = -1;
int CompressedKlassPointers::_max_shift = -1;

address CompressedKlassPointers::_base = (address)-1;
int CompressedKlassPointers::_shift = -1;
address CompressedKlassPointers::_klass_range_start = nullptr;
address CompressedKlassPointers::_klass_range_end = nullptr;
narrowKlass CompressedKlassPointers::_lowest_valid_narrow_klass_id = (narrowKlass)-1;
narrowKlass CompressedKlassPointers::_highest_valid_narrow_klass_id = (narrowKlass)-1;
size_t CompressedKlassPointers::_protection_zone_size = 0;

#ifdef _LP64

size_t CompressedKlassPointers::max_klass_range_size() {
  // The encoding scheme (number of narrow Klass pointer bits plus the maximum shift) limits
  // how large the Klass range can theoretically be; independently of that, we cap the range
  // at 4G.
  const size_t encoding_allows = nth_bit(narrow_klass_pointer_bits() + max_shift());
  constexpr size_t cap = 4 * G;
  return MIN2(encoding_allows, cap);
}
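
// Worked example (assuming the usual parameters, i.e. 32-bit narrow Klass pointers with a
// maximum shift of 3, or 19-bit pointers with a maximum shift of 10 under compact object
// headers): 32 + 3 bits would allow nth_bit(35) = 32G, which the cap reduces to 4G;
// 19 + 10 bits allow nth_bit(29) = 512M, well below the cap.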

void CompressedKlassPointers::pre_initialize() {
  if (UseCompactObjectHeaders) {
    _narrow_klass_pointer_bits = narrow_klass_pointer_bits_coh;
    _max_shift = max_shift_coh;
  } else {
    _narrow_klass_pointer_bits = narrow_klass_pointer_bits_noncoh;
    _max_shift = max_shift_noncoh;
  }
}

#ifdef ASSERT
void CompressedKlassPointers::sanity_check_after_initialization() {
  // In anticipation of a failing assert, prepare condensed info to be printed with the assert.
  char tmp[256];
  os::snprintf(tmp, sizeof(tmp), "klass range: " RANGE2FMT ","
               " base " PTR_FORMAT ", shift %d, lowest/highest valid narrowKlass %u/%u",
               RANGE2FMTARGS(_klass_range_start, _klass_range_end),
               p2i(_base), _shift, _lowest_valid_narrow_klass_id, _highest_valid_narrow_klass_id);
#define ASSERT_HERE(cond) assert(cond, " (%s)", tmp);
#define ASSERT_HERE_2(cond, msg) assert(cond, msg " (%s)", tmp);

  // There is no technical reason preventing us from using other klass pointer bit lengths,
  // but it should be a deliberate choice.
  ASSERT_HERE(_narrow_klass_pointer_bits == 32 || _narrow_klass_pointer_bits == 19);

  // All values must be initialized.
  ASSERT_HERE(_max_shift != -1);
  ASSERT_HERE(_klass_range_start != (address)-1);
  ASSERT_HERE(_klass_range_end != (address)-1);
  ASSERT_HERE(_lowest_valid_narrow_klass_id != (narrowKlass)-1);
  ASSERT_HERE(_base != (address)-1);
  ASSERT_HERE(_shift != -1);

  const size_t klass_align = klass_alignment_in_bytes();

  // Must be aligned enough to hold 64-bit data.
  ASSERT_HERE(is_aligned(klass_align, sizeof(uint64_t)));

  // Should be smaller than the minimum metaspace chunk size (soft requirement).
  ASSERT_HERE(klass_align <= K);

  ASSERT_HERE(_klass_range_end > _klass_range_start);

  // Check that the Klass range is fully engulfed in the encoding range.
  const address encoding_start = _base;
  const address encoding_end = (address)(p2u(_base) + (uintptr_t)nth_bit(narrow_klass_pointer_bits() + _shift));
  ASSERT_HERE_2(_klass_range_start >= _base && _klass_range_end <= encoding_end,
                "Resulting encoding range does not fully cover the class range");

  // Check that the Klass range is aligned to Klass alignment. Note that this should never be
  // an issue since the Klass range is handed in by either CDS- or Metaspace-initialization, and
  // it should be the result of an mmap operation that operates on page sizes. So as long as
  // the Klass alignment is <= page size, we are fine.
  ASSERT_HERE_2(is_aligned(_klass_range_start, klass_align) &&
                is_aligned(_klass_range_end, klass_align),
                "Klass range must start and end at a properly aligned address");

  // Check _lowest_valid_narrow_klass_id and _highest_valid_narrow_klass_id.
  ASSERT_HERE_2(_lowest_valid_narrow_klass_id > 0, "Null is not a valid narrowKlass");
  ASSERT_HERE(_highest_valid_narrow_klass_id > _lowest_valid_narrow_klass_id);

  Klass* const k1 = decode_not_null_without_asserts(_lowest_valid_narrow_klass_id, _base, _shift);
  if (encoding_start == _klass_range_start) {
    ASSERT_HERE_2((address)k1 == _klass_range_start + klass_align, "Not lowest");
  } else {
    ASSERT_HERE_2((address)k1 == _klass_range_start, "Not lowest");
  }
  narrowKlass nk1 = encode_not_null_without_asserts(k1, _base, _shift);
  ASSERT_HERE_2(nk1 == _lowest_valid_narrow_klass_id, "not reversible");

  Klass* const k2 = decode_not_null_without_asserts(_highest_valid_narrow_klass_id, _base, _shift);
  ASSERT_HERE((address)k2 == _klass_range_end - klass_align);
  narrowKlass nk2 = encode_not_null_without_asserts(k2, _base, _shift);
  ASSERT_HERE_2(nk2 == _highest_valid_narrow_klass_id, "not reversible");

#ifdef AARCH64
  // On aarch64, we never expect a shift value > 0 in standard (non-coh) mode.
  ASSERT_HERE_2(UseCompactObjectHeaders || _shift == 0, "Shift > 0 in non-coh mode?");
#endif
#undef ASSERT_HERE
#undef ASSERT_HERE_2
}
#endif // ASSERT

// Helper function: given the current Klass range, base and shift, calculate the lowest and
// highest values of narrowKlass we can expect.
void CompressedKlassPointers::calc_lowest_highest_narrow_klass_id() {
  address lowest_possible_klass_location = _klass_range_start;

  // A Klass will never be placed at the encoding range start, since that would translate to a
  // narrowKlass=0, which is disallowed. Note that both Metaspace and CDS prevent allocation at
  // the first address for this reason.
  if (lowest_possible_klass_location == _base) {
    lowest_possible_klass_location += klass_alignment_in_bytes();
  }
  _lowest_valid_narrow_klass_id = (narrowKlass) ((uintptr_t)(lowest_possible_klass_location - _base) >> _shift);

  address highest_possible_klass_location = _klass_range_end - klass_alignment_in_bytes();
  _highest_valid_narrow_klass_id = (narrowKlass) ((uintptr_t)(highest_possible_klass_location - _base) >> _shift);
}
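
// Worked example with hypothetical values: if _base == _klass_range_start, _shift == 10 and
// the Klass alignment is 1024 bytes, the lowest possible Klass location is bumped to
// start + 1024, so _lowest_valid_narrow_klass_id == 1. For a 512M range, the highest possible
// location is end - 1024, so _highest_valid_narrow_klass_id == (512M - 1024) >> 10, i.e.
// 524287 (the largest 19-bit value).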

// Given a klass range [addr, addr+len) and a given encoding scheme, assert that this scheme
// covers the range, then set this encoding scheme. Used by CDS at runtime to reinstate the
// scheme used to pre-compute klass ids for archived heap objects.
void CompressedKlassPointers::initialize_for_given_encoding(address addr, size_t len, address requested_base, int requested_shift) {
  if (len > max_klass_range_size()) {
    stringStream ss;
    ss.print("Class space size and CDS archive size combined (%zu) "
             "exceed the maximum possible size (%zu)",
             len, max_klass_range_size());
    vm_exit_during_initialization(ss.base());
  }

  // Remember the Klass range:
  _klass_range_start = addr;
  _klass_range_end = addr + len;

  _base = requested_base;
  _shift = requested_shift;

  calc_lowest_highest_narrow_klass_id();

  // The base has already been validated together with SharedBaseAddress; if this fails, it is
  // a bug in the allocation code.
  if (!set_klass_decode_mode()) {
    fatal("base=" PTR_FORMAT " given with shift %d, cannot be used to encode class pointers",
          p2i(_base), _shift);
  }

  DEBUG_ONLY(sanity_check_after_initialization();)
}

char* CompressedKlassPointers::reserve_address_space_X(uintptr_t from, uintptr_t to, size_t size, size_t alignment, bool aslr) {
  alignment = MAX2(Metaspace::reserve_alignment(), alignment);
  return os::attempt_reserve_memory_between((char*)from, (char*)to, size, alignment, aslr);
}

char* CompressedKlassPointers::reserve_address_space_below_4G(size_t size, bool aslr) {
  return reserve_address_space_X(0, nth_bit(32), size, Metaspace::reserve_alignment(), aslr);
}

char* CompressedKlassPointers::reserve_address_space_for_unscaled_encoding(size_t size, bool aslr) {
  const size_t unscaled_max = nth_bit(narrow_klass_pointer_bits());
  return reserve_address_space_X(0, unscaled_max, size, Metaspace::reserve_alignment(), aslr);
}

char* CompressedKlassPointers::reserve_address_space_for_zerobased_encoding(size_t size, bool aslr) {
  const size_t unscaled_max = nth_bit(narrow_klass_pointer_bits());
  const size_t zerobased_max = nth_bit(narrow_klass_pointer_bits() + max_shift());
  return reserve_address_space_X(unscaled_max, zerobased_max, size, Metaspace::reserve_alignment(), aslr);
}

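// Presumably chosen so that the base can be materialized with a single 16-bit-immediate move:
// a 4G-aligned base in [4G, 2^48) has only bits [32, 48) set.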
char* CompressedKlassPointers::reserve_address_space_for_16bit_move(size_t size, bool aslr) {
  return reserve_address_space_X(nth_bit(32), nth_bit(48), size, nth_bit(32), aslr);
}

void CompressedKlassPointers::initialize(address addr, size_t len) {

  if (len > max_klass_range_size()) {
    stringStream ss;
    ss.print("Class space size (%zu) exceeds the maximum possible size (%zu)",
             len, max_klass_range_size());
    vm_exit_during_initialization(ss.base());
  }

  // Remember the Klass range:
  _klass_range_start = addr;
  _klass_range_end = addr + len;

  // Calculate Base and Shift:

  if (UseCompactObjectHeaders) {

    // This handles the case that we - experimentally - reduce the number of
    // class pointer bits further, such that (shift + num bits) < 32.
    assert(len <= (size_t)nth_bit(narrow_klass_pointer_bits() + max_shift()),
           "klass range size exceeds encoding, len: %zu, narrow_klass_pointer_bits: %d, max_shift: %d",
           len, narrow_klass_pointer_bits(), max_shift());

    // In compact object header mode, with 19-bit narrowKlass, we don't attempt
    // zero-based mode. Instead, we set the base to the start of the klass range and
    // then try for the smallest shift possible that still covers the whole range.
    // The reason is that we want to avoid, if possible, shifts larger than
    // log2 of the cache line size.
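    // For example (hypothetical numbers, assuming 19-bit narrow Klass pointers and a
    // 64-byte cache line, i.e. log_cacheline == 6): for len = 128M = nth_bit(27), the loop
    // below stops at s == 8, since 19 + 8 bits just cover the range; the resulting Klass
    // alignment would be 256 bytes.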
    _base = addr;

    const int log_cacheline = exact_log2(DEFAULT_CACHE_LINE_SIZE);
    int s = max_shift();
    while (s > log_cacheline && ((size_t)nth_bit(narrow_klass_pointer_bits() + s - 1) >= len)) {
      s--;
    }
    _shift = s;

  } else {

    // Traditional (non-compact) header mode.
    const uintptr_t unscaled_max = nth_bit(narrow_klass_pointer_bits());
    const uintptr_t zerobased_max = nth_bit(narrow_klass_pointer_bits() + max_shift());

#ifdef AARCH64
    // Aarch64 avoids zero-based shifted mode (_base=0 _shift>0) and instead prefers
    // non-zero-based mode with a zero shift.
    _shift = 0;
    address const end = addr + len;
    _base = (end <= (address)unscaled_max) ? nullptr : addr;
#else
    // We try, in order of preference:
    // - unscaled     (base=0 shift=0)
    // - zero-based   (base=0 shift>0)
    // - nonzero-base (base>0 shift=0)
    // Note that base>0 shift>0 should never be needed, since the klass range will
    // never exceed 4GB.
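    // For example (hypothetical addresses, assuming 32-bit narrow Klass pointers and max
    // shift 3, hence unscaled_max = 4G and zerobased_max = 32G): a 1G range ending below 4G
    // runs unscaled; a 1G range starting at 0x6_0000_0000 still ends below 32G and can run
    // zero-based; a range ending above 32G falls back to base=addr with shift 0.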
    address const end = addr + len;
    if (end <= (address)unscaled_max) {
      _base = nullptr;
      _shift = 0;
    } else {
      if (end <= (address)zerobased_max) {
        _base = nullptr;
        _shift = max_shift();
      } else {
        _base = addr;
        _shift = 0;
      }
    }
#endif // AARCH64
  }

  calc_lowest_highest_narrow_klass_id();

  // Initialize the klass decode mode and check compatibility with the decode instructions.
  if (!set_klass_decode_mode()) {

    // Fail with a fatal error if the base address was explicitly specified by the user.
    if (CompressedClassSpaceBaseAddress == (size_t)_base) {
      vm_exit_during_initialization(
        err_msg("CompressedClassSpaceBaseAddress=" PTR_FORMAT " given with shift %d, cannot be used to encode class pointers",
                CompressedClassSpaceBaseAddress, _shift));
    } else {
      // If this fails, it is a bug in the allocation code.
      fatal("CompressedClassSpaceBaseAddress=" PTR_FORMAT " given with shift %d, cannot be used to encode class pointers",
            p2i(_base), _shift);
    }
  }
#ifdef ASSERT
  sanity_check_after_initialization();
#endif
}

void CompressedKlassPointers::print_mode(outputStream* st) {
  st->print_cr("UseCompressedClassPointers %d, UseCompactObjectHeaders %d",
               UseCompressedClassPointers, UseCompactObjectHeaders);
  if (UseCompressedClassPointers) {
    st->print_cr("Narrow klass pointer bits %d, Max shift %d",
                 _narrow_klass_pointer_bits, _max_shift);
    st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
                 p2i(base()), shift());
    st->print_cr("Encoding Range: " RANGE2FMT, RANGE2FMTARGS(_base, encoding_range_end()));
    st->print_cr("Klass Range: " RANGE2FMT, RANGE2FMTARGS(_klass_range_start, _klass_range_end));
    st->print_cr("Klass ID Range: [%u - %u) (%u)", _lowest_valid_narrow_klass_id, _highest_valid_narrow_klass_id + 1,
                 _highest_valid_narrow_klass_id + 1 - _lowest_valid_narrow_klass_id);
    if (_protection_zone_size > 0) {
      st->print_cr("Protection zone: " RANGEFMT, RANGEFMTARGS(_base, _protection_zone_size));
    } else {
      st->print_cr("No protection zone.");
    }
  } else {
    st->print_cr("UseCompressedClassPointers off");
  }
}
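
// Example output with compressed class pointers enabled (values are illustrative only;
// the exact text follows the format strings above):
//   UseCompressedClassPointers 1, UseCompactObjectHeaders 0
//   Narrow klass pointer bits 32, Max shift 3
//   Narrow klass base: 0x0000000000000000, Narrow klass shift: 0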

// On AIX, we cannot mprotect the archive space or class space since they are reserved with SystemV shm.
static constexpr bool can_mprotect_archive_space = NOT_AIX(true) AIX_ONLY(false);

// Protect a zone at the start of the encoding range.
void CompressedKlassPointers::establish_protection_zone(address addr, size_t size) {
  assert(_protection_zone_size == 0, "just once");
  assert(addr == base(), "Protection zone not at start of encoding range?");
  assert(size > 0 && is_aligned(size, os::vm_page_size()), "Protection zone not page sized");
  const bool rc = can_mprotect_archive_space && os::protect_memory((char*)addr, size, os::MEM_PROT_NONE, false);
  log_info(metaspace)("%s Narrow Klass Protection zone " RANGEFMT,
                      (rc ? "Established" : "FAILED to establish"),
                      RANGEFMTARGS(addr, size));
  if (!rc) {
    // If we fail to establish the protection zone, we fill it with a clear pattern to make it
    // stick out in register values (0x50 aka 'P', repeated).
    os::commit_memory((char*)addr, size, false);
    memset(addr, 'P', size);
  }
  _protection_zone_size = size;
}
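
// Note: the zone sits at the encoding base, i.e. at the addresses to which narrowKlass values
// at and near zero decode. Dereferencing a decoded null narrowKlass thus faults immediately
// (assuming the mprotect above succeeded) instead of reading whatever happens to be there.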

bool CompressedKlassPointers::is_in_protection_zone(address addr) {
  return _protection_zone_size > 0 ?
         (addr >= base() && addr < base() + _protection_zone_size) : false;
}

#endif // _LP64