/*
 * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"

int CompressedKlassPointers::_narrow_klass_pointer_bits = -1;
int CompressedKlassPointers::_max_shift = -1;

address CompressedKlassPointers::_base = (address)-1;
int CompressedKlassPointers::_shift = -1;
address CompressedKlassPointers::_klass_range_start = nullptr;
address CompressedKlassPointers::_klass_range_end = nullptr;
narrowKlass CompressedKlassPointers::_lowest_valid_narrow_klass_id = (narrowKlass)-1;
narrowKlass CompressedKlassPointers::_highest_valid_narrow_klass_id = (narrowKlass)-1;
size_t CompressedKlassPointers::_protection_zone_size = 0;

size_t CompressedKlassPointers::max_klass_range_size() {
#ifdef _LP64
  const size_t encoding_allows = nth_bit(narrow_klass_pointer_bits() + max_shift());
  assert(!UseCompactObjectHeaders || max_klass_range_size_coh == encoding_allows, "Sanity");
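  // Illustration (assumed values, not a statement about any particular platform): with a
  // 32-bit narrowKlass and a maximum shift of 3, the encoding itself could address 32 GB,
  // but the usable Klass range is still capped at 4 GB below.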
  constexpr size_t cap = 4 * G;
  return MIN2(encoding_allows, cap);
#else
  // 32-bit: only 32-bit "narrow" Klass pointers are allowed. If we ever support smaller narrow
  // Klass pointers here, this code needs to be revised.
  // We keep one page safety zone free to guard against size_t overflows on 32-bit. In practice
  // this is irrelevant because these upper address space parts are not user-addressable on
  // any of our 32-bit platforms.
  return align_down(UINT_MAX, os::vm_page_size());
#endif
}

void CompressedKlassPointers::pre_initialize() {
  if (UseCompactObjectHeaders) {
    _narrow_klass_pointer_bits = narrow_klass_pointer_bits_coh;
    _max_shift = max_shift_coh;
  } else {
#ifdef _LP64
    _narrow_klass_pointer_bits = narrow_klass_pointer_bits_noncoh;
    _max_shift = max_shift_noncoh;
#else
    _narrow_klass_pointer_bits = 32;
    _max_shift = 0;
#endif
  }
}

#ifdef ASSERT
void CompressedKlassPointers::sanity_check_after_initialization() {
  // In expectation of an assert, prepare condensed info to be printed with the assert.
  char tmp[256];
  os::snprintf_checked(tmp, sizeof(tmp), "klass range: " RANGE2FMT ","
                       " base " PTR_FORMAT ", shift %d, lowest/highest valid narrowKlass %u/%u",
                       RANGE2FMTARGS(_klass_range_start, _klass_range_end),
                       p2i(_base), _shift, _lowest_valid_narrow_klass_id, _highest_valid_narrow_klass_id);
#define ASSERT_HERE(cond) assert(cond, " (%s)", tmp);
#define ASSERT_HERE_2(cond, msg) assert(cond, msg " (%s)", tmp);

  // There is no technical reason preventing us from using other klass pointer bit lengths,
  // but it should be a deliberate choice
  ASSERT_HERE(_narrow_klass_pointer_bits == 32 || _narrow_klass_pointer_bits == 19);

  // All values must be initialized
  ASSERT_HERE(_max_shift != -1);
  ASSERT_HERE(_klass_range_start != (address)-1);
  ASSERT_HERE(_klass_range_end != (address)-1);
  ASSERT_HERE(_lowest_valid_narrow_klass_id != (narrowKlass)-1);
  ASSERT_HERE(_base != (address)-1);
  ASSERT_HERE(_shift != -1);

  // We should need a class space if address space is larger than what narrowKlass can address
  const bool should_need_class_space = (BytesPerWord * BitsPerByte) > narrow_klass_pointer_bits();
  ASSERT_HERE(should_need_class_space == needs_class_space());

  const size_t klass_align = klass_alignment_in_bytes();

  // must be aligned enough to hold 64-bit data
  ASSERT_HERE(is_aligned(klass_align, sizeof(uint64_t)));

  // should be smaller than the minimum metaspace chunk size (soft requirement)
  ASSERT_HERE(klass_align <= K);

  ASSERT_HERE(_klass_range_end > _klass_range_start);

  // Check that Klass range is fully engulfed in the encoding range
  const address encoding_start = _base;
  const address encoding_end = (address)
      LP64_ONLY((p2u(_base) + (uintptr_t)nth_bit(narrow_klass_pointer_bits() + _shift)))
      NOT_LP64(max_klass_range_size());
  ASSERT_HERE_2(_klass_range_start >= _base && _klass_range_end <= encoding_end,
                "Resulting encoding range does not fully cover the class range");

  // Check that Klass range is aligned to Klass alignment. Note that this should never be
  // an issue since the Klass range is handed in by either CDS- or Metaspace-initialization, and
  // it should be the result of an mmap operation that operates on page sizes. So as long as
  // the Klass alignment is <= page size, we are fine.
  ASSERT_HERE_2(is_aligned(_klass_range_start, klass_align) &&
                is_aligned(_klass_range_end, klass_align),
                "Klass range must start and end at a properly aligned address");

  // Check _lowest_valid_narrow_klass_id and _highest_valid_narrow_klass_id
  ASSERT_HERE_2(_lowest_valid_narrow_klass_id > 0, "Null is not a valid narrowKlass");
  ASSERT_HERE(_highest_valid_narrow_klass_id > _lowest_valid_narrow_klass_id);

  Klass* const k1 = decode_not_null_without_asserts(_lowest_valid_narrow_klass_id, _base, _shift);
  if (encoding_start == _klass_range_start) {
    ASSERT_HERE_2((address)k1 == _klass_range_start + klass_align, "Not lowest");
  } else {
    ASSERT_HERE_2((address)k1 == _klass_range_start, "Not lowest");
  }
  narrowKlass nk1 = encode_not_null_without_asserts(k1, _base, _shift);
  ASSERT_HERE_2(nk1 == _lowest_valid_narrow_klass_id, "not reversible");

  Klass* const k2 = decode_not_null_without_asserts(_highest_valid_narrow_klass_id, _base, _shift);
  ASSERT_HERE((address)k2 == _klass_range_end - klass_align);
  narrowKlass nk2 = encode_not_null_without_asserts(k2, _base, _shift);
  ASSERT_HERE_2(nk2 == _highest_valid_narrow_klass_id, "not reversible");

#ifdef AARCH64
  // On aarch64, we never expect a shift value > 0 in standard (non-coh) mode
  ASSERT_HERE_2(UseCompactObjectHeaders || _shift == 0, "Shift > 0 in non-coh mode?");
#endif
#undef ASSERT_HERE
#undef ASSERT_HERE_2
}
#endif // ASSERT

// Helper function: given current Klass Range, Base and Shift, calculate the lowest and highest values
// of narrowKlass we can expect.
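// For illustration only (assumed example values): if _base == _klass_range_start, _shift == 10
// and klass_alignment_in_bytes() == 1024, the first usable Klass slot sits one alignment unit
// above the range start (the protection zone), so _lowest_valid_narrow_klass_id == (1024 >> 10) == 1.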
void CompressedKlassPointers::calc_lowest_highest_narrow_klass_id() {
  address lowest_possible_klass_location = _klass_range_start;

  // A Klass will never be placed at the Encoding range start, since that would translate to a narrowKlass=0, which
  // is disallowed. If the encoding range starts at the klass range start, both Metaspace and CDS establish an
  // mprotected zone for this reason (see establish_protection_zone).
  if (lowest_possible_klass_location == _base) {
    lowest_possible_klass_location += klass_alignment_in_bytes();
  }
  _lowest_valid_narrow_klass_id = (narrowKlass) ((uintptr_t)(lowest_possible_klass_location - _base) >> _shift);

  address highest_possible_klass_location = _klass_range_end - klass_alignment_in_bytes();
  _highest_valid_narrow_klass_id = (narrowKlass) ((uintptr_t)(highest_possible_klass_location - _base) >> _shift);
}

// Given a klass range [addr, addr+len) and a given encoding scheme, assert that this scheme covers the range, then
// set this encoding scheme. Used by CDS at runtime to re-instate the scheme used to pre-compute klass ids for
// archived heap objects.
void CompressedKlassPointers::initialize_for_given_encoding(address addr, size_t len, address requested_base, int requested_shift) {
  if (len > max_klass_range_size()) {
    stringStream ss;
    ss.print("Class space size and CDS archive size combined (%zu) "
             "exceed the maximum possible size (%zu)",
             len, max_klass_range_size());
    vm_exit_during_initialization(ss.base());
  }

  // Remember Klass range:
  _klass_range_start = addr;
  _klass_range_end = addr + len;

  _base = requested_base;
  _shift = requested_shift;

  calc_lowest_highest_narrow_klass_id();

  // This has already been checked for SharedBaseAddress and if this fails, it's a bug in the allocation code.
  if (!set_klass_decode_mode()) {
    fatal("base=" PTR_FORMAT " given with shift %d, cannot be used to encode class pointers",
          p2i(_base), _shift);
  }

  DEBUG_ONLY(sanity_check_after_initialization();)
}

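// Common helper for the reservation attempts below: try to reserve `size` bytes within
// [from, to), aligned to at least the Metaspace reservation alignment. If `aslr` is set,
// the attach point within that window is randomized.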
char* CompressedKlassPointers::reserve_address_space_X(uintptr_t from, uintptr_t to, size_t size, size_t alignment, bool aslr) {
  alignment = MAX2(Metaspace::reserve_alignment(), alignment);
  return os::attempt_reserve_memory_between((char*)from, (char*)to, size, alignment, aslr);
}

char* CompressedKlassPointers::reserve_address_space_below_4G(size_t size, bool aslr) {
  return reserve_address_space_X(0, nth_bit(32), size, Metaspace::reserve_alignment(), aslr);
}

char* CompressedKlassPointers::reserve_address_space_for_unscaled_encoding(size_t size, bool aslr) {
  const size_t unscaled_max = nth_bit(narrow_klass_pointer_bits());
  return reserve_address_space_X(0, unscaled_max, size, Metaspace::reserve_alignment(), aslr);
}

char* CompressedKlassPointers::reserve_address_space_for_zerobased_encoding(size_t size, bool aslr) {
  const size_t unscaled_max = nth_bit(narrow_klass_pointer_bits());
  const size_t zerobased_max = nth_bit(narrow_klass_pointer_bits() + max_shift());
  return reserve_address_space_X(unscaled_max, zerobased_max, size, Metaspace::reserve_alignment(), aslr);
}

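// Reserve a range whose start address has only bits [32, 48) set: we search in [2^32, 2^48)
// with an alignment of 2^32, so the resulting base is a multiple of 4 GB below 2^48. Such a
// base can be materialized with a single 16-bit immediate move (hence the name), e.g. a movk
// on aarch64 (illustrative; the exact instruction choice is up to the platform code).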
char* CompressedKlassPointers::reserve_address_space_for_16bit_move(size_t size, bool aslr) {
  return reserve_address_space_X(nth_bit(32), nth_bit(48), size, nth_bit(32), aslr);
}

void CompressedKlassPointers::initialize(address addr, size_t len) {

  if (len > max_klass_range_size()) {
    stringStream ss;
    ss.print("Class space size (%zu) exceeds the maximum possible size (%zu)",
             len, max_klass_range_size());
    vm_exit_during_initialization(ss.base());
  }

  // Remember the Klass range:
  _klass_range_start = addr;
  _klass_range_end = addr + len;

  // Calculate Base and Shift:

  if (UseCompactObjectHeaders) {

    // This handles the case that we - experimentally - reduce the number of
    // class pointer bits further, such that (shift + num bits) < 32.
    assert(len <= (size_t)nth_bit(narrow_klass_pointer_bits() + max_shift()),
           "klass range size exceeds encoding, len: %zu, narrow_klass_pointer_bits: %d, max_shift: %d", len, narrow_klass_pointer_bits(), max_shift());

    // In compact object header mode, with 19-bit narrowKlass, we don't attempt
    // zero-based mode. Instead, we set the base to the start of the klass range and
    // then try for the smallest shift possible that still covers the whole range.
    // The reason is that we want to avoid, if possible, shift values larger than
    // log2 of the cache line size.
    _base = addr;

    const int log_cacheline = exact_log2(DEFAULT_CACHE_LINE_SIZE);
    int s = max_shift();
    while (s > log_cacheline && ((size_t)nth_bit(narrow_klass_pointer_bits() + s - 1) > len)) {
      s--;
    }
    _shift = s;
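    // Example (illustrative numbers only): with a 64-byte cache line, log_cacheline == 6, so
    // the loop above never lowers the shift below 6; for a klass range strictly smaller than
    // nth_bit(narrow_klass_pointer_bits() + 6), it settles on _shift == 6.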

  } else {

#ifdef _LP64
    // Traditional (non-compact) header mode
    const uintptr_t unscaled_max = nth_bit(narrow_klass_pointer_bits());
    const uintptr_t zerobased_max = nth_bit(narrow_klass_pointer_bits() + max_shift());

#ifdef AARCH64
    // Aarch64 avoids the zero-based shifted mode (_base = 0, _shift > 0); instead, it prefers
    // non-zero-based mode with a zero shift.
    _shift = 0;
    address const end = addr + len;
    _base = (end <= (address)unscaled_max) ? nullptr : addr;
#else

    // We try, in order of preference:
    // - unscaled      (base=0 shift=0)
    // - zero-based    (base=0 shift>0)
    // - nonzero-base  (base>0 shift=0)
    // Note that base>0 shift>0 should never be needed, since the klass range will
    // never exceed 4GB.
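    // For example (assuming the usual 32-bit narrowKlass and a max shift of 3, values for
    // illustration only): a range ending at or below 4 GB can use unscaled encoding, a range
    // ending at or below 32 GB can use zero-based encoding, and anything higher falls back to
    // base == range start with shift 0.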
    address const end = addr + len;
    if (end <= (address)unscaled_max) {
      _base = nullptr;
      _shift = 0;
    } else {
      if (end <= (address)zerobased_max) {
        _base = nullptr;
        _shift = max_shift();
      } else {
        _base = addr;
        _shift = 0;
      }
    }
#endif // AARCH64
#else
    // 32-bit "compressed class pointer" mode
    _base = nullptr;
    _shift = 0;
    // As our "protection zone", we simply rely on the lowest, protected part of
    // the user address space.
    _protection_zone_size = os::vm_min_address();
#endif // LP64
  }

  calc_lowest_highest_narrow_klass_id();

  // Initialize JIT-specific decoding settings
  if (!set_klass_decode_mode()) {

    // If the base address was specified by the user, exit with an informative error message.
    if (CompressedClassSpaceBaseAddress == (size_t)_base) {
      vm_exit_during_initialization(
        err_msg("CompressedClassSpaceBaseAddress=" PTR_FORMAT " given with shift %d, cannot be used to encode class pointers",
                CompressedClassSpaceBaseAddress, _shift));
    } else {
      // If this fails, it's a bug in the allocation code.
      fatal("CompressedClassSpaceBaseAddress=" PTR_FORMAT " given with shift %d, cannot be used to encode class pointers",
            p2i(_base), _shift);
    }
  }

  DEBUG_ONLY(sanity_check_after_initialization();)
}

void CompressedKlassPointers::print_mode(outputStream* st) {
  st->print_cr("UseCompressedClassPointers %d, UseCompactObjectHeaders %d",
               UseCompressedClassPointers, UseCompactObjectHeaders);
  if (UseCompressedClassPointers) {
    st->print_cr("Narrow klass pointer bits %d, Max shift %d",
                 _narrow_klass_pointer_bits, _max_shift);
    st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
                 p2i(base()), shift());
    st->print_cr("Encoding Range: " RANGE2FMT, RANGE2FMTARGS(_base, encoding_range_end()));
    st->print_cr("Klass Range: " RANGE2FMT, RANGE2FMTARGS(_klass_range_start, _klass_range_end));
    st->print_cr("Klass ID Range: [%u - %u) (%u)", _lowest_valid_narrow_klass_id, _highest_valid_narrow_klass_id + 1,
                 _highest_valid_narrow_klass_id + 1 - _lowest_valid_narrow_klass_id);
    if (_protection_zone_size > 0) {
      st->print_cr("Protection zone: " RANGEFMT, RANGEFMTARGS(_base, _protection_zone_size));
    } else {
      st->print_cr("No protection zone.");
    }
  } else {
    st->print_cr("UseCompressedClassPointers off");
  }
}

// On AIX, we cannot mprotect archive space or class space since they are reserved with SystemV shm.
static constexpr bool can_mprotect_archive_space = NOT_AIX(true) AIX_ONLY(false);

// Protect a zone at the start of the encoding range
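// (The intent: a narrowKlass of 0 is invalid; decoding it without a check yields _base, which
// lies inside this zone, so stray accesses through a null narrowKlass fault right away. See also
// calc_lowest_highest_narrow_klass_id.)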
void CompressedKlassPointers::establish_protection_zone(address addr, size_t size) {
  assert(_protection_zone_size == 0, "just once");
  assert(addr == base(), "Protection zone not at start of encoding range?");
  assert(size > 0 && is_aligned(size, os::vm_page_size()), "Protection zone not page sized");
  const bool rc = can_mprotect_archive_space && os::protect_memory((char*)addr, size, os::MEM_PROT_NONE, false);
  log_info(metaspace)("%s Narrow Klass Protection zone " RANGEFMT,
                      (rc ? "Established" : "FAILED to establish"),
                      RANGEFMTARGS(addr, size));
  if (!rc) {
    // If we fail to establish the protection zone, we fill it with a distinctive pattern to make it
    // stick out in register values (0x50 aka 'P', repeated).
    os::commit_memory((char*)addr, size, false);
    memset(addr, 'P', size);
  }
  _protection_zone_size = size;
}

bool CompressedKlassPointers::is_in_protection_zone(address addr) {
  return _protection_zone_size > 0 ?
      (addr >= base() && addr < base() + _protection_zone_size) : false;
}