1 /* 2 * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "logging/log.hpp" 26 #include "memory/metaspace.hpp" 27 #include "oops/compressedKlass.inline.hpp" 28 #include "runtime/globals.hpp" 29 #include "runtime/java.hpp" 30 #include "runtime/os.hpp" 31 #include "utilities/debug.hpp" 32 #include "utilities/formatBuffer.hpp" 33 #include "utilities/globalDefinitions.hpp" 34 #include "utilities/ostream.hpp" 35 36 int CompressedKlassPointers::_narrow_klass_pointer_bits = -1; 37 int CompressedKlassPointers::_max_shift = -1; 38 39 address CompressedKlassPointers::_base = (address)-1; 40 int CompressedKlassPointers::_shift = -1; 41 address CompressedKlassPointers::_klass_range_start = nullptr; 42 address CompressedKlassPointers::_klass_range_end = nullptr; 43 narrowKlass CompressedKlassPointers::_lowest_valid_narrow_klass_id = (narrowKlass)-1; 44 narrowKlass CompressedKlassPointers::_highest_valid_narrow_klass_id = (narrowKlass)-1; 45 size_t CompressedKlassPointers::_protection_zone_size = 0; 46 47 #ifdef _LP64 48 49 size_t CompressedKlassPointers::max_klass_range_size() { 50 // We disallow klass range sizes larger than 4GB even if the encoding 51 // range would allow for a larger Klass range (e.g. Base=zero, shift=3 -> 32GB). 52 // That is because many CPU-specific compiler decodings do not want the 53 // shifted narrow Klass to spill over into the third quadrant of the 64-bit target 54 // address, e.g. to use a 16-bit move for a simplified base addition. 55 return MIN2(4 * G, max_encoding_range_size()); 56 } 57 58 void CompressedKlassPointers::pre_initialize() { 59 if (UseCompactObjectHeaders) { 60 _narrow_klass_pointer_bits = narrow_klass_pointer_bits_coh; 61 _max_shift = max_shift_coh; 62 } else { 63 _narrow_klass_pointer_bits = narrow_klass_pointer_bits_noncoh; 64 _max_shift = max_shift_noncoh; 65 } 66 } 67 68 #ifdef ASSERT 69 void CompressedKlassPointers::sanity_check_after_initialization() { 70 // In expectation of an assert, prepare condensed info to be printed with the assert. 71 char tmp[256]; 72 os::snprintf(tmp, sizeof(tmp), "klass range: " RANGE2FMT "," 73 " base " PTR_FORMAT ", shift %d, lowest/highest valid narrowKlass %u/%u", 74 RANGE2FMTARGS(_klass_range_start, _klass_range_end), 75 p2i(_base), _shift, _lowest_valid_narrow_klass_id, _highest_valid_narrow_klass_id); 76 #define ASSERT_HERE(cond) assert(cond, " (%s)", tmp); 77 #define ASSERT_HERE_2(cond, msg) assert(cond, msg " (%s)", tmp); 78 79 // There is no technical reason preventing us from using other klass pointer bit lengths, 80 // but it should be a deliberate choice 81 ASSERT_HERE(_narrow_klass_pointer_bits == 32 || _narrow_klass_pointer_bits == 19); 82 83 // All values must be inited 84 ASSERT_HERE(_max_shift != -1); 85 ASSERT_HERE(_klass_range_start != (address)-1); 86 ASSERT_HERE(_klass_range_end != (address)-1); 87 ASSERT_HERE(_lowest_valid_narrow_klass_id != (narrowKlass)-1); 88 ASSERT_HERE(_base != (address)-1); 89 ASSERT_HERE(_shift != -1); 90 91 const size_t klass_align = klass_alignment_in_bytes(); 92 93 // must be aligned enough hold 64-bit data 94 ASSERT_HERE(is_aligned(klass_align, sizeof(uint64_t))); 95 96 // should be smaller than the minimum metaspace chunk size (soft requirement) 97 ASSERT_HERE(klass_align <= K); 98 99 ASSERT_HERE(_klass_range_end > _klass_range_start); 100 101 // Check that Klass range is fully engulfed in the encoding range 102 const address encoding_start = _base; 103 const address encoding_end = (address)(p2u(_base) + (uintptr_t)nth_bit(narrow_klass_pointer_bits() + _shift)); 104 ASSERT_HERE_2(_klass_range_start >= _base && _klass_range_end <= encoding_end, 105 "Resulting encoding range does not fully cover the class range"); 106 107 // Check that Klass range is aligned to Klass alignment. Note that this should never be 108 // an issue since the Klass range is handed in by either CDS- or Metaspace-initialization, and 109 // it should be the result of an mmap operation that operates on page sizes. So as long as 110 // the Klass alignment is <= page size, we are fine. 111 ASSERT_HERE_2(is_aligned(_klass_range_start, klass_align) && 112 is_aligned(_klass_range_end, klass_align), 113 "Klass range must start and end at a properly aligned address"); 114 115 // Check _lowest_valid_narrow_klass_id and _highest_valid_narrow_klass_id 116 ASSERT_HERE_2(_lowest_valid_narrow_klass_id > 0, "Null is not a valid narrowKlass"); 117 ASSERT_HERE(_highest_valid_narrow_klass_id > _lowest_valid_narrow_klass_id); 118 119 Klass* const k1 = decode_not_null_without_asserts(_lowest_valid_narrow_klass_id, _base, _shift); 120 if (encoding_start == _klass_range_start) { 121 ASSERT_HERE_2((address)k1 == _klass_range_start + klass_align, "Not lowest"); 122 } else { 123 ASSERT_HERE_2((address)k1 == _klass_range_start, "Not lowest"); 124 } 125 narrowKlass nk1 = encode_not_null_without_asserts(k1, _base, _shift); 126 ASSERT_HERE_2(nk1 == _lowest_valid_narrow_klass_id, "not reversible"); 127 128 Klass* const k2 = decode_not_null_without_asserts(_highest_valid_narrow_klass_id, _base, _shift); 129 ASSERT_HERE((address)k2 == _klass_range_end - klass_align); 130 narrowKlass nk2 = encode_not_null_without_asserts(k2, _base, _shift); 131 ASSERT_HERE_2(nk2 == _highest_valid_narrow_klass_id, "not reversible"); 132 133 #ifdef AARCH64 134 // On aarch64, we never expect a shift value > 0 in standard (non-coh) mode 135 ASSERT_HERE_2(UseCompactObjectHeaders || _shift == 0, "Shift > 0 in non-coh mode?"); 136 #endif 137 #undef ASSERT_HERE 138 #undef ASSERT_HERE_2 139 } 140 #endif // ASSERT 141 142 // Helper function: given current Klass Range, Base and Shift, calculate the lowest and highest values 143 // of narrowKlass we can expect. 144 void CompressedKlassPointers::calc_lowest_highest_narrow_klass_id() { 145 address lowest_possible_klass_location = _klass_range_start; 146 147 // A Klass will never be placed at the Encoding range start, since that would translate to a narrowKlass=0, which 148 // is disallowed. Note that both Metaspace and CDS prvent allocation at the first address for this reason. 149 if (lowest_possible_klass_location == _base) { 150 lowest_possible_klass_location += klass_alignment_in_bytes(); 151 } 152 _lowest_valid_narrow_klass_id = (narrowKlass) ((uintptr_t)(lowest_possible_klass_location - _base) >> _shift); 153 154 address highest_possible_klass_location = _klass_range_end - klass_alignment_in_bytes(); 155 _highest_valid_narrow_klass_id = (narrowKlass) ((uintptr_t)(highest_possible_klass_location - _base) >> _shift); 156 } 157 158 // Given a klass range [addr, addr+len) and a given encoding scheme, assert that this scheme covers the range, then 159 // set this encoding scheme. Used by CDS at runtime to re-instate the scheme used to pre-compute klass ids for 160 // archived heap objects. 161 void CompressedKlassPointers::initialize_for_given_encoding(address addr, size_t len, address requested_base, int requested_shift) { 162 if (len > max_klass_range_size()) { 163 stringStream ss; 164 ss.print("Class space size and CDS archive size combined (%zu) " 165 "exceed the maximum possible size (%zu)", 166 len, max_klass_range_size()); 167 vm_exit_during_initialization(ss.base()); 168 } 169 170 // Remember Klass range: 171 _klass_range_start = addr; 172 _klass_range_end = addr + len; 173 174 _base = requested_base; 175 _shift = requested_shift; 176 177 calc_lowest_highest_narrow_klass_id(); 178 179 // This has already been checked for SharedBaseAddress and if this fails, it's a bug in the allocation code. 180 if (!set_klass_decode_mode()) { 181 fatal("base=" PTR_FORMAT " given with shift %d, cannot be used to encode class pointers", 182 p2i(_base), _shift); 183 } 184 185 DEBUG_ONLY(sanity_check_after_initialization();) 186 } 187 188 char* CompressedKlassPointers::reserve_address_space_X(uintptr_t from, uintptr_t to, size_t size, size_t alignment, bool aslr) { 189 alignment = MAX2(Metaspace::reserve_alignment(), alignment); 190 return os::attempt_reserve_memory_between((char*)from, (char*)to, size, alignment, aslr); 191 } 192 193 char* CompressedKlassPointers::reserve_address_space_for_unscaled_encoding(size_t size, bool aslr) { 194 const size_t unscaled_max = nth_bit(narrow_klass_pointer_bits()); 195 return reserve_address_space_X(0, unscaled_max, size, Metaspace::reserve_alignment(), aslr); 196 } 197 198 char* CompressedKlassPointers::reserve_address_space_for_zerobased_encoding(size_t size, bool aslr) { 199 const size_t unscaled_max = nth_bit(narrow_klass_pointer_bits()); 200 const size_t zerobased_max = nth_bit(narrow_klass_pointer_bits() + max_shift()); 201 return reserve_address_space_X(unscaled_max, zerobased_max, size, Metaspace::reserve_alignment(), aslr); 202 } 203 204 char* CompressedKlassPointers::reserve_address_space_for_16bit_move(size_t size, bool aslr) { 205 return reserve_address_space_X(nth_bit(32), nth_bit(48), size, nth_bit(32), aslr); 206 } 207 208 void CompressedKlassPointers::initialize(address addr, size_t len) { 209 210 if (len > max_klass_range_size()) { 211 stringStream ss; 212 ss.print("Class space size (%zu) exceeds the maximum possible size (%zu)", 213 len, max_klass_range_size()); 214 vm_exit_during_initialization(ss.base()); 215 } 216 217 // Remember the Klass range: 218 _klass_range_start = addr; 219 _klass_range_end = addr + len; 220 221 // Calculate Base and Shift: 222 223 if (UseCompactObjectHeaders) { 224 225 // This handles the case that we - experimentally - reduce the number of 226 // class pointer bits further, such that (shift + num bits) < 32. 227 assert(len <= (size_t)nth_bit(narrow_klass_pointer_bits() + max_shift()), 228 "klass range size exceeds encoding, len: %zu, narrow_klass_pointer_bits: %d, max_shift: %d", len, narrow_klass_pointer_bits(), max_shift()); 229 230 // In compact object header mode, with 19-bit narrowKlass, we don't attempt for 231 // zero-based mode. Instead, we set the base to the start of the klass range and 232 // then try for the smallest shift possible that still covers the whole range. 233 // The reason is that we want to avoid, if possible, shifts larger than 234 // a cacheline size. 235 _base = addr; 236 237 const int log_cacheline = exact_log2(DEFAULT_CACHE_LINE_SIZE); 238 int s = max_shift(); 239 while (s > log_cacheline && ((size_t)nth_bit(narrow_klass_pointer_bits() + s - 1) > len)) { 240 s--; 241 } 242 _shift = s; 243 244 } else { 245 246 // Traditional (non-compact) header mode 247 const uintptr_t unscaled_max = nth_bit(narrow_klass_pointer_bits()); 248 const uintptr_t zerobased_max = nth_bit(narrow_klass_pointer_bits() + max_shift()); 249 250 #ifdef AARCH64 251 // Aarch64 avoids zero-base shifted mode (_base=0 _shift>0), instead prefers 252 // non-zero-based mode with a zero shift. 253 _shift = 0; 254 address const end = addr + len; 255 _base = (end <= (address)unscaled_max) ? nullptr : addr; 256 #else 257 // We try, in order of preference: 258 // -unscaled (base=0 shift=0) 259 // -zero-based (base=0 shift>0) 260 // -nonzero-base (base>0 shift=0) 261 // Note that base>0 shift>0 should never be needed, since the klass range will 262 // never exceed 4GB. 263 address const end = addr + len; 264 if (end <= (address)unscaled_max) { 265 _base = nullptr; 266 _shift = 0; 267 } else { 268 if (end <= (address)zerobased_max) { 269 _base = nullptr; 270 _shift = max_shift(); 271 } else { 272 _base = addr; 273 _shift = 0; 274 } 275 } 276 #endif // AARCH64 277 } 278 279 calc_lowest_highest_narrow_klass_id(); 280 281 // Initialize klass decode mode and check compability with decode instructions 282 if (!set_klass_decode_mode()) { 283 284 // Give fatal error if this is a specified address 285 if (CompressedClassSpaceBaseAddress == (size_t)_base) { 286 vm_exit_during_initialization( 287 err_msg("CompressedClassSpaceBaseAddress=" PTR_FORMAT " given with shift %d, cannot be used to encode class pointers", 288 CompressedClassSpaceBaseAddress, _shift)); 289 } else { 290 // If this fails, it's a bug in the allocation code. 291 fatal("CompressedClassSpaceBaseAddress=" PTR_FORMAT " given with shift %d, cannot be used to encode class pointers", 292 p2i(_base), _shift); 293 } 294 } 295 #ifdef ASSERT 296 sanity_check_after_initialization(); 297 #endif 298 } 299 300 void CompressedKlassPointers::print_mode(outputStream* st) { 301 st->print_cr("UseCompressedClassPointers %d, UseCompactObjectHeaders %d", 302 UseCompressedClassPointers, UseCompactObjectHeaders); 303 if (UseCompressedClassPointers) { 304 st->print_cr("Narrow klass pointer bits %d, Max shift %d", 305 _narrow_klass_pointer_bits, _max_shift); 306 st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d", 307 p2i(base()), shift()); 308 st->print_cr("Encoding Range: " RANGE2FMT, RANGE2FMTARGS(_base, encoding_range_end())); 309 st->print_cr("Klass Range: " RANGE2FMT, RANGE2FMTARGS(_klass_range_start, _klass_range_end)); 310 st->print_cr("Klass ID Range: [%u - %u) (%u)", _lowest_valid_narrow_klass_id, _highest_valid_narrow_klass_id + 1, 311 _highest_valid_narrow_klass_id + 1 - _lowest_valid_narrow_klass_id); 312 if (_protection_zone_size > 0) { 313 st->print_cr("Protection zone: " RANGEFMT, RANGEFMTARGS(_base, _protection_zone_size)); 314 } else { 315 st->print_cr("No protection zone."); 316 } 317 } else { 318 st->print_cr("UseCompressedClassPointers off"); 319 } 320 } 321 322 // On AIX, we cannot mprotect archive space or class space since they are reserved with SystemV shm. 323 static constexpr bool can_mprotect_archive_space = NOT_AIX(true) AIX_ONLY(false); 324 325 // Protect a zone a the start of the encoding range 326 void CompressedKlassPointers::establish_protection_zone(address addr, size_t size) { 327 assert(_protection_zone_size == 0, "just once"); 328 assert(addr == base(), "Protection zone not at start of encoding range?"); 329 assert(size > 0 && is_aligned(size, os::vm_page_size()), "Protection zone not page sized"); 330 const bool rc = can_mprotect_archive_space && os::protect_memory((char*)addr, size, os::MEM_PROT_NONE, false); 331 log_info(metaspace)("%s Narrow Klass Protection zone " RANGEFMT, 332 (rc ? "Established" : "FAILED to establish "), 333 RANGEFMTARGS(addr, size)); 334 if (!rc) { 335 // If we fail to establish the protection zone, we fill it with a clear pattern to make it 336 // stick out in register values (0x50 aka 'P', repeated) 337 os::commit_memory((char*)addr, size, false); 338 memset(addr, 'P', size); 339 } 340 _protection_zone_size = size; 341 } 342 343 bool CompressedKlassPointers::is_in_protection_zone(address addr) { 344 return _protection_zone_size > 0 ? 345 (addr >= base() && addr < base() + _protection_zone_size) : false; 346 } 347 348 #endif // _LP64