1 /* 2 * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "oops/klass.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"

// Static state of the narrow Klass pointer encoding. All members start out
// as -1 ("not yet initialized") sentinels; pre_initialize() sets the geometry
// (_tiny_cp, _narrow_klass_pointer_bits, _max_shift) and initialize()/
// initialize_for_given_encoding() set the encoding itself (_base, _shift,
// _range).
int CompressedKlassPointers::_tiny_cp = -1;
int CompressedKlassPointers::_narrow_klass_pointer_bits = -1;
int CompressedKlassPointers::_max_shift = -1;
#ifdef ASSERT
// Debug-only bookkeeping: bounds of the Klass range and the derived
// lowest/highest valid narrowKlass ids, used by the sanity checks.
address CompressedKlassPointers::_klass_range_start = (address)-1;
address CompressedKlassPointers::_klass_range_end = (address)-1;
narrowKlass CompressedKlassPointers::_lowest_valid_narrow_klass_id = (narrowKlass)-1;
narrowKlass CompressedKlassPointers::_highest_valid_narrow_klass_id = (narrowKlass)-1;
#endif

address CompressedKlassPointers::_base = (address)-1;
int CompressedKlassPointers::_shift = -1;
size_t CompressedKlassPointers::_range = (size_t)-1;

// The maximum allowed length of the Klass range (the address range engulfing
// CDS + class space) must not exceed 32-bit.
// There is a theoretical limit of: must not exceed the size of a fully-shifted
// narrow Klass pointer, which would be 32 + 3 = 35 bits in legacy mode;
// however, keeping this size below 32-bit allows us to use decoding techniques
// like 16-bit moves into the third quadrant on some architectures, and keeps
// the code less complex. 32-bit have always been enough for CDS+class space.
58 static constexpr size_t max_klass_range_size = 4 * G; 59 60 #ifdef _LP64 61 62 void CompressedKlassPointers::pre_initialize() { 63 if (UseCompactObjectHeaders) { 64 _tiny_cp = 1; 65 _narrow_klass_pointer_bits = narrow_klass_pointer_bits_tinycp; 66 _max_shift = max_shift_tinycp; 67 } else { 68 _tiny_cp = 0; 69 _narrow_klass_pointer_bits = narrow_klass_pointer_bits_legacy; 70 _max_shift = max_shift_legacy; 71 } 72 } 73 74 #ifdef ASSERT 75 void CompressedKlassPointers::sanity_check_after_initialization() { 76 // In expectation of an assert, prepare condensed info to be printed with the assert. 77 char tmp[256]; 78 os::snprintf(tmp, sizeof(tmp), PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT " %d " SIZE_FORMAT " %u %u", 79 p2i(_klass_range_start), p2i(_klass_range_end), p2i(_base), _shift, _range, 80 _lowest_valid_narrow_klass_id, _highest_valid_narrow_klass_id); 81 #define ASSERT_HERE(cond) assert(cond, " (%s)", tmp); 82 #define ASSERT_HERE_2(cond, msg) assert(cond, msg " (%s)", tmp); 83 84 // There is no technical reason preventing us from using other klass pointer bit lengths, 85 // but it should be a deliberate choice 86 ASSERT_HERE(_narrow_klass_pointer_bits == 32 || _narrow_klass_pointer_bits == 22); 87 88 // All values must be inited 89 ASSERT_HERE(_max_shift != -1); 90 ASSERT_HERE(_klass_range_start != (address)-1); 91 ASSERT_HERE(_klass_range_end != (address)-1); 92 ASSERT_HERE(_lowest_valid_narrow_klass_id != (narrowKlass)-1); 93 ASSERT_HERE(_base != (address)-1); 94 ASSERT_HERE(_shift != -1); 95 ASSERT_HERE(_range != (size_t)-1); 96 97 const size_t klab = klass_alignment_in_bytes(); 98 ASSERT_HERE(klab >= sizeof(uint64_t) && klab <= K); 99 100 // Check that Klass range is fully engulfed in the encoding range 101 ASSERT_HERE(_klass_range_end > _klass_range_start); 102 103 const address encoding_end = _base + nth_bit(narrow_klass_pointer_bits() + _shift); 104 ASSERT_HERE_2(_klass_range_start >= _base && _klass_range_end <= encoding_end, 105 "Resulting encoding 
range does not fully cover the class range"); 106 107 // Check that Klass range is aligned to Klass alignment. That should never be an issue since we mmap the 108 // relevant regions and klass alignment - tied to smallest metachunk size of 1K - will always be smaller 109 // than smallest page size of 4K. 110 ASSERT_HERE_2(is_aligned(_klass_range_start, klab) && is_aligned(_klass_range_end, klab), 111 "Klass range must start at a properly aligned address"); 112 113 // Check that lowest and highest possible narrowKlass values make sense 114 ASSERT_HERE_2(_lowest_valid_narrow_klass_id > 0, "Null is not a valid narrowKlass"); 115 ASSERT_HERE(_highest_valid_narrow_klass_id > _lowest_valid_narrow_klass_id); 116 117 Klass* k1 = decode_not_null_without_asserts(_lowest_valid_narrow_klass_id, _base, _shift); 118 ASSERT_HERE_2((address)k1 == _klass_range_start + klab, "Not lowest"); 119 narrowKlass nk1 = encode_not_null_without_asserts(k1, _base, _shift); 120 ASSERT_HERE_2(nk1 == _lowest_valid_narrow_klass_id, "not reversible"); 121 122 Klass* k2 = decode_not_null_without_asserts(_highest_valid_narrow_klass_id, _base, _shift); 123 // _highest_valid_narrow_klass_id must be decoded to the highest theoretically possible 124 // valid Klass* position in range, if we assume minimal Klass size 125 ASSERT_HERE((address)k2 < _klass_range_end); 126 ASSERT_HERE_2(align_up(((address)k2 + sizeof(Klass)), klab) >= _klass_range_end, "Not highest"); 127 narrowKlass nk2 = encode_not_null_without_asserts(k2, _base, _shift); 128 ASSERT_HERE_2(nk2 == _highest_valid_narrow_klass_id, "not reversible"); 129 130 #ifdef AARCH64 131 // On aarch64, we never expect a shift value > 0 in legacy mode 132 ASSERT_HERE_2(tiny_classpointer_mode() || _shift == 0, "Shift > 0 in legacy mode?"); 133 #endif 134 #undef ASSERT_HERE 135 #undef ASSERT_HERE_2 136 } 137 138 void CompressedKlassPointers::calc_lowest_highest_narrow_klass_id() { 139 // Given a Klass range, calculate lowest and highest narrowKlass. 
140 const size_t klab = klass_alignment_in_bytes(); 141 // Note that 0 is not a valid narrowKlass, and Metaspace prevents us for that reason from allocating at 142 // the very start of class space. So the very first valid Klass position is start-of-range + klab. 143 _lowest_valid_narrow_klass_id = 144 (narrowKlass) (((uintptr_t)(_klass_range_start - _base) + klab) >> _shift); 145 address highest_possible_klass = align_down(_klass_range_end - sizeof(Klass), klab); 146 _highest_valid_narrow_klass_id = (narrowKlass) ((uintptr_t)(highest_possible_klass - _base) >> _shift); 147 } 148 #endif // ASSERT 149 150 // Given a klass range [addr, addr+len) and a given encoding scheme, assert that this scheme covers the range, then 151 // set this encoding scheme. Used by CDS at runtime to re-instate the scheme used to pre-compute klass ids for 152 // archived heap objects. 153 void CompressedKlassPointers::initialize_for_given_encoding(address addr, size_t len, address requested_base, int requested_shift) { 154 address const end = addr + len; 155 156 if (len > max_klass_range_size) { 157 // Class space size is limited to 3G. This can theoretically happen if the CDS archive 158 // is larger than 1G and class space size is set to the maximum possible 3G. 159 vm_exit_during_initialization("Sum of CDS archive size and class space size exceed 4 GB"); 160 } 161 162 const size_t encoding_range_size = nth_bit(narrow_klass_pointer_bits() + requested_shift); 163 address encoding_range_end = requested_base + encoding_range_size; 164 165 // Note: it would be technically valid for the encoding base to precede the start of the Klass range. But we only call 166 // this function from CDS, and therefore know this to be true. 
167 assert(requested_base == addr, "Invalid requested base"); 168 169 _base = requested_base; 170 _shift = requested_shift; 171 _range = encoding_range_size; 172 173 #ifdef ASSERT 174 _klass_range_start = addr; 175 _klass_range_end = addr + len; 176 calc_lowest_highest_narrow_klass_id(); 177 sanity_check_after_initialization(); 178 #endif 179 180 DEBUG_ONLY(sanity_check_after_initialization();) 181 } 182 183 char* CompressedKlassPointers::reserve_address_space_X(uintptr_t from, uintptr_t to, size_t size, size_t alignment, bool aslr) { 184 alignment = MAX2(Metaspace::reserve_alignment(), alignment); 185 return os::attempt_reserve_memory_between((char*)from, (char*)to, size, alignment, aslr); 186 } 187 188 char* CompressedKlassPointers::reserve_address_space_for_unscaled_encoding(size_t size, bool aslr) { 189 if (tiny_classpointer_mode()) { 190 return nullptr; 191 } 192 const size_t unscaled_max = nth_bit(narrow_klass_pointer_bits()); 193 return reserve_address_space_X(0, unscaled_max, size, Metaspace::reserve_alignment(), aslr); 194 } 195 196 char* CompressedKlassPointers::reserve_address_space_for_zerobased_encoding(size_t size, bool aslr) { 197 if (tiny_classpointer_mode()) { 198 return nullptr; 199 } 200 const size_t unscaled_max = nth_bit(narrow_klass_pointer_bits()); 201 const size_t zerobased_max = nth_bit(narrow_klass_pointer_bits() + max_shift()); 202 return reserve_address_space_X(unscaled_max, zerobased_max, size, Metaspace::reserve_alignment(), aslr); 203 } 204 205 char* CompressedKlassPointers::reserve_address_space_for_16bit_move(size_t size, bool aslr) { 206 return reserve_address_space_X(nth_bit(32), nth_bit(48), size, nth_bit(32), aslr); 207 } 208 209 void CompressedKlassPointers::initialize(address addr, size_t len) { 210 211 if (len > max_klass_range_size) { 212 // Class space size is limited to 3G. This can theoretically happen if the CDS archive 213 // is larger than 1G and class space size is set to the maximum possible 3G. 
214 vm_exit_during_initialization("Sum of CDS archive size and class space size exceed 4 GB"); 215 } 216 217 // Give CPU a shot at a specialized init sequence 218 #ifndef ZERO 219 if (pd_initialize(addr, len)) { 220 return; 221 } 222 #endif 223 224 if (tiny_classpointer_mode()) { 225 226 // This handles the case that we - experimentally - reduce the number of 227 // class pointer bits further, such that (shift + num bits) < 32. 228 assert(len <= (size_t)nth_bit(narrow_klass_pointer_bits() + max_shift()), 229 "klass range size exceeds encoding"); 230 231 // In tiny classpointer mode, we don't attempt for zero-based mode. 232 // Instead, we set the base to the start of the klass range and then try 233 // for the smallest shift possible that still covers the whole range. 234 // The reason is that we want to avoid, if possible, shifts larger than 235 // a cacheline size. 236 _base = addr; 237 _range = len; 238 239 if (TinyClassPointerShift != 0) { 240 _shift = TinyClassPointerShift; 241 } else { 242 constexpr int log_cacheline = 6; 243 int s = max_shift(); 244 while (s > log_cacheline && ((size_t)nth_bit(narrow_klass_pointer_bits() + s - 1) > len)) { 245 s--; 246 } 247 _shift = s; 248 } 249 250 } else { 251 252 // In legacy mode, we try, in order of preference: 253 // -unscaled (base=0 shift=0) 254 // -zero-based (base=0 shift>0) 255 // -nonzero-base (base>0 shift=0) 256 // Note that base>0 shift>0 should never be needed, since the klass range will 257 // never exceed 4GB. 
258 const uintptr_t unscaled_max = nth_bit(narrow_klass_pointer_bits()); 259 const uintptr_t zerobased_max = nth_bit(narrow_klass_pointer_bits() + max_shift()); 260 261 address const end = addr + len; 262 if (end <= (address)unscaled_max) { 263 _base = nullptr; 264 _shift = 0; 265 } else { 266 if (end <= (address)zerobased_max) { 267 _base = nullptr; 268 _shift = max_shift(); 269 } else { 270 _base = addr; 271 _shift = 0; 272 } 273 } 274 _range = end - _base; 275 276 } 277 278 #ifdef ASSERT 279 _klass_range_start = addr; 280 _klass_range_end = addr + len; 281 calc_lowest_highest_narrow_klass_id(); 282 sanity_check_after_initialization(); 283 #endif 284 } 285 286 void CompressedKlassPointers::print_mode(outputStream* st) { 287 st->print_cr("UseCompressedClassPointers %d, UseCompactObjectHeaders %d, " 288 "narrow klass pointer bits %d, max shift %d", 289 UseCompressedClassPointers, UseCompactObjectHeaders, 290 _narrow_klass_pointer_bits, _max_shift); 291 if (_base == (address)-1) { 292 st->print_cr("Narrow klass encoding not initialized"); 293 return; 294 } 295 st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d, " 296 "Narrow klass range: " SIZE_FORMAT_X, p2i(base()), shift(), 297 range()); 298 #ifdef ASSERT 299 st->print_cr("Klass range: [" PTR_FORMAT "," PTR_FORMAT ")", 300 p2i(_klass_range_start), p2i(_klass_range_end)); 301 st->print_cr("Lowest valid nklass id: %u Highest valid nklass id: %u", 302 _lowest_valid_narrow_klass_id, _highest_valid_narrow_klass_id); 303 #endif 304 } 305 306 #endif // _LP64