30 #include "runtime/os.hpp"
31 #include "utilities/debug.hpp"
32 #include "utilities/formatBuffer.hpp"
33 #include "utilities/globalDefinitions.hpp"
34 #include "utilities/ostream.hpp"
35
36 // Geometry of the narrowKlass encoding; set once in pre_initialize().
36 // -1 serves as a "not yet initialized" sentinel (asserted against in
36 // sanity_check_after_initialization()).
36 int CompressedKlassPointers::_narrow_klass_pointer_bits = -1;
37 int CompressedKlassPointers::_max_shift = -1;
38
39 // Encoding base/shift and the [start, end) bounds of the Klass range.
39 // (address)-1 and -1 are "not yet initialized" sentinels; the range
39 // pointers start out as nullptr.
39 address CompressedKlassPointers::_base = (address)-1;
40 int CompressedKlassPointers::_shift = -1;
41 address CompressedKlassPointers::_klass_range_start = nullptr;
42 address CompressedKlassPointers::_klass_range_end = nullptr;
43 // Bounds of the valid narrowKlass value range; (narrowKlass)-1 is the
43 // "not yet initialized" sentinel.
43 narrowKlass CompressedKlassPointers::_lowest_valid_narrow_klass_id = (narrowKlass)-1;
44 narrowKlass CompressedKlassPointers::_highest_valid_narrow_klass_id = (narrowKlass)-1;
45 // Size of the protection zone (presumably at the start of the Klass range;
45 // not visible here — confirm against users). 0 means none established yet.
45 size_t CompressedKlassPointers::_protection_zone_size = 0;
46
47 #ifdef _LP64
48
49 size_t CompressedKlassPointers::max_klass_range_size() {
50   // Even where the encoding range would permit a larger Klass range
51   // (e.g. base == zero with shift == 3 could cover 32GB), we cap the Klass
52   // range at 4GB. Many CPU-specific compiler decodings rely on the shifted
53   // narrow Klass value not spilling into the third quadrant of the 64-bit
54   // target address, e.g. so that a 16-bit move suffices for a simplified
55   // base addition.
55   const size_t cap = 4 * G;
55   return MIN2(max_encoding_range_size(), cap);
56 }
57
58 void CompressedKlassPointers::pre_initialize() {
59   // Select the narrowKlass geometry (pointer bit width and maximum shift)
60   // depending on whether compact object headers are in use.
60   const bool coh = UseCompactObjectHeaders;
61   _narrow_klass_pointer_bits = coh ? narrow_klass_pointer_bits_coh
62                                    : narrow_klass_pointer_bits_noncoh;
63   _max_shift                 = coh ? max_shift_coh
64                                    : max_shift_noncoh;
65 }
67
68 #ifdef ASSERT
69 void CompressedKlassPointers::sanity_check_after_initialization() {
70 // In expectation of an assert, prepare condensed info to be printed with the assert.
71 char tmp[256];
72 os::snprintf(tmp, sizeof(tmp), "klass range: " RANGE2FMT ","
73 " base " PTR_FORMAT ", shift %d, lowest/highest valid narrowKlass %u/%u",
74 RANGE2FMTARGS(_klass_range_start, _klass_range_end),
75 p2i(_base), _shift, _lowest_valid_narrow_klass_id, _highest_valid_narrow_klass_id);
76 #define ASSERT_HERE(cond) assert(cond, " (%s)", tmp);
77 #define ASSERT_HERE_2(cond, msg) assert(cond, msg " (%s)", tmp);
78
79 // All values must be inited
80 ASSERT_HERE(_max_shift != -1);
81 ASSERT_HERE(_klass_range_start != (address)-1);
82 ASSERT_HERE(_klass_range_end != (address)-1);
83 ASSERT_HERE(_lowest_valid_narrow_klass_id != (narrowKlass)-1);
84 ASSERT_HERE(_base != (address)-1);
85 ASSERT_HERE(_shift != -1);
86
87 const size_t klass_align = klass_alignment_in_bytes();
88
89 // must be aligned enough hold 64-bit data
90 ASSERT_HERE(is_aligned(klass_align, sizeof(uint64_t)));
91
92 // should be smaller than the minimum metaspace chunk size (soft requirement)
93 ASSERT_HERE(klass_align <= K);
94
95 ASSERT_HERE(_klass_range_end > _klass_range_start);
96
97 // Check that Klass range is fully engulfed in the encoding range
98 const address encoding_start = _base;
205 return reserve_address_space_X(nth_bit(32), nth_bit(48), size, nth_bit(32), aslr);
206 }
207
208 void CompressedKlassPointers::initialize(address addr, size_t len) {
209
210 if (len > max_klass_range_size()) {
211 stringStream ss;
212 ss.print("Class space size (%zu) exceeds the maximum possible size (%zu)",
213 len, max_klass_range_size());
214 vm_exit_during_initialization(ss.base());
215 }
216
217 // Remember the Klass range:
218 _klass_range_start = addr;
219 _klass_range_end = addr + len;
220
221 // Calculate Base and Shift:
222
223 if (UseCompactObjectHeaders) {
224
225 // In compact object header mode, with 22-bit narrowKlass, we don't attempt for
226 // zero-based mode. Instead, we set the base to the start of the klass range and
227 // then try for the smallest shift possible that still covers the whole range.
228 // The reason is that we want to avoid, if possible, shifts larger than
229 // a cacheline size.
230 _base = addr;
231
232 const int log_cacheline = exact_log2(DEFAULT_CACHE_LINE_SIZE);
233 int s = max_shift();
234 while (s > log_cacheline && ((size_t)nth_bit(narrow_klass_pointer_bits() + s - 1) > len)) {
235 s--;
236 }
237 _shift = s;
238
239 } else {
240
241 // Traditional (non-compact) header mode
242 const uintptr_t unscaled_max = nth_bit(narrow_klass_pointer_bits());
243 const uintptr_t zerobased_max = nth_bit(narrow_klass_pointer_bits() + max_shift());
244
245 #ifdef AARCH64
|
30 #include "runtime/os.hpp"
31 #include "utilities/debug.hpp"
32 #include "utilities/formatBuffer.hpp"
33 #include "utilities/globalDefinitions.hpp"
34 #include "utilities/ostream.hpp"
35
36 // Geometry of the narrowKlass encoding; set once in pre_initialize().
36 // -1 serves as a "not yet initialized" sentinel (asserted against in
36 // sanity_check_after_initialization()).
36 int CompressedKlassPointers::_narrow_klass_pointer_bits = -1;
37 int CompressedKlassPointers::_max_shift = -1;
38
39 // Encoding base/shift and the [start, end) bounds of the Klass range.
39 // (address)-1 and -1 are "not yet initialized" sentinels; the range
39 // pointers start out as nullptr.
39 address CompressedKlassPointers::_base = (address)-1;
40 int CompressedKlassPointers::_shift = -1;
41 address CompressedKlassPointers::_klass_range_start = nullptr;
42 address CompressedKlassPointers::_klass_range_end = nullptr;
43 // Bounds of the valid narrowKlass value range; (narrowKlass)-1 is the
43 // "not yet initialized" sentinel.
43 narrowKlass CompressedKlassPointers::_lowest_valid_narrow_klass_id = (narrowKlass)-1;
44 narrowKlass CompressedKlassPointers::_highest_valid_narrow_klass_id = (narrowKlass)-1;
45 // Size of the protection zone (presumably at the start of the Klass range;
45 // not visible here — confirm against users). 0 means none established yet.
45 size_t CompressedKlassPointers::_protection_zone_size = 0;
46
47 #ifdef _LP64
48
49 size_t CompressedKlassPointers::max_klass_range_size() {
50 #ifdef _LP64
51   // The encoding itself bounds the range: narrow_klass_pointer_bits plus
51   // max_shift give the number of addressable bytes.
51   const size_t limited_by_encoding = nth_bit(narrow_klass_pointer_bits() + max_shift());
52   assert(!UseCompactObjectHeaders || max_klass_range_size_coh == limited_by_encoding, "Sanity");
53   // Regardless of what the encoding would allow, cap the range at 4GB.
53   constexpr size_t cap = 4 * G;
54   return (limited_by_encoding < cap) ? limited_by_encoding : cap;
55 #else
56   // 32-bit: only 32-bit "narrow" Klass pointers allowed. If we ever support smaller narrow
57   // Klass pointers here, coding needs to be revised.
58   // We keep one page safety zone free to guard against size_t overflows on 32-bit. In practice
59   // this is irrelevant because these upper address space parts are not user-addressable on
60   // any of our 32-bit platforms.
61   return align_down(UINT_MAX, os::vm_page_size());
62 #endif
63 }
64
65 void CompressedKlassPointers::pre_initialize() {
66   // Select the narrowKlass geometry (pointer bit width and maximum shift)
67   // depending on whether compact object headers are in use.
67   if (!UseCompactObjectHeaders) {
68     _narrow_klass_pointer_bits = narrow_klass_pointer_bits_noncoh;
69     _max_shift = max_shift_noncoh;
70   } else {
71     _narrow_klass_pointer_bits = narrow_klass_pointer_bits_coh;
72     _max_shift = max_shift_coh;
73   }
74 }
74
75 #ifdef ASSERT
76 void CompressedKlassPointers::sanity_check_after_initialization() {
77 // In expectation of an assert, prepare condensed info to be printed with the assert.
78 char tmp[256];
79 os::snprintf(tmp, sizeof(tmp), "klass range: " RANGE2FMT ","
80 " base " PTR_FORMAT ", shift %d, lowest/highest valid narrowKlass %u/%u",
81 RANGE2FMTARGS(_klass_range_start, _klass_range_end),
82 p2i(_base), _shift, _lowest_valid_narrow_klass_id, _highest_valid_narrow_klass_id);
83 #define ASSERT_HERE(cond) assert(cond, " (%s)", tmp);
84 #define ASSERT_HERE_2(cond, msg) assert(cond, msg " (%s)", tmp);
85
86 // There is no technical reason preventing us from using other klass pointer bit lengths,
87 // but it should be a deliberate choice
88 ASSERT_HERE(_narrow_klass_pointer_bits == 32 || _narrow_klass_pointer_bits == 19);
89
90 // All values must be inited
91 ASSERT_HERE(_max_shift != -1);
92 ASSERT_HERE(_klass_range_start != (address)-1);
93 ASSERT_HERE(_klass_range_end != (address)-1);
94 ASSERT_HERE(_lowest_valid_narrow_klass_id != (narrowKlass)-1);
95 ASSERT_HERE(_base != (address)-1);
96 ASSERT_HERE(_shift != -1);
97
98 const size_t klass_align = klass_alignment_in_bytes();
99
100 // must be aligned enough hold 64-bit data
101 ASSERT_HERE(is_aligned(klass_align, sizeof(uint64_t)));
102
103 // should be smaller than the minimum metaspace chunk size (soft requirement)
104 ASSERT_HERE(klass_align <= K);
105
106 ASSERT_HERE(_klass_range_end > _klass_range_start);
107
108 // Check that Klass range is fully engulfed in the encoding range
109 const address encoding_start = _base;
216 return reserve_address_space_X(nth_bit(32), nth_bit(48), size, nth_bit(32), aslr);
217 }
218
219 void CompressedKlassPointers::initialize(address addr, size_t len) {
220
221 if (len > max_klass_range_size()) {
222 stringStream ss;
223 ss.print("Class space size (%zu) exceeds the maximum possible size (%zu)",
224 len, max_klass_range_size());
225 vm_exit_during_initialization(ss.base());
226 }
227
228 // Remember the Klass range:
229 _klass_range_start = addr;
230 _klass_range_end = addr + len;
231
232 // Calculate Base and Shift:
233
234 if (UseCompactObjectHeaders) {
235
236 // This handles the case that we - experimentally - reduce the number of
237 // class pointer bits further, such that (shift + num bits) < 32.
238 assert(len <= (size_t)nth_bit(narrow_klass_pointer_bits() + max_shift()),
239 "klass range size exceeds encoding, len: %zu, narrow_klass_pointer_bits: %d, max_shift: %d", len, narrow_klass_pointer_bits(), max_shift());
240
241 // In compact object header mode, with 19-bit narrowKlass, we don't attempt for
242 // zero-based mode. Instead, we set the base to the start of the klass range and
243 // then try for the smallest shift possible that still covers the whole range.
244 // The reason is that we want to avoid, if possible, shifts larger than
245 // a cacheline size.
246 _base = addr;
247
248 const int log_cacheline = exact_log2(DEFAULT_CACHE_LINE_SIZE);
249 int s = max_shift();
250 while (s > log_cacheline && ((size_t)nth_bit(narrow_klass_pointer_bits() + s - 1) > len)) {
251 s--;
252 }
253 _shift = s;
254
255 } else {
256
257 // Traditional (non-compact) header mode
258 const uintptr_t unscaled_max = nth_bit(narrow_klass_pointer_bits());
259 const uintptr_t zerobased_max = nth_bit(narrow_klass_pointer_bits() + max_shift());
260
261 #ifdef AARCH64
|