// NOTE(review): tail of an initialization routine whose head lies before this
// chunk (the enclosing definition is not fully visible here — confirm against
// the complete file). It selects the narrowKlass pointer geometry: compact
// object headers use the *_coh constants, legacy headers the *_noncoh ones.
59 if (UseCompactObjectHeaders) {
60 _narrow_klass_pointer_bits = narrow_klass_pointer_bits_coh;
61 _max_shift = max_shift_coh;
62 } else {
63 _narrow_klass_pointer_bits = narrow_klass_pointer_bits_noncoh;
64 _max_shift = max_shift_noncoh;
65 }
66 }
67
68 #ifdef ASSERT
// Debug-only self-check of the encoding state established during initialization.
// It first renders a condensed description of that state into a local buffer;
// the ASSERT_HERE/ASSERT_HERE_2 macros below append this buffer to every assert
// message so a failure prints the full context.
69 void CompressedKlassPointers::sanity_check_after_initialization() {
70 // In expectation of an assert, prepare condensed info to be printed with the assert.
71 char tmp[256];
72 os::snprintf(tmp, sizeof(tmp), "klass range: " RANGE2FMT ","
73 " base " PTR_FORMAT ", shift %d, lowest/highest valid narrowKlass %u/%u",
74 RANGE2FMTARGS(_klass_range_start, _klass_range_end),
75 p2i(_base), _shift, _lowest_valid_narrow_klass_id, _highest_valid_narrow_klass_id);
76 #define ASSERT_HERE(cond) assert(cond, " (%s)", tmp);
77 #define ASSERT_HERE_2(cond, msg) assert(cond, msg " (%s)", tmp);
78
// All fields must have left their pre-init sentinel value (-1 cast to the
// respective type) by now.
79 // All values must be inited
80 ASSERT_HERE(_max_shift != -1);
81 ASSERT_HERE(_klass_range_start != (address)-1);
82 ASSERT_HERE(_klass_range_end != (address)-1);
83 ASSERT_HERE(_lowest_valid_narrow_klass_id != (narrowKlass)-1);
84 ASSERT_HERE(_base != (address)-1);
85 ASSERT_HERE(_shift != -1);
86
87 const size_t klass_align = klass_alignment_in_bytes();
88
89 // must be aligned enough hold 64-bit data
90 ASSERT_HERE(is_aligned(klass_align, sizeof(uint64_t)));
91
92 // should be smaller than the minimum metaspace chunk size (soft requirement)
93 ASSERT_HERE(klass_align <= K);
94
95 ASSERT_HERE(_klass_range_end > _klass_range_start);
96
97 // Check that Klass range is fully engulfed in the encoding range
98 const address encoding_start = _base;
// NOTE(review): the function continues past this point — the remainder of the
// range/encoding checks is outside this chunk.
// NOTE(review): tail of an address-space reservation helper whose head is
// outside this chunk. Final attempt reserves within [2^32, 2^48); the exact
// roles of the nth_bit(32) arguments (range bound vs. alignment) should be
// confirmed against the reserve_address_space_X declaration, not visible here.
201 return reserve_address_space_X(nth_bit(32), nth_bit(48), size, nth_bit(32), aslr);
202 }
203
// Establishes the narrowKlass encoding (base and shift) for the Klass range
// [addr, addr + len). Exits the VM during initialization if the requested
// range is larger than the encoding can possibly cover.
// NOTE(review): this definition is cut off at the #ifdef AARCH64 below; the
// non-compact-header branch continues outside this chunk.
204 void CompressedKlassPointers::initialize(address addr, size_t len) {
205
206 if (len > max_klass_range_size()) {
207 stringStream ss;
208 ss.print("Class space size (%zu) exceeds the maximum possible size (%zu)",
209 len, max_klass_range_size());
// ss.base() is the backing character buffer of the stringStream, i.e. the
// formatted error message.
210 vm_exit_during_initialization(ss.base());
211 }
212
213 // Remember the Klass range:
214 _klass_range_start = addr;
215 _klass_range_end = addr + len;
216
217 // Calculate Base and Shift:
218
219 if (UseCompactObjectHeaders) {
220
// NOTE(review): the bit width quoted below (22) should agree with
// narrow_klass_pointer_bits_coh; elsewhere in this file the same comment
// says 19-bit — reconcile which revision is current.
221 // In compact object header mode, with 22-bit narrowKlass, we don't attempt for
222 // zero-based mode. Instead, we set the base to the start of the klass range and
223 // then try for the smallest shift possible that still covers the whole range.
224 // The reason is that we want to avoid, if possible, shifts larger than
225 // a cacheline size.
226 _base = addr;
227
228 const int log_cacheline = exact_log2(DEFAULT_CACHE_LINE_SIZE);
229 int s = max_shift();
// Walk the shift down while a one-bit-smaller encoding range still covers
// len, but never below log2(cacheline size).
230 while (s > log_cacheline && ((size_t)nth_bit(narrow_klass_pointer_bits() + s - 1) > len)) {
231 s--;
232 }
233 _shift = s;
234
235 } else {
236
237 // Traditional (non-compact) header mode
// Upper bounds of the addresses reachable without a base (unscaled) and with
// a zero base plus maximum shift (zero-based).
238 const uintptr_t unscaled_max = nth_bit(narrow_klass_pointer_bits());
239 const uintptr_t zerobased_max = nth_bit(narrow_klass_pointer_bits() + max_shift());
240
241 #ifdef AARCH64
|
// NOTE(review): second occurrence of this fragment within the chunk (the chunk
// contains two numbered copies of the same file region). Tail of an
// initialization routine whose head is not visible; selects the narrowKlass
// geometry — *_coh constants for compact object headers, *_noncoh otherwise.
59 if (UseCompactObjectHeaders) {
60 _narrow_klass_pointer_bits = narrow_klass_pointer_bits_coh;
61 _max_shift = max_shift_coh;
62 } else {
63 _narrow_klass_pointer_bits = narrow_klass_pointer_bits_noncoh;
64 _max_shift = max_shift_noncoh;
65 }
66 }
67
68 #ifdef ASSERT
// Debug-only self-check of the encoding state established during initialization
// (second copy of this function within the chunk; this copy additionally pins
// the accepted narrow-klass bit widths). A condensed description of the state
// is rendered into a local buffer, which the ASSERT_HERE/ASSERT_HERE_2 macros
// append to every assert message.
69 void CompressedKlassPointers::sanity_check_after_initialization() {
70 // In expectation of an assert, prepare condensed info to be printed with the assert.
71 char tmp[256];
72 os::snprintf(tmp, sizeof(tmp), "klass range: " RANGE2FMT ","
73 " base " PTR_FORMAT ", shift %d, lowest/highest valid narrowKlass %u/%u",
74 RANGE2FMTARGS(_klass_range_start, _klass_range_end),
75 p2i(_base), _shift, _lowest_valid_narrow_klass_id, _highest_valid_narrow_klass_id);
76 #define ASSERT_HERE(cond) assert(cond, " (%s)", tmp);
77 #define ASSERT_HERE_2(cond, msg) assert(cond, msg " (%s)", tmp);
78
// Only 32-bit (legacy headers) and 19-bit (compact headers, in this revision)
// narrow klass pointers are sanctioned configurations.
79 // There is no technical reason preventing us from using other klass pointer bit lengths,
80 // but it should be a deliberate choice
81 ASSERT_HERE(_narrow_klass_pointer_bits == 32 || _narrow_klass_pointer_bits == 19);
82
// All fields must have left their pre-init sentinel value (-1 cast to the
// respective type) by now.
83 // All values must be inited
84 ASSERT_HERE(_max_shift != -1);
85 ASSERT_HERE(_klass_range_start != (address)-1);
86 ASSERT_HERE(_klass_range_end != (address)-1);
87 ASSERT_HERE(_lowest_valid_narrow_klass_id != (narrowKlass)-1);
88 ASSERT_HERE(_base != (address)-1);
89 ASSERT_HERE(_shift != -1);
90
91 const size_t klass_align = klass_alignment_in_bytes();
92
93 // must be aligned enough hold 64-bit data
94 ASSERT_HERE(is_aligned(klass_align, sizeof(uint64_t)));
95
96 // should be smaller than the minimum metaspace chunk size (soft requirement)
97 ASSERT_HERE(klass_align <= K);
98
99 ASSERT_HERE(_klass_range_end > _klass_range_start);
100
101 // Check that Klass range is fully engulfed in the encoding range
102 const address encoding_start = _base;
// NOTE(review): the function continues past this point — the remainder of the
// range/encoding checks is outside this chunk.
// NOTE(review): second copy of the reservation-helper tail; the enclosing
// definition's head is outside this chunk. Final attempt reserves within
// [2^32, 2^48); confirm the nth_bit(32) argument roles against the
// reserve_address_space_X declaration, which is not visible here.
205 return reserve_address_space_X(nth_bit(32), nth_bit(48), size, nth_bit(32), aslr);
206 }
207
// Establishes the narrowKlass encoding (base and shift) for the Klass range
// [addr, addr + len); exits the VM during initialization if the requested range
// exceeds what the encoding can cover. (Second copy of this function within the
// chunk; this copy adds an assert guarding experimental bit-width configs.)
// NOTE(review): the definition is cut off at the #ifdef AARCH64 below; the
// non-compact-header branch continues outside this chunk.
208 void CompressedKlassPointers::initialize(address addr, size_t len) {
209
210 if (len > max_klass_range_size()) {
211 stringStream ss;
212 ss.print("Class space size (%zu) exceeds the maximum possible size (%zu)",
213 len, max_klass_range_size());
// ss.base() is the backing character buffer of the stringStream, i.e. the
// formatted error message.
214 vm_exit_during_initialization(ss.base());
215 }
216
217 // Remember the Klass range:
218 _klass_range_start = addr;
219 _klass_range_end = addr + len;
220
221 // Calculate Base and Shift:
222
223 if (UseCompactObjectHeaders) {
224
// Guard: the range must fit the encoding even when (shift + num bits) < 32,
// which the max_klass_range_size() check above presumably does not cover in
// such experimental configurations.
225 // This handles the case that we - experimentally - reduce the number of
226 // class pointer bits further, such that (shift + num bits) < 32.
227 assert(len <= (size_t)nth_bit(narrow_klass_pointer_bits() + max_shift()),
228 "klass range size exceeds encoding, len: %zu, narrow_klass_pointer_bits: %d, max_shift: %d", len, narrow_klass_pointer_bits(), max_shift());
229
// NOTE(review): the bit width quoted below (19) should agree with
// narrow_klass_pointer_bits_coh; elsewhere in this file the same comment
// says 22-bit — reconcile which revision is current.
230 // In compact object header mode, with 19-bit narrowKlass, we don't attempt for
231 // zero-based mode. Instead, we set the base to the start of the klass range and
232 // then try for the smallest shift possible that still covers the whole range.
233 // The reason is that we want to avoid, if possible, shifts larger than
234 // a cacheline size.
235 _base = addr;
236
237 const int log_cacheline = exact_log2(DEFAULT_CACHE_LINE_SIZE);
238 int s = max_shift();
// Walk the shift down while a one-bit-smaller encoding range still covers
// len, but never below log2(cacheline size).
239 while (s > log_cacheline && ((size_t)nth_bit(narrow_klass_pointer_bits() + s - 1) > len)) {
240 s--;
241 }
242 _shift = s;
243
244 } else {
245
246 // Traditional (non-compact) header mode
// Upper bounds of the addresses reachable without a base (unscaled) and with
// a zero base plus maximum shift (zero-based).
247 const uintptr_t unscaled_max = nth_bit(narrow_klass_pointer_bits());
248 const uintptr_t zerobased_max = nth_bit(narrow_klass_pointer_bits() + max_shift());
249
250 #ifdef AARCH64
|