8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "logging/log.hpp"
27 #include "memory/metaspace.hpp"
28 #include "oops/compressedKlass.hpp"
29 #include "runtime/globals.hpp"
30 #include "runtime/os.hpp"
31 #include "utilities/debug.hpp"
32 #include "utilities/globalDefinitions.hpp"
33 #include "utilities/ostream.hpp"
34
// Narrow Klass encoding parameters; set once during initialization and
// read-only afterwards.
address CompressedKlassPointers::_base = nullptr;
int CompressedKlassPointers::_shift = 0;
size_t CompressedKlassPointers::_range = 0;
38
39 #ifdef _LP64
40
#ifdef ASSERT
// Debug-only check: the encoding (base, shift) must cover the whole Klass
// range [addr, addr+len). A 32-bit narrowKlass shifted left by 'shift' can
// address nth_bit(32 + shift) bytes above 'base'.
void CompressedKlassPointers::assert_is_valid_encoding(address addr, size_t len, address base, int shift) {
  const address encoding_end = base + nth_bit(32 + shift);
  const address klass_range_end = addr + len;
  assert(encoding_end >= klass_range_end, "Encoding (base=" PTR_FORMAT ", shift=%d) does not "
         "fully cover the class range " PTR_FORMAT "-" PTR_FORMAT, p2i(base), shift, p2i(addr), p2i(addr + len));
}
#endif
47
48 // Given a klass range [addr, addr+len) and a given encoding scheme, assert that this scheme covers the range, then
49 // set this encoding scheme. Used by CDS at runtime to re-instate the scheme used to pre-compute klass ids for
50 // archived heap objects.
51 void CompressedKlassPointers::initialize_for_given_encoding(address addr, size_t len, address requested_base, int requested_shift) {
52 address const end = addr + len;
53
54 const int narrow_klasspointer_bits = sizeof(narrowKlass) * 8;
55 const size_t encoding_range_size = nth_bit(narrow_klasspointer_bits + requested_shift);
56 address encoding_range_end = requested_base + encoding_range_size;
57
58 // Note: it would be technically valid for the encoding base to precede the start of the Klass range. But we only call
59 // this function from CDS, and therefore know this to be true.
60 assert(requested_base == addr, "Invalid requested base");
61 assert(encoding_range_end >= end, "Encoding does not cover the full Klass range");
62
63 _base = requested_base;
64 _shift = requested_shift;
65 _range = encoding_range_size;
66
67 DEBUG_ONLY(assert_is_valid_encoding(addr, len, _base, _shift);)
68 }
69
70 char* CompressedKlassPointers::reserve_address_space_X(uintptr_t from, uintptr_t to, size_t size, size_t alignment, bool aslr) {
71 alignment = MAX2(Metaspace::reserve_alignment(), alignment);
72 return os::attempt_reserve_memory_between((char*)from, (char*)to, size, alignment, aslr);
73 }
74
75 char* CompressedKlassPointers::reserve_address_space_for_unscaled_encoding(size_t size, bool aslr) {
76 return reserve_address_space_X(0, nth_bit(32), size, Metaspace::reserve_alignment(), aslr);
77 }
78
79 char* CompressedKlassPointers::reserve_address_space_for_zerobased_encoding(size_t size, bool aslr) {
80 return reserve_address_space_X(nth_bit(32), nth_bit(32 + LogKlassAlignmentInBytes), size, Metaspace::reserve_alignment(), aslr);
81 }
82
83 char* CompressedKlassPointers::reserve_address_space_for_16bit_move(size_t size, bool aslr) {
84 return reserve_address_space_X(nth_bit(32), nth_bit(48), size, nth_bit(32), aslr);
85 }
86
87 #if !defined(AARCH64) || defined(ZERO)
88 // On aarch64 we have an own version; all other platforms use the default version
89 void CompressedKlassPointers::initialize(address addr, size_t len) {
90 // The default version of this code tries, in order of preference:
91 // -unscaled (base=0 shift=0)
92 // -zero-based (base=0 shift>0)
93 // -nonzero-base (base>0 shift=0)
94 // Note that base>0 shift>0 should never be needed, since the klass range will
95 // never exceed 4GB.
96 constexpr uintptr_t unscaled_max = nth_bit(32);
97 assert(len <= unscaled_max, "Klass range larger than 32 bits?");
98
99 constexpr uintptr_t zerobased_max = nth_bit(32 + LogKlassAlignmentInBytes);
100
101 address const end = addr + len;
102 if (end <= (address)unscaled_max) {
103 _base = nullptr;
104 _shift = 0;
105 } else {
106 if (end <= (address)zerobased_max) {
107 _base = nullptr;
108 _shift = LogKlassAlignmentInBytes;
109 } else {
110 _base = addr;
111 _shift = 0;
112 }
113 }
114 _range = end - _base;
115
116 DEBUG_ONLY(assert_is_valid_encoding(addr, len, _base, _shift);)
117 }
118 #endif // !AARCH64 || ZERO
119
// Print the currently active narrow Klass encoding (base, shift, range) to st.
void CompressedKlassPointers::print_mode(outputStream* st) {
  st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d, "
               "Narrow klass range: " SIZE_FORMAT_X, p2i(base()), shift(),
               range());
}
125
126 #endif // _LP64
|
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "logging/log.hpp"
27 #include "memory/metaspace.hpp"
28 #include "oops/klass.hpp"
29 #include "oops/compressedKlass.inline.hpp"
30 #include "runtime/globals.hpp"
31 #include "runtime/java.hpp"
32 #include "runtime/os.hpp"
33 #include "utilities/debug.hpp"
34 #include "utilities/globalDefinitions.hpp"
35 #include "utilities/ostream.hpp"
36
// Narrow Klass pointer geometry, chosen in pre_initialize() depending on
// UseCompactObjectHeaders. -1 means "not yet initialized".
int CompressedKlassPointers::_tiny_cp = -1;
int CompressedKlassPointers::_narrow_klass_pointer_bits = -1;
int CompressedKlassPointers::_max_shift = -1;
#ifdef ASSERT
// Debug-only bookkeeping, used by sanity_check_after_initialization().
address CompressedKlassPointers::_klass_range_start = (address)-1;
address CompressedKlassPointers::_klass_range_end = (address)-1;
narrowKlass CompressedKlassPointers::_lowest_valid_narrow_klass_id = (narrowKlass)-1;
narrowKlass CompressedKlassPointers::_highest_valid_narrow_klass_id = (narrowKlass)-1;
#endif

// Encoding parameters; (address)-1 / -1 mean "not yet initialized".
address CompressedKlassPointers::_base = (address)-1;
int CompressedKlassPointers::_shift = -1;
size_t CompressedKlassPointers::_range = (size_t)-1;
50
51 #ifdef _LP64
52
53 // Returns the maximum encoding range that can be covered with the currently
54 // chosen nKlassID geometry (nKlass bit size, max shift)
55 size_t CompressedKlassPointers::max_encoding_range_size() {
56 // Whatever the nKlass geometry is, we don't support cases where the offset
57 // into the Klass encoding range (the shifted nKlass) exceeds 32 bits. That
58 // is because many CPU-specific decoding functions use e.g. 16-bit moves to
59 // combine base and offset.
60 constexpr int max_preshifted_nklass_bits = 32;
61 return nth_bit(MIN2(max_preshifted_nklass_bits,
62 narrow_klass_pointer_bits() + max_shift()));
63 }
64
65 void CompressedKlassPointers::pre_initialize() {
66 if (UseCompactObjectHeaders) {
67 _tiny_cp = 1;
68 _narrow_klass_pointer_bits = narrow_klass_pointer_bits_tinycp;
69 _max_shift = max_shift_tinycp;
70 } else {
71 _tiny_cp = 0;
72 _narrow_klass_pointer_bits = narrow_klass_pointer_bits_legacy;
73 _max_shift = max_shift_legacy;
74 }
75 }
76
77 #ifdef ASSERT
// Debug-only: verify that the chosen encoding (base/shift) is consistent with
// the recorded Klass range and with the precomputed lowest/highest valid
// narrowKlass ids. Asserts on any inconsistency; a condensed dump of the
// relevant state is appended to every assert message.
void CompressedKlassPointers::sanity_check_after_initialization() {
  // In expectation of an assert, prepare condensed info to be printed with the assert.
  char tmp[256];
  os::snprintf(tmp, sizeof(tmp), PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT " %d " SIZE_FORMAT " %u %u",
               p2i(_klass_range_start), p2i(_klass_range_end), p2i(_base), _shift, _range,
               _lowest_valid_narrow_klass_id, _highest_valid_narrow_klass_id);
#define ASSERT_HERE(cond) assert(cond, " (%s)", tmp);
#define ASSERT_HERE_2(cond, msg) assert(cond, msg " (%s)", tmp);

  // All values must be inited
  ASSERT_HERE(_max_shift != -1);
  ASSERT_HERE(_klass_range_start != (address)-1);
  ASSERT_HERE(_klass_range_end != (address)-1);
  ASSERT_HERE(_lowest_valid_narrow_klass_id != (narrowKlass)-1);
  ASSERT_HERE(_base != (address)-1);
  ASSERT_HERE(_shift != -1);
  ASSERT_HERE(_range != (size_t)-1);

  const size_t klab = klass_alignment_in_bytes();
  // must be aligned enough to hold 64-bit data
  ASSERT_HERE(is_aligned(klab, sizeof(uint64_t)));

  // should be smaller than the minimum metaspace chunk size (soft requirement)
  ASSERT_HERE(klab <= K);

  // Check that Klass range is fully engulfed in the encoding range
  ASSERT_HERE(_klass_range_end > _klass_range_start);

  const address encoding_end = _base + nth_bit(narrow_klass_pointer_bits() + _shift);
  ASSERT_HERE_2(_klass_range_start >= _base && _klass_range_end <= encoding_end,
                "Resulting encoding range does not fully cover the class range");

  // Check that Klass range is aligned to Klass alignment. That should never be an issue since we mmap the
  // relevant regions and klass alignment - tied to smallest metachunk size of 1K - will always be smaller
  // than smallest page size of 4K.
  ASSERT_HERE_2(is_aligned(_klass_range_start, klab) && is_aligned(_klass_range_end, klab),
                "Klass range must start and end at a properly aligned address");

  // Check that lowest and highest possible narrowKlass values make sense
  ASSERT_HERE_2(_lowest_valid_narrow_klass_id > 0, "Null is not a valid narrowKlass");
  ASSERT_HERE(_highest_valid_narrow_klass_id > _lowest_valid_narrow_klass_id);

  // Decode-then-encode must round-trip for the lowest valid id, and it must map to
  // the first valid Klass position (start-of-range + klab, since 0 encodes null).
  Klass* k1 = decode_not_null_without_asserts(_lowest_valid_narrow_klass_id, _base, _shift);
  ASSERT_HERE_2((address)k1 == _klass_range_start + klab, "Not lowest");
  narrowKlass nk1 = encode_not_null_without_asserts(k1, _base, _shift);
  ASSERT_HERE_2(nk1 == _lowest_valid_narrow_klass_id, "not reversible");

  Klass* k2 = decode_not_null_without_asserts(_highest_valid_narrow_klass_id, _base, _shift);
  // _highest_valid_narrow_klass_id must be decoded to the highest theoretically possible
  // valid Klass* position in range, if we assume minimal Klass size
  ASSERT_HERE((address)k2 < _klass_range_end);
  ASSERT_HERE_2(align_up(((address)k2 + sizeof(Klass)), klab) >= _klass_range_end, "Not highest");
  narrowKlass nk2 = encode_not_null_without_asserts(k2, _base, _shift);
  ASSERT_HERE_2(nk2 == _highest_valid_narrow_klass_id, "not reversible");

#ifdef AARCH64
  // On aarch64, we never expect a shift value > 0 in legacy mode
  ASSERT_HERE_2(tiny_classpointer_mode() || _shift == 0, "Shift > 0 in legacy mode?");
#endif
#undef ASSERT_HERE
#undef ASSERT_HERE_2
}
140
// Debug-only: precompute the lowest and highest narrowKlass values that can
// legally occur for the current Klass range and encoding.
void CompressedKlassPointers::calc_lowest_highest_narrow_klass_id() {
  // Given a Klass range, calculate lowest and highest narrowKlass.
  const size_t klab = klass_alignment_in_bytes();
  // Note that 0 is not a valid narrowKlass, and Metaspace prevents us for that reason from allocating at
  // the very start of class space. So the very first valid Klass position is start-of-range + klab.
  _lowest_valid_narrow_klass_id =
      (narrowKlass) (((uintptr_t)(_klass_range_start - _base) + klab) >> _shift);
  // Highest valid id: the last klab-aligned position in range at which a
  // minimally-sized Klass could still start.
  address highest_possible_klass = align_down(_klass_range_end - sizeof(Klass), klab);
  _highest_valid_narrow_klass_id = (narrowKlass) ((uintptr_t)(highest_possible_klass - _base) >> _shift);
}
151 #endif // ASSERT
152
153 // Given a klass range [addr, addr+len) and a given encoding scheme, assert that this scheme covers the range, then
154 // set this encoding scheme. Used by CDS at runtime to re-instate the scheme used to pre-compute klass ids for
155 // archived heap objects.
156 void CompressedKlassPointers::initialize_for_given_encoding(address addr, size_t len, address requested_base, int requested_shift) {
157 address const end = addr + len;
158
159 if (len > max_encoding_range_size()) {
160 stringStream ss;
161 ss.print("Class space size and CDS archive size combined (%zu) "
162 "exceed the maximum possible size (%zu)",
163 len, max_encoding_range_size());
164 vm_exit_during_initialization(ss.base());
165 }
166
167 const size_t encoding_range_size = nth_bit(narrow_klass_pointer_bits() + requested_shift);
168 address encoding_range_end = requested_base + encoding_range_size;
169
170 // Note: it would be technically valid for the encoding base to precede the start of the Klass range. But we only call
171 // this function from CDS, and therefore know this to be true.
172 assert(requested_base == addr, "Invalid requested base");
173
174 _base = requested_base;
175 _shift = requested_shift;
176 _range = encoding_range_size;
177
178 #ifdef ASSERT
179 _klass_range_start = addr;
180 _klass_range_end = addr + len;
181 calc_lowest_highest_narrow_klass_id();
182 sanity_check_after_initialization();
183 #endif
184
185 DEBUG_ONLY(sanity_check_after_initialization();)
186 }
187
188 char* CompressedKlassPointers::reserve_address_space_X(uintptr_t from, uintptr_t to, size_t size, size_t alignment, bool aslr) {
189 alignment = MAX2(Metaspace::reserve_alignment(), alignment);
190 return os::attempt_reserve_memory_between((char*)from, (char*)to, size, alignment, aslr);
191 }
192
193 char* CompressedKlassPointers::reserve_address_space_for_unscaled_encoding(size_t size, bool aslr) {
194 const size_t unscaled_max = nth_bit(narrow_klass_pointer_bits());
195 return reserve_address_space_X(0, unscaled_max, size, Metaspace::reserve_alignment(), aslr);
196 }
197
198 char* CompressedKlassPointers::reserve_address_space_for_zerobased_encoding(size_t size, bool aslr) {
199 const size_t unscaled_max = nth_bit(narrow_klass_pointer_bits());
200 const size_t zerobased_max = nth_bit(narrow_klass_pointer_bits() + max_shift());
201 return reserve_address_space_X(unscaled_max, zerobased_max, size, Metaspace::reserve_alignment(), aslr);
202 }
203
204 char* CompressedKlassPointers::reserve_address_space_for_16bit_move(size_t size, bool aslr) {
205 return reserve_address_space_X(nth_bit(32), nth_bit(48), size, nth_bit(32), aslr);
206 }
207
// Choose and install the narrow Klass encoding (base, shift, range) for the
// Klass range [addr, addr+len). Exits the VM if the range cannot possibly be
// covered by the current nKlass geometry.
void CompressedKlassPointers::initialize(address addr, size_t len) {

  if (len > max_encoding_range_size()) {
    stringStream ss;
    ss.print("Class space size (%zu) exceeds the maximum possible size (%zu)",
             len, max_encoding_range_size());
    vm_exit_during_initialization(ss.base());
  }

  // Give CPU a shot at a specialized init sequence
#ifndef ZERO
  if (pd_initialize(addr, len)) {
    return;
  }
#endif

  if (tiny_classpointer_mode()) {

    // In tiny classpointer mode, we don't attempt for zero-based mode.
    // Instead, we set the base to the start of the klass range and then try
    // for the smallest shift possible that still covers the whole range.
    // The reason is that we want to avoid, if possible, shifts larger than
    // a cacheline size.
    _base = addr;
    _range = len;

    constexpr int log_cacheline = 6;
    int s = max_shift();
    // Shrink the shift while the next-smaller shift would still cover len
    // (nth_bit(bits + s - 1) is the encoding range size for shift s - 1).
    while (s > log_cacheline && ((size_t)nth_bit(narrow_klass_pointer_bits() + s - 1) > len)) {
      s--;
    }
    _shift = s;

  } else {

    // In legacy mode, we try, in order of preference:
    // -unscaled (base=0 shift=0)
    // -zero-based (base=0 shift>0)
    // -nonzero-base (base>0 shift=0)
    // Note that base>0 shift>0 should never be needed, since the klass range will
    // never exceed 4GB.
    const uintptr_t unscaled_max = nth_bit(narrow_klass_pointer_bits());
    const uintptr_t zerobased_max = nth_bit(narrow_klass_pointer_bits() + max_shift());

    address const end = addr + len;
    if (end <= (address)unscaled_max) {
      _base = nullptr;
      _shift = 0;
    } else {
      if (end <= (address)zerobased_max) {
        _base = nullptr;
        _shift = max_shift();
      } else {
        _base = addr;
        _shift = 0;
      }
    }
    // The encoding range spans from base up to the end of the Klass range.
    _range = end - _base;

  }

#ifdef ASSERT
  _klass_range_start = addr;
  _klass_range_end = addr + len;
  calc_lowest_highest_narrow_klass_id();
  sanity_check_after_initialization();
#endif
}
276
// Print the narrow Klass pointer geometry and, if already initialized, the
// chosen encoding. Debug builds additionally print the Klass range and the
// valid narrowKlass id bounds.
void CompressedKlassPointers::print_mode(outputStream* st) {
  st->print_cr("UseCompressedClassPointers %d, UseCompactObjectHeaders %d, "
               "narrow klass pointer bits %d, max shift %d",
               UseCompressedClassPointers, UseCompactObjectHeaders,
               _narrow_klass_pointer_bits, _max_shift);
  // _base == (address)-1 is the "not yet initialized" sentinel.
  if (_base == (address)-1) {
    st->print_cr("Narrow klass encoding not initialized");
    return;
  }
  st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d, "
               "Narrow klass range: " SIZE_FORMAT_X, p2i(base()), shift(),
               range());
#ifdef ASSERT
  st->print_cr("Klass range: [" PTR_FORMAT "," PTR_FORMAT ")",
               p2i(_klass_range_start), p2i(_klass_range_end));
  st->print_cr("Lowest valid nklass id: %u Highest valid nklass id: %u",
               _lowest_valid_narrow_klass_id, _highest_valid_narrow_klass_id);
#endif
}
296
297 #endif // _LP64
|