src/hotspot/share/oops/compressedKlass.cpp
size_t CompressedKlassPointers::_protection_zone_size = 0;
#ifdef _LP64
size_t CompressedKlassPointers::max_klass_range_size() {
! // We disallow klass range sizes larger than 4GB even if the encoding
! // range would allow for a larger Klass range (e.g. Base=zero, shift=3 -> 32GB).
! // That is because many CPU-specific compiler decodings rely on the shifted
! // narrow Klass not spilling over into the third quadrant of the 64-bit target
! // address, e.g. so that a 16-bit move can be used for a simplified base addition.
! return MIN2(4 * G, max_encoding_range_size());
}
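
For concreteness, here is a hedged standalone sketch of the arithmetic behind the old cap; MIN2 and G are modeled locally rather than taken from HotSpot's headers:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Model of the comment's example: Base=zero, 32-bit narrowKlass, shift=3.
      const uint64_t G = UINT64_C(1024) * 1024 * 1024;
      const uint64_t encoding_range = UINT64_C(1) << (32 + 3); // 32GB reachable by the encoding
      const uint64_t klass_range    = encoding_range < 4 * G ? encoding_range : 4 * G; // MIN2
      printf("encoding range: %llu GB, capped klass range: %llu GB\n",
             (unsigned long long)(encoding_range / G),
             (unsigned long long)(klass_range / G)); // 32 GB -> 4 GB
      return 0;
    }
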
void CompressedKlassPointers::pre_initialize() {
if (UseCompactObjectHeaders) {
_narrow_klass_pointer_bits = narrow_klass_pointer_bits_coh;
size_t CompressedKlassPointers::_protection_zone_size = 0;
#ifdef _LP64
size_t CompressedKlassPointers::max_klass_range_size() {
! #ifdef _LP64
! const size_t encoding_allows = nth_bit(narrow_klass_pointer_bits() + max_shift());
! assert(!UseCompactObjectHeaders || max_klass_range_size_coh == encoding_allows, "Sanity");
! constexpr size_t cap = 4 * G;
! return MIN2(encoding_allows, cap);
! #else
+ // 32-bit: only 32-bit "narrow" Klass pointers are allowed. If we ever support smaller
+ // narrow Klass pointers here, this code will need to be revised.
+ // We keep one page safety zone free to guard against size_t overflows on 32-bit. In practice
+ // this is irrelevant because these upper address space parts are not user-addressable on
+ // any of our 32-bit platforms.
+ return align_down(UINT_MAX, os::vm_page_size());
+ #endif
}
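
To make the new per-word-size control flow concrete, the following is a minimal sketch, assuming nth_bit(n) == 1 << n and a 4K page size; the helpers are modeled locally, not HotSpot's actual utilities:

    #include <cstdint>
    #include <cstdio>

    // Local models of the HotSpot helpers used above (sketch only).
    static uint64_t nth_bit(int n)                     { return UINT64_C(1) << n; }
    static uint64_t align_down(uint64_t v, uint64_t a) { return v & ~(a - 1); }

    static uint64_t max_klass_range_size_sketch(int narrow_bits, int max_shift, bool lp64) {
      const uint64_t G = UINT64_C(1024) * 1024 * 1024;
      if (lp64) {
        // 64-bit: whatever the encoding allows, capped at 4GB (see the old version's comment).
        const uint64_t encoding_allows = nth_bit(narrow_bits + max_shift);
        return encoding_allows < 4 * G ? encoding_allows : 4 * G; // MIN2(encoding_allows, cap)
      }
      // 32-bit: full 32-bit range minus one page as a safety zone against size_t overflow.
      return align_down(UINT32_MAX, 4096 /* assumed vm_page_size */);
    }

    int main() {
      printf("64-bit (32 bits, shift 3): 0x%llx\n",
             (unsigned long long)max_klass_range_size_sketch(32, 3, true));  // capped at 4GB
      printf("32-bit:                    0x%llx\n",
             (unsigned long long)max_klass_range_size_sketch(0, 0, false));  // 0xfffff000
      return 0;
    }
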
void CompressedKlassPointers::pre_initialize() {
if (UseCompactObjectHeaders) {
_narrow_klass_pointer_bits = narrow_klass_pointer_bits_coh;
RANGE2FMTARGS(_klass_range_start, _klass_range_end),
p2i(_base), _shift, _lowest_valid_narrow_klass_id, _highest_valid_narrow_klass_id);
#define ASSERT_HERE(cond) assert(cond, " (%s)", tmp);
#define ASSERT_HERE_2(cond, msg) assert(cond, msg " (%s)", tmp);
+ // There is no technical reason preventing us from using other klass pointer bit lengths,
+ // but it should be a deliberate choice.
+ ASSERT_HERE(_narrow_klass_pointer_bits == 32 || _narrow_klass_pointer_bits == 19);
+
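
As a rough sense of scale for the two asserted widths, a hedged back-of-the-envelope sketch (the detail of one value being reserved for null is an assumption here, not taken from this file):

    #include <cstdio>

    int main() {
      // 19-bit narrowKlass (compact object headers): 2^19 encodable values.
      const unsigned long long coh_values    = 1ull << 19;   // 524288
      // 32-bit narrowKlass (legacy mode): 2^32 encodable values.
      const unsigned long long legacy_values = 1ull << 32;   // ~4.29 billion
      // Assumption: reserving one value for null leaves (N - 1) usable klass IDs.
      printf("COH: %llu values, legacy: %llu values\n", coh_values, legacy_values);
      return 0;
    }
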
// All values must be inited
ASSERT_HERE(_max_shift != -1);
ASSERT_HERE(_klass_range_start != (address)-1);
ASSERT_HERE(_klass_range_end != (address)-1);
ASSERT_HERE(_lowest_valid_narrow_klass_id != (narrowKlass)-1);
// Calculate Base and Shift:
if (UseCompactObjectHeaders) {
! // In compact object header mode, with 22-bit narrowKlass, we don't attempt
// zero-based mode. Instead, we set the base to the start of the klass range and
// then try for the smallest shift possible that still covers the whole range.
// The reason is that we want to avoid, if possible, shifts larger than
// a cacheline size.
_base = addr;
// Calculate Base and Shift:
if (UseCompactObjectHeaders) {
! // This handles the case where we experimentally reduce the number of class
+ // pointer bits further, such that (shift + number of bits) < 32.
+ assert(len <= (size_t)nth_bit(narrow_klass_pointer_bits() + max_shift()),
+ "klass range size exceeds encoding, len: %zu, narrow_klass_pointer_bits: %d, max_shift: %d", len, narrow_klass_pointer_bits(), max_shift());
+
+ // In compact object header mode, with 19-bit narrowKlass, we don't attempt
// zero-based mode. Instead, we set the base to the start of the klass range and
// then try for the smallest shift possible that still covers the whole range.
// The reason is that we want to avoid, if possible, shifts larger than
// a cacheline size.
_base = addr;
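
The comment above describes picking the smallest shift that still covers the whole range; here is a hedged sketch of what such a search could look like (the bit counts, the max shift, and the helper name are assumptions, not the actual HotSpot code):

    #include <cstdint>
    #include <cstdio>

    // Hypothetical sketch: with base == start of the klass range, find the smallest
    // shift s such that narrow_bits + s bits cover len bytes. Small shifts are
    // preferred, ideally no larger than log2 of the cacheline size.
    static int smallest_covering_shift(uint64_t len, int narrow_bits, int max_shift) {
      for (int s = 0; s <= max_shift; s++) {
        if ((UINT64_C(1) << (narrow_bits + s)) >= len) {
          return s;
        }
      }
      return max_shift; // range too large; the assert earlier guards against this
    }

    int main() {
      // Example: 19-bit narrowKlass, 128MB range -> shift 8, since 2^(19+8) = 128MB.
      const uint64_t len = UINT64_C(128) * 1024 * 1024;
      printf("shift: %d\n", smallest_covering_shift(len, 19, /* assumed max shift */ 10));
      return 0;
    }
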