src/hotspot/share/oops/compressedKlass.cpp

*** 23,104 ***
   */
  
  #include "precompiled.hpp"
  #include "logging/log.hpp"
  #include "memory/metaspace.hpp"
! #include "oops/compressedKlass.hpp"
  #include "runtime/globals.hpp"
  #include "runtime/os.hpp"
  #include "utilities/debug.hpp"
  #include "utilities/globalDefinitions.hpp"
  #include "utilities/ostream.hpp"
  
! address CompressedKlassPointers::_base = nullptr;
! int CompressedKlassPointers::_shift = 0;
! size_t CompressedKlassPointers::_range = 0;
  
  #ifdef _LP64
  
! #ifdef ASSERT
! void CompressedKlassPointers::assert_is_valid_encoding(address addr, size_t len, address base, int shift) {
!   assert(base + nth_bit(32 + shift) >= addr + len, "Encoding (base=" PTR_FORMAT ", shift=%d) does not "
!          "fully cover the class range " PTR_FORMAT "-" PTR_FORMAT, p2i(base), shift, p2i(addr), p2i(addr + len));
  }
  #endif
  
  // Given a klass range [addr, addr+len) and a given encoding scheme, assert that this scheme covers the range, then
  // set this encoding scheme. Used by CDS at runtime to re-instate the scheme used to pre-compute klass ids for
  // archived heap objects.
  void CompressedKlassPointers::initialize_for_given_encoding(address addr, size_t len, address requested_base, int requested_shift) {
-   address const end = addr + len;
  
!   const int narrow_klasspointer_bits = sizeof(narrowKlass) * 8;
!   const size_t encoding_range_size = nth_bit(narrow_klasspointer_bits + requested_shift);
-   address encoding_range_end = requested_base + encoding_range_size;
  
    // Note: it would be technically valid for the encoding base to precede the start of the Klass range. But since we
    // only call this function from CDS, we know the base always equals the range start, which we assert below.
    assert(requested_base == addr, "Invalid requested base");
-   assert(encoding_range_end >= end, "Encoding does not cover the full Klass range");
  
    _base = requested_base;
    _shift = requested_shift;
    _range = encoding_range_size;
  
!   DEBUG_ONLY(assert_is_valid_encoding(addr, len, _base, _shift);)
  }
  
  char* CompressedKlassPointers::reserve_address_space_X(uintptr_t from, uintptr_t to, size_t size, size_t alignment, bool aslr) {
    alignment = MAX2(Metaspace::reserve_alignment(), alignment);
    return os::attempt_reserve_memory_between((char*)from, (char*)to, size, alignment, aslr);
  }
  
  char* CompressedKlassPointers::reserve_address_space_for_unscaled_encoding(size_t size, bool aslr) {
!   return reserve_address_space_X(0, nth_bit(32), size, Metaspace::reserve_alignment(), aslr);
  }
  
  char* CompressedKlassPointers::reserve_address_space_for_zerobased_encoding(size_t size, bool aslr) {
!   return reserve_address_space_X(nth_bit(32), nth_bit(32 + LogKlassAlignmentInBytes), size, Metaspace::reserve_alignment(), aslr);
  }
  
  char* CompressedKlassPointers::reserve_address_space_for_16bit_move(size_t size, bool aslr) {
    return reserve_address_space_X(nth_bit(32), nth_bit(48), size, nth_bit(32), aslr);
  }
  
- #if !defined(AARCH64) || defined(ZERO)
- // On aarch64 we have our own version; all other platforms use the default version
  void CompressedKlassPointers::initialize(address addr, size_t len) {
-   // The default version of this code tries, in order of preference:
-   // -unscaled    (base=0 shift=0)
-   // -zero-based  (base=0 shift>0)
-   // -nonzero-base (base>0 shift=0)
-   // Note that base>0 shift>0 should never be needed, since the klass range will
-   // never exceed 4GB.
-   constexpr uintptr_t unscaled_max = nth_bit(32);
-   assert(len <= unscaled_max, "Klass range larger than 32 bits?");
  
!   constexpr uintptr_t zerobased_max = nth_bit(32 + LogKlassAlignmentInBytes);
  
-   address const end = addr + len;
-   if (end <= (address)unscaled_max) {
-     _base = nullptr;
-     _shift = 0;
    } else {
!     if (end <= (address)zerobased_max) {
        _base = nullptr;
-       _shift = LogKlassAlignmentInBytes;
-     } else {
-       _base = addr;
        _shift = 0;
      }
    }
-   _range = end - _base;
  
!   DEBUG_ONLY(assert_is_valid_encoding(addr, len, _base, _shift);)
  }
- #endif // !AARCH64 || ZERO
  
  void CompressedKlassPointers::print_mode(outputStream* st) {
    st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d, "
                 "Narrow klass range: " SIZE_FORMAT_X, p2i(base()), shift(),
                 range());
  }
  
  #endif // _LP64
--- 23,275 ---
   */
  
  #include "precompiled.hpp"
  #include "logging/log.hpp"
  #include "memory/metaspace.hpp"
! #include "oops/klass.hpp"
+ #include "oops/compressedKlass.inline.hpp"
  #include "runtime/globals.hpp"
+ #include "runtime/java.hpp"
  #include "runtime/os.hpp"
  #include "utilities/debug.hpp"
  #include "utilities/globalDefinitions.hpp"
  #include "utilities/ostream.hpp"
  
! int CompressedKlassPointers::_tiny_cp = -1;
! int CompressedKlassPointers::_narrow_klass_pointer_bits = -1;
! int CompressedKlassPointers::_max_shift = -1;
+ #ifdef ASSERT
+ address CompressedKlassPointers::_klass_range_start = (address)-1;
+ address CompressedKlassPointers::_klass_range_end = (address)-1;
+ narrowKlass CompressedKlassPointers::_lowest_valid_narrow_klass_id = (narrowKlass)-1;
+ narrowKlass CompressedKlassPointers::_highest_valid_narrow_klass_id = (narrowKlass)-1;
+ #endif
+ 
+ address CompressedKlassPointers::_base = (address)-1;
+ int CompressedKlassPointers::_shift = -1;
+ size_t CompressedKlassPointers::_range = (size_t)-1;
  
  #ifdef _LP64
  
! // Returns the maximum encoding range that can be covered with the currently
! // chosen nKlassID geometry (nKlass bit size, max shift)
! size_t CompressedKlassPointers::max_encoding_range_size() {
!   // Whatever the nKlass geometry is, we don't support cases where the offset
+   // into the Klass encoding range (the shifted nKlass) exceeds 32 bits. That
+   // is because many CPU-specific decoding functions use e.g. 16-bit moves to
+   // combine base and offset.
+   constexpr int max_preshifted_nklass_bits = 32;
+   return nth_bit(MIN2(max_preshifted_nklass_bits,
+                       narrow_klass_pointer_bits() + max_shift()));
+ }
+ 
+ void CompressedKlassPointers::pre_initialize() {
+   if (UseCompactObjectHeaders) {
+     _tiny_cp = 1;
+     _narrow_klass_pointer_bits = narrow_klass_pointer_bits_tinycp;
+     _max_shift = max_shift_tinycp;
+   } else {
+     _tiny_cp = 0;
+     _narrow_klass_pointer_bits = narrow_klass_pointer_bits_legacy;
+     _max_shift = max_shift_legacy;
+   }
  }
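
For orientation, here is what the two nKlassID geometries amount to in numbers. The following standalone sketch (not part of the patch) mirrors max_encoding_range_size(); the constant values (32 bits/shift 3 for legacy, 22 bits/shift 10 for tiny class pointers) are assumptions about the compressedKlass.hpp definitions, which this webrev does not show:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
      struct Mode { const char* name; int bits; int max_shift; };
      const Mode modes[] = {
        { "legacy", 32, 3  },  // assumed narrow_klass_pointer_bits_legacy / max_shift_legacy
        { "tinycp", 22, 10 },  // assumed narrow_klass_pointer_bits_tinycp / max_shift_tinycp
      };
      for (const Mode& m : modes) {
        // Mirrors max_encoding_range_size(): the shifted nKlass offset is capped at 32 bits.
        const int n = std::min(32, m.bits + m.max_shift);
        printf("%s: nth_bit(%d) = %llu GB\n", m.name, n,
               (unsigned long long)((uint64_t{1} << n) >> 30));
      }
      return 0;
    }

Note that both geometries cap out at the same 4 GB encoding range: legacy because 32 + 3 exceeds the 32-bit offset cap, tiny class pointers because 22 + 10 lands exactly on it.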
+ 
+ #ifdef ASSERT
+ void CompressedKlassPointers::sanity_check_after_initialization() {
+   // In case one of the asserts below fires, prepare condensed info to be printed alongside it.
+   char tmp[256];
+   os::snprintf(tmp, sizeof(tmp), PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT " %d " SIZE_FORMAT " %u %u",
+       p2i(_klass_range_start), p2i(_klass_range_end), p2i(_base), _shift, _range,
+       _lowest_valid_narrow_klass_id, _highest_valid_narrow_klass_id);
+ #define ASSERT_HERE(cond) assert(cond, " (%s)", tmp);
+ #define ASSERT_HERE_2(cond, msg) assert(cond, msg " (%s)", tmp);
+ 
+   // All values must be initialized
+   ASSERT_HERE(_max_shift != -1);
+   ASSERT_HERE(_klass_range_start != (address)-1);
+   ASSERT_HERE(_klass_range_end != (address)-1);
+   ASSERT_HERE(_lowest_valid_narrow_klass_id != (narrowKlass)-1);
+   ASSERT_HERE(_highest_valid_narrow_klass_id != (narrowKlass)-1);
+   ASSERT_HERE(_base != (address)-1);
+   ASSERT_HERE(_shift != -1);
+   ASSERT_HERE(_range != (size_t)-1);
+ 
+   const size_t klab = klass_alignment_in_bytes();
+   // must be aligned enough to hold 64-bit data
+   ASSERT_HERE(is_aligned(klab, sizeof(uint64_t)));
+ 
+   // should be smaller than the minimum metaspace chunk size (soft requirement)
+   ASSERT_HERE(klab <= K);
+ 
+   // Check that the Klass range is fully contained within the encoding range
+   ASSERT_HERE(_klass_range_end > _klass_range_start);
+ 
+   const address encoding_end = _base + nth_bit(narrow_klass_pointer_bits() + _shift);
+   ASSERT_HERE_2(_klass_range_start >= _base && _klass_range_end <= encoding_end,
+                 "Resulting encoding range does not fully cover the class range");
+ 
+   // Check that the Klass range is aligned to Klass alignment. This should never be an issue since we mmap the
+   // relevant regions, and klass alignment (tied to the smallest metachunk size of 1K) will always be smaller
+   // than the smallest page size of 4K.
+   ASSERT_HERE_2(is_aligned(_klass_range_start, klab) && is_aligned(_klass_range_end, klab),
+                 "Klass range must start and end at a properly aligned address");
+ 
+   // Check that lowest and highest possible narrowKlass values make sense
+   ASSERT_HERE_2(_lowest_valid_narrow_klass_id > 0, "Null is not a valid narrowKlass");
+   ASSERT_HERE(_highest_valid_narrow_klass_id > _lowest_valid_narrow_klass_id);
+ 
+   Klass* k1 = decode_not_null_without_asserts(_lowest_valid_narrow_klass_id, _base, _shift);
+   ASSERT_HERE_2((address)k1 == _klass_range_start + klab, "Not lowest");
+   narrowKlass nk1 = encode_not_null_without_asserts(k1, _base, _shift);
+   ASSERT_HERE_2(nk1 == _lowest_valid_narrow_klass_id, "not reversible");
+ 
+   Klass* k2 = decode_not_null_without_asserts(_highest_valid_narrow_klass_id, _base, _shift);
+   // _highest_valid_narrow_klass_id must be decoded to the highest theoretically possible
+   // valid Klass* position in range, if we assume minimal Klass size
+   ASSERT_HERE((address)k2 < _klass_range_end);
+   ASSERT_HERE_2(align_up(((address)k2 + sizeof(Klass)), klab) >= _klass_range_end, "Not highest");
+   narrowKlass nk2 = encode_not_null_without_asserts(k2, _base, _shift);
+   ASSERT_HERE_2(nk2 == _highest_valid_narrow_klass_id, "not reversible");
+ 
+ #ifdef AARCH64
+   // On aarch64, we never expect a shift value > 0 in legacy mode
+   ASSERT_HERE_2(tiny_classpointer_mode() || _shift == 0, "Shift > 0 in legacy mode?");
  #endif
+ #undef ASSERT_HERE
+ #undef ASSERT_HERE_2
+ }
+ 
+ void CompressedKlassPointers::calc_lowest_highest_narrow_klass_id() {
+   // Given a Klass range, calculate lowest and highest narrowKlass.
+   const size_t klab = klass_alignment_in_bytes();
+   // Note that 0 is not a valid narrowKlass; for that reason, Metaspace prevents us from allocating at
+   // the very start of class space. So the very first valid Klass position is start-of-range + klab.
+   _lowest_valid_narrow_klass_id =
+       (narrowKlass) (((uintptr_t)(_klass_range_start - _base) + klab) >> _shift);
+   address highest_possible_klass = align_down(_klass_range_end - sizeof(Klass), klab);
+   _highest_valid_narrow_klass_id = (narrowKlass) ((uintptr_t)(highest_possible_klass - _base) >> _shift);
+ }
+ #endif // ASSERT
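
To make the boundary arithmetic concrete, a worked example with hypothetical numbers (a sketch, not part of the patch; 512 bytes stands in for sizeof(Klass)):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t base  = 0x0800000000ULL;      // hypothetical encoding base
      const uint64_t start = base;                 // klass range starts at the base
      const uint64_t end   = start + (1ULL << 30); // 1 GB klass range
      const int      shift = 10;
      const uint64_t klab  = 1ULL << shift;        // klass alignment in bytes (1 KB)
      const uint64_t klass_size = 512;             // stand-in for sizeof(Klass)

      // First valid Klass position is start + klab, since 0 is not a valid narrowKlass.
      const uint32_t lowest = (uint32_t)(((start - base) + klab) >> shift);
      // Highest: the last klab-aligned slot that still fits a Klass before 'end'.
      const uint64_t highest_pos = (end - klass_size) & ~(klab - 1);
      const uint32_t highest = (uint32_t)((highest_pos - base) >> shift);

      // Prints: lowest narrowKlass: 1, highest narrowKlass: 1048575
      printf("lowest narrowKlass: %u, highest narrowKlass: %u\n", lowest, highest);
      return 0;
    }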
  
  // Given a klass range [addr, addr+len) and a given encoding scheme, assert that this scheme covers the range, then
  // set this encoding scheme. Used by CDS at runtime to re-instate the scheme used to pre-compute klass ids for
  // archived heap objects.
  void CompressedKlassPointers::initialize_for_given_encoding(address addr, size_t len, address requested_base, int requested_shift) {
  
!   if (len > max_encoding_range_size()) {
!     stringStream ss;
+     ss.print("Class space size and CDS archive size combined (%zu) "
+              "exceed the maximum possible size (%zu)",
+              len, max_encoding_range_size());
+     vm_exit_during_initialization(ss.base());
+   }
+ 
+   const size_t encoding_range_size = nth_bit(narrow_klass_pointer_bits() + requested_shift);
  
    // Note: it would be technically valid for the encoding base to precede the start of the Klass range. But since we
    // only call this function from CDS, we know the base always equals the range start, which we assert below.
    assert(requested_base == addr, "Invalid requested base");
  
    _base = requested_base;
    _shift = requested_shift;
    _range = encoding_range_size;
  
! #ifdef ASSERT
+   _klass_range_start = addr;
+   _klass_range_end = addr + len;
+   calc_lowest_highest_narrow_klass_id();
+   sanity_check_after_initialization();
+ #endif
  }
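
The point of re-instating the dump-time scheme is that decoding must reproduce exactly the Klass* addresses from which the archived narrowKlass values were computed. A minimal sketch of that round trip, with hypothetical base/shift values (the real encode/decode lives in compressedKlass.inline.hpp):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t base  = 0x0800000000ULL;     // hypothetical requested_base
      const int      shift = 10;                  // hypothetical requested_shift
      const uint64_t klass_addr = base + 0x41000; // some (1 << shift)-aligned Klass address

      const uint32_t nk   = (uint32_t)((klass_addr - base) >> shift); // encode
      const uint64_t back = base + ((uint64_t)nk << shift);           // decode
      // Reversible only because klass_addr is aligned to 1 << shift; that is
      // what the alignment asserts in sanity_check_after_initialization() protect.
      assert(back == klass_addr);
      return 0;
    }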
  
  char* CompressedKlassPointers::reserve_address_space_X(uintptr_t from, uintptr_t to, size_t size, size_t alignment, bool aslr) {
    alignment = MAX2(Metaspace::reserve_alignment(), alignment);
    return os::attempt_reserve_memory_between((char*)from, (char*)to, size, alignment, aslr);
  }
  
  char* CompressedKlassPointers::reserve_address_space_for_unscaled_encoding(size_t size, bool aslr) {
!   const size_t unscaled_max = nth_bit(narrow_klass_pointer_bits());
+   return reserve_address_space_X(0, unscaled_max, size, Metaspace::reserve_alignment(), aslr);
  }
  
  char* CompressedKlassPointers::reserve_address_space_for_zerobased_encoding(size_t size, bool aslr) {
!   const size_t unscaled_max = nth_bit(narrow_klass_pointer_bits());
+   const size_t zerobased_max = nth_bit(narrow_klass_pointer_bits() + max_shift());
+   return reserve_address_space_X(unscaled_max, zerobased_max, size, Metaspace::reserve_alignment(), aslr);
  }
  
  char* CompressedKlassPointers::reserve_address_space_for_16bit_move(size_t size, bool aslr) {
    return reserve_address_space_X(nth_bit(32), nth_bit(48), size, nth_bit(32), aslr);
  }
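
The three helpers above probe successively weaker encoding properties. Assuming legacy geometry (32-bit nKlass, max shift 3), their address windows work out as follows (a sketch, not part of the patch):

    #include <cstdio>

    int main() {
      const int bits = 32, max_shift = 3; // assumed legacy geometry
      // unscaled: the narrowKlass is the Klass* itself, so the range must end below 2^bits
      printf("unscaled : [0, 2^%d) = [0, 4 GB)\n", bits);
      // zerobased: base stays null, the shift stretches the reach to 2^(bits+max_shift)
      printf("zerobased: [2^%d, 2^%d) = [4 GB, 32 GB)\n", bits, bits + max_shift);
      // 16-bit move: any base below 2^48 that is 4 GB aligned fits a single
      // 16-bit immediate move (e.g. one movk on aarch64)
      printf("16bitmove: [2^32, 2^48), base aligned to 4 GB\n");
      return 0;
    }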
  
  void CompressedKlassPointers::initialize(address addr, size_t len) {
  
!   if (len > max_encoding_range_size()) {
+     stringStream ss;
+     ss.print("Class space size (%zu) exceeds the maximum possible size (%zu)",
+               len, max_encoding_range_size());
+     vm_exit_during_initialization(ss.base());
+   }
+ 
+   // Give the CPU a shot at a specialized init sequence
+ #ifndef ZERO
+   if (pd_initialize(addr, len)) {
+     return;
+   }
+ #endif
+ 
+   if (tiny_classpointer_mode()) {
+ 
+     // In tiny classpointer mode, we don't attempt zero-based mode.
+     // Instead, we set the base to the start of the klass range and then
+     // try for the smallest shift that still covers the whole range. The
+     // reason is that we want to avoid, if possible, shift values larger
+     // than log2 of the cacheline size.
+     _base = addr;
+     _range = len;
+ 
+     constexpr int log_cacheline = 6;
+     int s = max_shift();
+     while (s > log_cacheline && ((size_t)nth_bit(narrow_klass_pointer_bits() + s - 1) > len)) {
+       s--;
+     }
+     _shift = s;
  
    } else {
! 
+     // In legacy mode, we try, in order of preference:
+     // -unscaled    (base=0 shift=0)
+     // -zero-based  (base=0 shift>0)
+     // -nonzero-base (base>0 shift=0)
+     // Note that base>0 shift>0 should never be needed, since the klass range will
+     // never exceed 4GB.
+     const uintptr_t unscaled_max = nth_bit(narrow_klass_pointer_bits());
+     const uintptr_t zerobased_max = nth_bit(narrow_klass_pointer_bits() + max_shift());
+ 
+     address const end = addr + len;
+     if (end <= (address)unscaled_max) {
        _base = nullptr;
        _shift = 0;
+     } else {
+       if (end <= (address)zerobased_max) {
+         _base = nullptr;
+         _shift = max_shift();
+       } else {
+         _base = addr;
+         _shift = 0;
+       }
      }
+     _range = end - _base;
+ 
    }
  
! #ifdef ASSERT
+   _klass_range_start = addr;
+   _klass_range_end = addr + len;
+   calc_lowest_highest_narrow_klass_id();
+   sanity_check_after_initialization();
+ #endif
  }
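
To illustrate the tiny-mode shift search, a standalone sketch with the assumed tiny geometry (22-bit nKlass, max shift 10): for a 128 MB klass range the loop settles on shift 6, the cacheline floor, which covers 2^28 = 256 MB:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int bits = 22;      // assumed narrow_klass_pointer_bits()
      const int max_shift = 10; // assumed max_shift()
      const uint64_t len = 128 * (1ULL << 20); // 128 MB klass range

      const int log_cacheline = 6;
      int s = max_shift;
      // Same loop as in initialize(): shrink the shift while the next smaller
      // one still covers len, but never go below log2(cacheline size).
      while (s > log_cacheline && ((uint64_t{1} << (bits + s - 1)) > len)) {
        s--;
      }
      printf("chosen shift: %d, covered range: %llu MB\n",
             s, (unsigned long long)((uint64_t{1} << (bits + s)) >> 20));
      return 0;
    }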
  
  void CompressedKlassPointers::print_mode(outputStream* st) {
+   st->print_cr("UseCompressedClassPointers %d, UseCompactObjectHeaders %d, "
+                "narrow klass pointer bits %d, max shift %d",
+                UseCompressedClassPointers, UseCompactObjectHeaders,
+                _narrow_klass_pointer_bits, _max_shift);
+   if (_base == (address)-1) {
+     st->print_cr("Narrow klass encoding not initialized");
+     return;
+   }
    st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d, "
                 "Narrow klass range: " SIZE_FORMAT_X, p2i(base()), shift(),
                 range());
+ #ifdef ASSERT
+   st->print_cr("Klass range: [" PTR_FORMAT "," PTR_FORMAT ")",
+                p2i(_klass_range_start), p2i(_klass_range_end));
+   st->print_cr("Lowest valid nklass id: %u Highest valid nklass id: %u",
+                _lowest_valid_narrow_klass_id, _highest_valid_narrow_klass_id);
+ #endif
  }
  
  #endif // _LP64