< prev index next >

src/hotspot/share/oops/compressedKlass.cpp

Print this page

 28 #include "runtime/globals.hpp"
 29 #include "runtime/java.hpp"
 30 #include "runtime/os.hpp"
 31 #include "utilities/debug.hpp"
 32 #include "utilities/formatBuffer.hpp"
 33 #include "utilities/globalDefinitions.hpp"
 34 #include "utilities/ostream.hpp"
 35 
// Width of a narrowKlass value and the largest shift allowed; both are set in
// pre_initialize(). -1 is the "not yet initialized" sentinel (checked by
// sanity_check_after_initialization()).
int CompressedKlassPointers::_narrow_klass_pointer_bits = -1;
int CompressedKlassPointers::_max_shift = -1;

// Encoding base and shift chosen in initialize(); (address)-1 / -1 mark them
// as "not yet initialized" (checked by sanity_check_after_initialization()).
address CompressedKlassPointers::_base = (address)-1;
int CompressedKlassPointers::_shift = -1;
// Boundaries of the reserved Klass range, remembered in initialize().
address CompressedKlassPointers::_klass_range_start = nullptr;
address CompressedKlassPointers::_klass_range_end = nullptr;
// Smallest and largest valid narrowKlass ids; (narrowKlass)-1 is the
// "not yet initialized" sentinel.
narrowKlass CompressedKlassPointers::_lowest_valid_narrow_klass_id = (narrowKlass)-1;
narrowKlass CompressedKlassPointers::_highest_valid_narrow_klass_id = (narrowKlass)-1;
// Size of the protection zone; 0 means no zone has been established
// (presumably guards the low end of the Klass range — confirm against users).
size_t CompressedKlassPointers::_protection_zone_size = 0;
 46 
 47 size_t CompressedKlassPointers::max_klass_range_size() {
 48 #ifdef _LP64
 49   const size_t encoding_allows = nth_bit(narrow_klass_pointer_bits() + max_shift());
 50   constexpr size_t cap = 4 * G;
 51   return MIN2(encoding_allows, cap);
 52 #else

 53   // 32-bit: only 32-bit "narrow" Klass pointers allowed. If we ever support smaller narrow
 54   // Klass pointers here, coding needs to be revised.
 55   // We keep one page safety zone free to guard against size_t overflows on 32-bit. In practice
 56   // this is irrelevant because these upper address space parts are not user-addressable on
 57   // any of our 32-bit platforms.
 58   return align_down(UINT_MAX, os::vm_page_size());
 59 #endif
 60 }
 61 
 62 void CompressedKlassPointers::pre_initialize() {
 63   if (UseCompactObjectHeaders) {
 64     _narrow_klass_pointer_bits = narrow_klass_pointer_bits_coh;
 65     _max_shift = max_shift_coh;
 66   } else {
 67 #ifdef _LP64
 68     _narrow_klass_pointer_bits = narrow_klass_pointer_bits_noncoh;
 69     _max_shift = max_shift_noncoh;
 70 #else
 71     _narrow_klass_pointer_bits = 32;
 72     _max_shift = 0;
 73 #endif
 74   }
 75 }
 76 
 77 #ifdef ASSERT
 78 void CompressedKlassPointers::sanity_check_after_initialization() {
 79   // In expectation of an assert, prepare condensed info to be printed with the assert.
 80   char tmp[256];
 81   os::snprintf_checked(tmp, sizeof(tmp), "klass range: " RANGE2FMT ","
 82                        " base " PTR_FORMAT ", shift %d, lowest/highest valid narrowKlass %u/%u",
 83                        RANGE2FMTARGS(_klass_range_start, _klass_range_end),
 84                        p2i(_base), _shift, _lowest_valid_narrow_klass_id, _highest_valid_narrow_klass_id);
 85 #define ASSERT_HERE(cond) assert(cond, " (%s)", tmp);
 86 #define ASSERT_HERE_2(cond, msg) assert(cond, msg " (%s)", tmp);
 87 




 88   // All values must be inited
 89   ASSERT_HERE(_max_shift != -1);
 90   ASSERT_HERE(_klass_range_start != (address)-1);
 91   ASSERT_HERE(_klass_range_end != (address)-1);
 92   ASSERT_HERE(_lowest_valid_narrow_klass_id != (narrowKlass)-1);
 93   ASSERT_HERE(_base != (address)-1);
 94   ASSERT_HERE(_shift != -1);
 95 
 96   // We should need a class space if address space is larger than what narrowKlass can address
 97   const bool should_need_class_space = (BytesPerWord * BitsPerByte) > narrow_klass_pointer_bits();
 98   ASSERT_HERE(should_need_class_space == needs_class_space());
 99 
100   const size_t klass_align = klass_alignment_in_bytes();
101 
102   // must be aligned enough hold 64-bit data
103   ASSERT_HERE(is_aligned(klass_align, sizeof(uint64_t)));
104 
105   // should be smaller than the minimum metaspace chunk size (soft requirement)
106   ASSERT_HERE(klass_align <= K);
107 

221   return reserve_address_space_X(nth_bit(32), nth_bit(48), size, nth_bit(32), aslr);
222 }
223 
224 void CompressedKlassPointers::initialize(address addr, size_t len) {
225 
226   if (len > max_klass_range_size()) {
227     stringStream ss;
228     ss.print("Class space size (%zu) exceeds the maximum possible size (%zu)",
229               len, max_klass_range_size());
230     vm_exit_during_initialization(ss.base());
231   }
232 
233   // Remember the Klass range:
234   _klass_range_start = addr;
235   _klass_range_end = addr + len;
236 
237   // Calculate Base and Shift:
238 
239   if (UseCompactObjectHeaders) {
240 
241     // In compact object header mode, with 22-bit narrowKlass, we don't attempt for





242     // zero-based mode. Instead, we set the base to the start of the klass range and
243     // then try for the smallest shift possible that still covers the whole range.
244     // The reason is that we want to avoid, if possible, shifts larger than
245     // a cacheline size.
246     _base = addr;
247 
248     const int log_cacheline = exact_log2(DEFAULT_CACHE_LINE_SIZE);
249     int s = max_shift();
250     while (s > log_cacheline && ((size_t)nth_bit(narrow_klass_pointer_bits() + s - 1) > len)) {
251       s--;
252     }
253     _shift = s;
254 
255   } else {
256 
257 #ifdef _LP64
258     // Traditional (non-compact) header mode
259     const uintptr_t unscaled_max = nth_bit(narrow_klass_pointer_bits());
260     const uintptr_t zerobased_max = nth_bit(narrow_klass_pointer_bits() + max_shift());
261 

 28 #include "runtime/globals.hpp"
 29 #include "runtime/java.hpp"
 30 #include "runtime/os.hpp"
 31 #include "utilities/debug.hpp"
 32 #include "utilities/formatBuffer.hpp"
 33 #include "utilities/globalDefinitions.hpp"
 34 #include "utilities/ostream.hpp"
 35 
// Width of a narrowKlass value and the largest shift allowed; both are set in
// pre_initialize(). -1 is the "not yet initialized" sentinel (checked by
// sanity_check_after_initialization()).
int CompressedKlassPointers::_narrow_klass_pointer_bits = -1;
int CompressedKlassPointers::_max_shift = -1;

// Encoding base and shift chosen in initialize(); (address)-1 / -1 mark them
// as "not yet initialized" (checked by sanity_check_after_initialization()).
address CompressedKlassPointers::_base = (address)-1;
int CompressedKlassPointers::_shift = -1;
// Boundaries of the reserved Klass range, remembered in initialize().
address CompressedKlassPointers::_klass_range_start = nullptr;
address CompressedKlassPointers::_klass_range_end = nullptr;
// Smallest and largest valid narrowKlass ids; (narrowKlass)-1 is the
// "not yet initialized" sentinel.
narrowKlass CompressedKlassPointers::_lowest_valid_narrow_klass_id = (narrowKlass)-1;
narrowKlass CompressedKlassPointers::_highest_valid_narrow_klass_id = (narrowKlass)-1;
// Size of the protection zone; 0 means no zone has been established
// (presumably guards the low end of the Klass range — confirm against users).
size_t CompressedKlassPointers::_protection_zone_size = 0;
 46 
 47 size_t CompressedKlassPointers::max_klass_range_size() {
 48  #ifdef _LP64
 49    const size_t encoding_allows = nth_bit(narrow_klass_pointer_bits() + max_shift());
 50    assert(!UseCompactObjectHeaders || max_klass_range_size_coh == encoding_allows, "Sanity");
 51    constexpr size_t cap = 4 * G;
 52    return MIN2(encoding_allows, cap);
 53  #else
 54   // 32-bit: only 32-bit "narrow" Klass pointers allowed. If we ever support smaller narrow
 55   // Klass pointers here, coding needs to be revised.
 56   // We keep one page safety zone free to guard against size_t overflows on 32-bit. In practice
 57   // this is irrelevant because these upper address space parts are not user-addressable on
 58   // any of our 32-bit platforms.
 59   return align_down(UINT_MAX, os::vm_page_size());
 60 #endif
 61 }
 62 
 63 void CompressedKlassPointers::pre_initialize() {
 64   if (UseCompactObjectHeaders) {
 65     _narrow_klass_pointer_bits = narrow_klass_pointer_bits_coh;
 66     _max_shift = max_shift_coh;
 67   } else {
 68 #ifdef _LP64
 69     _narrow_klass_pointer_bits = narrow_klass_pointer_bits_noncoh;
 70     _max_shift = max_shift_noncoh;
 71 #else
 72     _narrow_klass_pointer_bits = 32;
 73     _max_shift = 0;
 74 #endif
 75   }
 76 }
 77 
 78 #ifdef ASSERT
 79 void CompressedKlassPointers::sanity_check_after_initialization() {
 80   // In expectation of an assert, prepare condensed info to be printed with the assert.
 81   char tmp[256];
 82   os::snprintf_checked(tmp, sizeof(tmp), "klass range: " RANGE2FMT ","
 83                        " base " PTR_FORMAT ", shift %d, lowest/highest valid narrowKlass %u/%u",
 84                        RANGE2FMTARGS(_klass_range_start, _klass_range_end),
 85                        p2i(_base), _shift, _lowest_valid_narrow_klass_id, _highest_valid_narrow_klass_id);
 86 #define ASSERT_HERE(cond) assert(cond, " (%s)", tmp);
 87 #define ASSERT_HERE_2(cond, msg) assert(cond, msg " (%s)", tmp);
 88 
 89   // There is no technical reason preventing us from using other klass pointer bit lengths,
 90   // but it should be a deliberate choice
 91   ASSERT_HERE(_narrow_klass_pointer_bits == 32 || _narrow_klass_pointer_bits == 19);
 92 
 93   // All values must be inited
 94   ASSERT_HERE(_max_shift != -1);
 95   ASSERT_HERE(_klass_range_start != (address)-1);
 96   ASSERT_HERE(_klass_range_end != (address)-1);
 97   ASSERT_HERE(_lowest_valid_narrow_klass_id != (narrowKlass)-1);
 98   ASSERT_HERE(_base != (address)-1);
 99   ASSERT_HERE(_shift != -1);
100 
101   // We should need a class space if address space is larger than what narrowKlass can address
102   const bool should_need_class_space = (BytesPerWord * BitsPerByte) > narrow_klass_pointer_bits();
103   ASSERT_HERE(should_need_class_space == needs_class_space());
104 
105   const size_t klass_align = klass_alignment_in_bytes();
106 
107   // must be aligned enough hold 64-bit data
108   ASSERT_HERE(is_aligned(klass_align, sizeof(uint64_t)));
109 
110   // should be smaller than the minimum metaspace chunk size (soft requirement)
111   ASSERT_HERE(klass_align <= K);
112 

226   return reserve_address_space_X(nth_bit(32), nth_bit(48), size, nth_bit(32), aslr);
227 }
228 
229 void CompressedKlassPointers::initialize(address addr, size_t len) {
230 
231   if (len > max_klass_range_size()) {
232     stringStream ss;
233     ss.print("Class space size (%zu) exceeds the maximum possible size (%zu)",
234               len, max_klass_range_size());
235     vm_exit_during_initialization(ss.base());
236   }
237 
238   // Remember the Klass range:
239   _klass_range_start = addr;
240   _klass_range_end = addr + len;
241 
242   // Calculate Base and Shift:
243 
244   if (UseCompactObjectHeaders) {
245 
246     // This handles the case that we - experimentally - reduce the number of
247     // class pointer bits further, such that (shift + num bits) < 32.
248     assert(len <= (size_t)nth_bit(narrow_klass_pointer_bits() + max_shift()),
249            "klass range size exceeds encoding, len: %zu, narrow_klass_pointer_bits: %d, max_shift: %d", len, narrow_klass_pointer_bits(), max_shift());
250 
251     // In compact object header mode, with 19-bit narrowKlass, we don't attempt for
252     // zero-based mode. Instead, we set the base to the start of the klass range and
253     // then try for the smallest shift possible that still covers the whole range.
254     // The reason is that we want to avoid, if possible, shifts larger than
255     // a cacheline size.
256     _base = addr;
257 
258     const int log_cacheline = exact_log2(DEFAULT_CACHE_LINE_SIZE);
259     int s = max_shift();
260     while (s > log_cacheline && ((size_t)nth_bit(narrow_klass_pointer_bits() + s - 1) > len)) {
261       s--;
262     }
263     _shift = s;
264 
265   } else {
266 
267 #ifdef _LP64
268     // Traditional (non-compact) header mode
269     const uintptr_t unscaled_max = nth_bit(narrow_klass_pointer_bits());
270     const uintptr_t zerobased_max = nth_bit(narrow_klass_pointer_bits() + max_shift());
271 
< prev index next >