src/hotspot/share/oops/compressedKlass.cpp

--- old/src/hotspot/share/oops/compressedKlass.cpp

 58   if (UseCompactObjectHeaders) {
 59     _narrow_klass_pointer_bits = narrow_klass_pointer_bits_coh;
 60     _max_shift = max_shift_coh;
 61   } else {
 62     _narrow_klass_pointer_bits = narrow_klass_pointer_bits_noncoh;
 63     _max_shift = max_shift_noncoh;
 64   }
 65 }
 66 
 67 #ifdef ASSERT
 68 void CompressedKlassPointers::sanity_check_after_initialization() {
 69   // In anticipation of an assert firing, prepare condensed info to be printed with the assert.
 70   char tmp[256];
 71   os::snprintf(tmp, sizeof(tmp), "klass range: " RANGE2FMT ","
 72       " base " PTR_FORMAT ", shift %d, lowest/highest valid narrowKlass %u/%u",
 73       RANGE2FMTARGS(_klass_range_start, _klass_range_end),
 74       p2i(_base), _shift, _lowest_valid_narrow_klass_id, _highest_valid_narrow_klass_id);
 75 #define ASSERT_HERE(cond) assert(cond, " (%s)", tmp);
 76 #define ASSERT_HERE_2(cond, msg) assert(cond, msg " (%s)", tmp);
 77 
 78   // All values must be initialized
 79   ASSERT_HERE(_max_shift != -1);
 80   ASSERT_HERE(_klass_range_start != (address)-1);
 81   ASSERT_HERE(_klass_range_end != (address)-1);
 82   ASSERT_HERE(_lowest_valid_narrow_klass_id != (narrowKlass)-1);
 83   ASSERT_HERE(_base != (address)-1);
 84   ASSERT_HERE(_shift != -1);
 85 
 86   const size_t klass_align = klass_alignment_in_bytes();
 87 
 89   // must be aligned enough to hold 64-bit data
 89   ASSERT_HERE(is_aligned(klass_align, sizeof(uint64_t)));
 90 
 91   // should be smaller than the minimum metaspace chunk size (soft requirement)
 92   ASSERT_HERE(klass_align <= K);
 93 
 94   ASSERT_HERE(_klass_range_end > _klass_range_start);
 95 
 96   // Check that Klass range is fully engulfed in the encoding range
 97   const address encoding_start = _base;

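For context on the engulfment check that ends this hunk: a narrowKlass decodes to base + ((address)nk << shift), so the encoding can reach exactly the window [base, base + (2^narrow_klass_pointer_bits << shift)). The assert verifies that the reserved Klass range lies wholly inside that window. A minimal standalone sketch of the arithmetic, with illustrative constants (a 32-bit narrowKlass, shift 3, and an arbitrary 4 GB base; only the formulas come from the excerpt, not these values):

    #include <cassert>
    #include <cstdint>

    // Illustrative values only; not the HotSpot implementation.
    const int       kBits  = 32;
    const int       kShift = 3;
    const uintptr_t kBase  = uintptr_t(1) << 32;

    uintptr_t decode(uint32_t nk) { return kBase + (uintptr_t(nk) << kShift); }
    uint32_t  encode(uintptr_t p) { return uint32_t((p - kBase) >> kShift); }

    int main() {
      const uintptr_t encoding_start = kBase;
      const uintptr_t encoding_end   = kBase + ((uintptr_t(1) << kBits) << kShift);
      // A hypothetical 1 GB Klass range placed at the base:
      const uintptr_t range_start = kBase;
      const uintptr_t range_end   = kBase + (uintptr_t(1) << 30);
      // The check from the hunk above: range fully engulfed in the encoding range.
      assert(encoding_start <= range_start && range_end <= encoding_end);
      // Decoding round-trips for any suitably aligned address in the range:
      assert(decode(encode(range_start + 4096)) == range_start + 4096);
      return 0;
    }
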
199   return reserve_address_space_X(nth_bit(32), nth_bit(48), size, nth_bit(32), aslr);
200 }
201 
202 void CompressedKlassPointers::initialize(address addr, size_t len) {
203 
204   if (len > max_klass_range_size()) {
205     stringStream ss;
206     ss.print("Class space size (%zu) exceeds the maximum possible size (%zu)",
207               len, max_klass_range_size());
208     vm_exit_during_initialization(ss.base());
209   }
210 
211   // Remember the Klass range:
212   _klass_range_start = addr;
213   _klass_range_end = addr + len;
214 
215   // Calculate Base and Shift:
216 
217   if (UseCompactObjectHeaders) {
218 
219     // In compact object header mode, with 22-bit narrowKlass, we don't attempt
220     // zero-based mode. Instead, we set the base to the start of the klass range and
221     // then try for the smallest shift possible that still covers the whole range.
222     // The reason is that we want to avoid, if possible, shifts larger than
223     // a cacheline size.
224     _base = addr;
225 
226     const int log_cacheline = exact_log2(DEFAULT_CACHE_LINE_SIZE);
227     int s = max_shift();
228     while (s > log_cacheline && ((size_t)nth_bit(narrow_klass_pointer_bits() + s - 1) > len)) {
229       s--;
230     }
231     _shift = s;
232 
233   } else {
234 
235     // Traditional (non-compact) header mode
236     const uintptr_t unscaled_max = nth_bit(narrow_klass_pointer_bits());
237     const uintptr_t zerobased_max = nth_bit(narrow_klass_pointer_bits() + max_shift());
238 
239 #ifdef AARCH64

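The two constants at the end of this hunk bound the preferred encoding modes for traditional headers: a Klass range that ends below unscaled_max can be encoded with base 0 and shift 0, and one that ends below zerobased_max still gets base 0 with a nonzero shift. A small standalone illustration, assuming 32-bit class pointers and a max shift of 3 (the excerpt only shows the formulas; these concrete values are assumptions):

    #include <cstdio>
    #include <cstdint>

    int main() {
      const int bits = 32, max_shift = 3;  // assumed non-compact-header values
      const uint64_t unscaled_max  = uint64_t(1) << bits;               // 4 GB
      const uint64_t zerobased_max = uint64_t(1) << (bits + max_shift); // 32 GB
      // A Klass range ending at 6 GB is too high for unscaled mode but is
      // still reachable zero-based (6 GB <= 32 GB):
      const uint64_t range_end = uint64_t(6) << 30;
      printf("unscaled possible: %d, zero-based possible: %d\n",
             int(range_end <= unscaled_max), int(range_end <= zerobased_max));
      return 0;
    }

Judging by its arguments, the reserve_address_space_X call at the top of the hunk searches the window [2^32, 2^48) for a 4 GB-aligned attach point, so the resulting base has no bits set in the low 32-bit range.
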
+++ new/src/hotspot/share/oops/compressedKlass.cpp

 58   if (UseCompactObjectHeaders) {
 59     _narrow_klass_pointer_bits = narrow_klass_pointer_bits_coh;
 60     _max_shift = max_shift_coh;
 61   } else {
 62     _narrow_klass_pointer_bits = narrow_klass_pointer_bits_noncoh;
 63     _max_shift = max_shift_noncoh;
 64   }
 65 }
 66 
 67 #ifdef ASSERT
 68 void CompressedKlassPointers::sanity_check_after_initialization() {
 69   // In anticipation of an assert firing, prepare condensed info to be printed with the assert.
 70   char tmp[256];
 71   os::snprintf(tmp, sizeof(tmp), "klass range: " RANGE2FMT ","
 72       " base " PTR_FORMAT ", shift %d, lowest/highest valid narrowKlass %u/%u",
 73       RANGE2FMTARGS(_klass_range_start, _klass_range_end),
 74       p2i(_base), _shift, _lowest_valid_narrow_klass_id, _highest_valid_narrow_klass_id);
 75 #define ASSERT_HERE(cond) assert(cond, " (%s)", tmp);
 76 #define ASSERT_HERE_2(cond, msg) assert(cond, msg " (%s)", tmp);
 77 
 78   // There is no technical reason preventing us from using other klass pointer bit lengths,
 79   // but it should be a deliberate choice
 80   ASSERT_HERE(_narrow_klass_pointer_bits == 32 || _narrow_klass_pointer_bits == 19);
 81 
 82   // All values must be initialized
 83   ASSERT_HERE(_max_shift != -1);
 84   ASSERT_HERE(_klass_range_start != (address)-1);
 85   ASSERT_HERE(_klass_range_end != (address)-1);
 86   ASSERT_HERE(_lowest_valid_narrow_klass_id != (narrowKlass)-1);
 87   ASSERT_HERE(_base != (address)-1);
 88   ASSERT_HERE(_shift != -1);
 89 
 90   const size_t klass_align = klass_alignment_in_bytes();
 91 
 93   // must be aligned enough to hold 64-bit data
 93   ASSERT_HERE(is_aligned(klass_align, sizeof(uint64_t)));
 94 
 95   // should be smaller than the minimum metaspace chunk size (soft requirement)
 96   ASSERT_HERE(klass_align <= K);
 97 
 98   ASSERT_HERE(_klass_range_end > _klass_range_start);
 99 
100   // Check that Klass range is fully engulfed in the encoding range
101   const address encoding_start = _base;

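The ASSERT_HERE/ASSERT_HERE_2 macros implement a handy diagnostic pattern: format the complete encoding state into a buffer once, then append it to every assert so that any failure reports the full state instead of just the failed condition. A self-contained sketch of the same pattern in portable C++ (plain fprintf/abort stands in for HotSpot's assert machinery; all names and values are illustrative):

    #include <cstdio>
    #include <cstdlib>

    // Build the shared context string once, append it to every failure message.
    void sanity_check(int shift, unsigned lowest, unsigned highest) {
      char tmp[256];
      snprintf(tmp, sizeof(tmp), "shift %d, lowest/highest valid narrowKlass %u/%u",
               shift, lowest, highest);
    #define ASSERT_HERE(cond) \
      do { if (!(cond)) { fprintf(stderr, "assert(%s) failed (%s)\n", #cond, tmp); abort(); } } while (0)
      ASSERT_HERE(shift >= 0);
      ASSERT_HERE(lowest <= highest);
    #undef ASSERT_HERE
    }

    int main() {
      sanity_check(3, 1, 1024);  // passes silently
      return 0;
    }
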
203   return reserve_address_space_X(nth_bit(32), nth_bit(48), size, nth_bit(32), aslr);
204 }
205 
206 void CompressedKlassPointers::initialize(address addr, size_t len) {
207 
208   if (len > max_klass_range_size()) {
209     stringStream ss;
210     ss.print("Class space size (%zu) exceeds the maximum possible size (%zu)",
211               len, max_klass_range_size());
212     vm_exit_during_initialization(ss.base());
213   }
214 
215   // Remember the Klass range:
216   _klass_range_start = addr;
217   _klass_range_end = addr + len;
218 
219   // Calculate Base and Shift:
220 
221   if (UseCompactObjectHeaders) {
222 
223     // This handles the case where we - experimentally - reduce the number of
224     // class pointer bits further, such that (shift + num bits) < 32.
225     assert(len <= (size_t)nth_bit(narrow_klass_pointer_bits() + max_shift()),
226            "klass range size exceeds encoding, len: " SIZE_FORMAT ", narrow_klass_pointer_bits: %d, max_shift: %d", len, narrow_klass_pointer_bits(), max_shift());
227 
228     // In compact object header mode, with 19-bit narrowKlass, we don't attempt
229     // zero-based mode. Instead, we set the base to the start of the klass range and
230     // then try for the smallest shift possible that still covers the whole range.
231     // The reason is that we want to avoid, if possible, shifts larger than
232     // a cacheline size.
233     _base = addr;
234 
235     const int log_cacheline = exact_log2(DEFAULT_CACHE_LINE_SIZE);
236     int s = max_shift();
237     while (s > log_cacheline && ((size_t)nth_bit(narrow_klass_pointer_bits() + s - 1) > len)) {
238       s--;
239     }
240     _shift = s;
241 
242   } else {
243 
244     // Traditional (non-compact) header mode
245     const uintptr_t unscaled_max = nth_bit(narrow_klass_pointer_bits());
246     const uintptr_t zerobased_max = nth_bit(narrow_klass_pointer_bits() + max_shift());
247 
248 #ifdef AARCH64
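To make the shift search in this hunk concrete: the loop keeps decrementing while the next smaller shift would still cover the whole range, stopping at log2 of the cacheline size. A standalone rerun of that logic, assuming 19-bit class pointers, a max shift of 10 and 64-byte cachelines (the excerpt shows the 19-bit width; the other two values are assumptions):

    #include <cstdio>
    #include <cstddef>

    // nth_bit as used above: 2^n.
    static size_t nth_bit(int n) { return size_t(1) << n; }

    int main() {
      const int bits = 19, max_shift = 10, log_cacheline = 6;  // assumed values
      const size_t lens[] = { size_t(128) << 20, size_t(16) << 20 };
      for (size_t len : lens) {
        int s = max_shift;
        // Same loop as the hunk above: shrink while the next smaller shift
        // still covers the range, but never go below log2(cacheline).
        while (s > log_cacheline && nth_bit(bits + s - 1) > len) {
          s--;
        }
        printf("len %3zu MB -> shift %d (encodable range %zu MB)\n",
               len >> 20, s, nth_bit(bits + s) >> 20);
      }
      return 0;
    }

With these inputs, a 128 MB range settles at shift 9 (256 MB encodable) and a 16 MB range bottoms out at the cacheline floor of 6 (32 MB encodable).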