src/hotspot/share/oops/compressedKlass.cpp (old)

  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "logging/log.hpp"
 27 #include "memory/metaspace.hpp"
 28 #include "oops/compressedKlass.hpp"
 29 #include "runtime/globals.hpp"
 30 #include "runtime/os.hpp"
 31 #include "utilities/debug.hpp"
 32 #include "utilities/globalDefinitions.hpp"
 33 #include "utilities/ostream.hpp"
 34 
 35 address CompressedKlassPointers::_base = nullptr;
 36 int CompressedKlassPointers::_shift = 0;
 37 size_t CompressedKlassPointers::_range = 0;
 38 
 39 #ifdef _LP64
 40 
 41 #ifdef ASSERT
 42 void CompressedKlassPointers::assert_is_valid_encoding(address addr, size_t len, address base, int shift) {
 43   assert(base + nth_bit(32 + shift) >= addr + len, "Encoding (base=" PTR_FORMAT ", shift=%d) does not "
 44          "fully cover the class range " PTR_FORMAT "-" PTR_FORMAT, p2i(base), shift, p2i(addr), p2i(addr + len));
 45 }
 46 #endif
 47 
 48 // Given a klass range [addr, addr+len) and a given encoding scheme, assert that this scheme covers the range, then
 49 // set this encoding scheme. Used by CDS at runtime to re-instate the scheme used to pre-compute klass ids for
 50 // archived heap objects.
 51 void CompressedKlassPointers::initialize_for_given_encoding(address addr, size_t len, address requested_base, int requested_shift) {
 52   address const end = addr + len;
 53 
 54   const int narrow_klasspointer_bits = sizeof(narrowKlass) * 8;
 55   const size_t encoding_range_size = nth_bit(narrow_klasspointer_bits + requested_shift);
 56   address encoding_range_end = requested_base + encoding_range_size;
 57 
 58   // Note: it would be technically valid for the encoding base to precede the start of the Klass range. But we only call
 59   // this function from CDS, where the requested base always equals the range start.
 60   assert(requested_base == addr, "Invalid requested base");
 61   assert(encoding_range_end >= end, "Encoding does not cover the full Klass range");
 62 
 63   _base = requested_base;
 64   _shift = requested_shift;
 65   _range = encoding_range_size;
 66 
 67   DEBUG_ONLY(assert_is_valid_encoding(addr, len, _base, _shift);)
 68 }
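A standalone sketch, not part of the file above, restating the coverage relationship that initialize_for_given_encoding and assert_is_valid_encoding rely on; all names and numbers are stand-ins chosen for illustration, and a 64-bit build is assumed.

#include <cstddef>
#include <cstdint>

constexpr int       kNarrowKlassBits = 32;                   // legacy narrowKlass width
constexpr int       kShift           = 3;                    // requested shift
constexpr uintptr_t kBase            = uintptr_t(8) << 30;   // requested base, at the 8 GB mark
constexpr uintptr_t kRangeStart      = kBase;                // CDS passes base == range start
constexpr size_t    kRangeLen        = size_t(1) << 30;      // a 1 GB Klass range

// The encoding covers [base, base + 2^(bits + shift)); the code above asserts that
// the Klass range ends no later than that.
constexpr uintptr_t kEncodingEnd = kBase + (uintptr_t(1) << (kNarrowKlassBits + kShift));
static_assert(kRangeStart + kRangeLen <= kEncodingEnd, "encoding fully covers the Klass range");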
 69 
 70 char* CompressedKlassPointers::reserve_address_space_X(uintptr_t from, uintptr_t to, size_t size, size_t alignment, bool aslr) {
 71   alignment = MAX2(Metaspace::reserve_alignment(), alignment);
 72   return os::attempt_reserve_memory_between((char*)from, (char*)to, size, alignment, aslr);
 73 }
 74 
 75 char* CompressedKlassPointers::reserve_address_space_for_unscaled_encoding(size_t size, bool aslr) {
 76   return reserve_address_space_X(0, nth_bit(32), size, Metaspace::reserve_alignment(), aslr);
 77 }
 78 
 79 char* CompressedKlassPointers::reserve_address_space_for_zerobased_encoding(size_t size, bool aslr) {
 80   return reserve_address_space_X(nth_bit(32), nth_bit(32 + LogKlassAlignmentInBytes), size, Metaspace::reserve_alignment(), aslr);
 81 }
 82 
 83 char* CompressedKlassPointers::reserve_address_space_for_16bit_move(size_t size, bool aslr) {
 84   return reserve_address_space_X(nth_bit(32), nth_bit(48), size, nth_bit(32), aslr);
 85 }
 86 
 87 #if !defined(AARCH64) || defined(ZERO)
 88 // On aarch64 we have our own version; all other platforms use the default version
 89 void CompressedKlassPointers::initialize(address addr, size_t len) {
 90   // The default version of this code tries, in order of preference:
 91   // -unscaled    (base=0 shift=0)
 92   // -zero-based  (base=0 shift>0)
 93   // -nonzero-base (base>0 shift=0)
 94   // Note that base>0 shift>0 should never be needed, since the klass range will
 95   // never exceed 4GB.
 96   constexpr uintptr_t unscaled_max = nth_bit(32);
 97   assert(len <= unscaled_max, "Klass range larger than 32 bits?");
 98 
 99   constexpr uintptr_t zerobased_max = nth_bit(32 + LogKlassAlignmentInBytes);
100 
101   address const end = addr + len;
102   if (end <= (address)unscaled_max) {
103     _base = nullptr;
104     _shift = 0;
105   } else {
106     if (end <= (address)zerobased_max) {
107       _base = nullptr;
108       _shift = LogKlassAlignmentInBytes;
109     } else {
110       _base = addr;
111       _shift = 0;
112     }
113   }
114   _range = end - _base;
115 
116   DEBUG_ONLY(assert_is_valid_encoding(addr, len, _base, _shift);)
117 }
118 #endif // !AARCH64 || ZERO
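A small sketch, not part of the file above, of what the three preferred modes mean for decoding; decode_narrow_klass is a local stand-in name, not a HotSpot function.

#include <cstdint>

// Decoding is always: Klass* = base + ((uintptr_t)nk << shift). The modes chosen
// above only differ in which parts of that formula are zero:
//   unscaled     : base == 0, shift == 0  -> the narrow value is the address itself
//   zero-based   : base == 0, shift  > 0  -> a single shift, no add
//   nonzero-base : base  > 0, shift == 0  -> a single add, no shift
static inline uintptr_t decode_narrow_klass(uint32_t nk, uintptr_t base, int shift) {
  return base + (uintptr_t(nk) << shift);
}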
119 
120 void CompressedKlassPointers::print_mode(outputStream* st) {
121   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d, "
122                "Narrow klass range: " SIZE_FORMAT_X, p2i(base()), shift(),
123                range());
124 }
125 
126 #endif // _LP64

src/hotspot/share/oops/compressedKlass.cpp (new)

  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "logging/log.hpp"
 27 #include "memory/metaspace.hpp"
 28 #include "oops/compressedKlass.inline.hpp"
 29 #include "oops/klass.hpp"
 30 #include "runtime/globals.hpp"
 31 #include "runtime/java.hpp"
 32 #include "runtime/os.hpp"
 33 #include "utilities/debug.hpp"
 34 #include "utilities/globalDefinitions.hpp"
 35 #include "utilities/ostream.hpp"
 36 
 37 int CompressedKlassPointers::_tiny_cp = -1;
 38 int CompressedKlassPointers::_narrow_klass_pointer_bits = -1;
 39 int CompressedKlassPointers::_max_shift = -1;
 40 #ifdef ASSERT
 41 address CompressedKlassPointers::_klass_range_start = (address)-1;
 42 address CompressedKlassPointers::_klass_range_end = (address)-1;
 43 narrowKlass CompressedKlassPointers::_lowest_valid_narrow_klass_id = (narrowKlass)-1;
 44 narrowKlass CompressedKlassPointers::_highest_valid_narrow_klass_id = (narrowKlass)-1;
 45 #endif
 46 
 47 address CompressedKlassPointers::_base = (address)-1;
 48 int CompressedKlassPointers::_shift = -1;
 49 size_t CompressedKlassPointers::_range = (size_t)-1;
 50 
 51 // The maximum allowed length of the Klass range (the address range engulfing
 52 // CDS + class space) is 4 GB, i.e. it must fit into 32 bits.
 53 // The theoretical limit is the size addressable by a fully-shifted narrow Klass
 54 // pointer, which would be 32 + 3 = 35 bits in legacy mode; however, keeping the
 55 // range below 32 bits allows us to use decoding techniques like 16-bit moves
 56 // into the third quadrant on some architectures, and keeps the code less
 57 // complex. 32 bits have always been enough for CDS + class space.
 58 static constexpr size_t max_klass_range_size = 4 * G;
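A standalone sketch, not part of the file, of the arithmetic behind this limit, using local stand-in constants for the legacy parameters quoted in the comment above.

#include <cstdint>

constexpr int kNarrowKlassBitsLegacy = 32;   // legacy narrow Klass pointer width
constexpr int kMaxShiftLegacy        = 3;    // 32 + 3 = 35 bits, as noted above
// Theoretical ceiling: a fully shifted narrow Klass pointer spans 2^(32+3) = 32 GB ...
constexpr uint64_t kTheoreticalMax = uint64_t(1) << (kNarrowKlassBitsLegacy + kMaxShiftLegacy);
// ... but the range is capped at 4 GB to keep 16-bit-move style bases possible and
// the code simple.
constexpr uint64_t kChosenMax = uint64_t(4) << 30;
static_assert(kChosenMax < kTheoreticalMax, "4 GB stays well below the 35-bit theoretical bound");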
 59 
 60 #ifdef _LP64
 61 
 62 void CompressedKlassPointers::pre_initialize() {
 63   if (UseCompactObjectHeaders) {
 64     _tiny_cp = 1;
 65     _narrow_klass_pointer_bits = narrow_klass_pointer_bits_tinycp;
 66     _max_shift = max_shift_tinycp;
 67   } else {
 68     _tiny_cp = 0;
 69     _narrow_klass_pointer_bits = narrow_klass_pointer_bits_legacy;
 70     _max_shift = max_shift_legacy;
 71   }
 72 }
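A small sketch, not part of the file, of how the two parameter sets translate into encodable range size. The 22-bit width and the legacy 32/3 pair appear in this file; the tiny-cp maximum shift of 10 is an assumption made only for this example.

#include <cstdint>

// Encodable range with n narrow-klass bits and shift s is 2^(n + s) bytes.
constexpr uint64_t encodable_bytes(int bits, int shift) {
  return uint64_t(1) << (bits + shift);
}

// Legacy mode: 32-bit narrow Klass pointers, shift up to 3 -> up to 32 GB.
static_assert(encodable_bytes(32, 3) == (uint64_t(32) << 30), "legacy ceiling");
// Tiny class pointer mode: 22-bit narrow Klass pointers; with an assumed maximum shift
// of 10 the ceiling is 2^(22 + 10) = 4 GB, matching max_klass_range_size above.
static_assert(encodable_bytes(22, 10) == (uint64_t(4) << 30), "tiny-cp ceiling (assumed shift)");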
 73 
 74 #ifdef ASSERT
 75 void CompressedKlassPointers::sanity_check_after_initialization() {
 76   // In case an assert fires, prepare condensed info to be printed along with it.
 77   char tmp[256];
 78   os::snprintf(tmp, sizeof(tmp), PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT " %d " SIZE_FORMAT " %u %u",
 79       p2i(_klass_range_start), p2i(_klass_range_end), p2i(_base), _shift, _range,
 80       _lowest_valid_narrow_klass_id, _highest_valid_narrow_klass_id);
 81 #define ASSERT_HERE(cond) assert(cond, " (%s)", tmp);
 82 #define ASSERT_HERE_2(cond, msg) assert(cond, msg " (%s)", tmp);
 83 
 84   // There is no technical reason preventing us from using other klass pointer bit lengths,
 85   // but it should be a deliberate choice
 86   ASSERT_HERE(_narrow_klass_pointer_bits == 32 || _narrow_klass_pointer_bits == 22);
 87 
 88   // All values must be inited
 89   ASSERT_HERE(_max_shift != -1);
 90   ASSERT_HERE(_klass_range_start != (address)-1);
 91   ASSERT_HERE(_klass_range_end != (address)-1);
 92   ASSERT_HERE(_lowest_valid_narrow_klass_id != (narrowKlass)-1);
 93   ASSERT_HERE(_base != (address)-1);
 94   ASSERT_HERE(_shift != -1);
 95   ASSERT_HERE(_range != (size_t)-1);
 96 
 97   const size_t klab = klass_alignment_in_bytes();
 98   ASSERT_HERE(klab >= sizeof(uint64_t) && klab <= K);
 99 
100   // Check that Klass range is fully engulfed in the encoding range
101   ASSERT_HERE(_klass_range_end > _klass_range_start);
102 
103   const address encoding_end = _base + nth_bit(narrow_klass_pointer_bits() + _shift);
104   ASSERT_HERE_2(_klass_range_start >= _base && _klass_range_end <= encoding_end,
105                 "Resulting encoding range does not fully cover the class range");
106 
107   // Check that the Klass range is aligned to the Klass alignment. That should never be an issue since we mmap the
108   // relevant regions, and the klass alignment (tied to the smallest metachunk size of 1K) will always be smaller
109   // than the smallest page size of 4K.
110   ASSERT_HERE_2(is_aligned(_klass_range_start, klab) && is_aligned(_klass_range_end, klab),
111                 "Klass range must start at a properly aligned address");
112 
113   // Check that lowest and highest possible narrowKlass values make sense
114   ASSERT_HERE_2(_lowest_valid_narrow_klass_id > 0, "Null is not a valid narrowKlass");
115   ASSERT_HERE(_highest_valid_narrow_klass_id > _lowest_valid_narrow_klass_id);
116 
117   Klass* k1 = decode_not_null_without_asserts(_lowest_valid_narrow_klass_id, _base, _shift);
118   ASSERT_HERE_2((address)k1 == _klass_range_start + klab, "Not lowest");
119   narrowKlass nk1 = encode_not_null_without_asserts(k1, _base, _shift);
120   ASSERT_HERE_2(nk1 == _lowest_valid_narrow_klass_id, "not reversible");
121 
122   Klass* k2 = decode_not_null_without_asserts(_highest_valid_narrow_klass_id, _base, _shift);
123   // _highest_valid_narrow_klass_id must be decoded to the highest theoretically possible
124   // valid Klass* position in range, if we assume minimal Klass size
125   ASSERT_HERE((address)k2 < _klass_range_end);
126   ASSERT_HERE_2(align_up(((address)k2 + sizeof(Klass)), klab) >= _klass_range_end, "Not highest");
127   narrowKlass nk2 = encode_not_null_without_asserts(k2, _base, _shift);
128   ASSERT_HERE_2(nk2 == _highest_valid_narrow_klass_id, "not reversible");
129 
130 #ifdef AARCH64
131   // On aarch64, we never expect a shift value > 0 in legacy mode
132   ASSERT_HERE_2(tiny_classpointer_mode() || _shift == 0, "Shift > 0 in legacy mode?");
133 #endif
134 #undef ASSERT_HERE
135 #undef ASSERT_HERE_2
136 }
137 
138 void CompressedKlassPointers::calc_lowest_highest_narrow_klass_id() {
139   // Given a Klass range, calculate lowest and highest narrowKlass.
140   const size_t klab = klass_alignment_in_bytes();
141   // Note that 0 is not a valid narrowKlass, and for that reason Metaspace prevents us from allocating at
142   // the very start of class space. So the very first valid Klass position is start-of-range + klab.
143   _lowest_valid_narrow_klass_id =
144       (narrowKlass) (((uintptr_t)(_klass_range_start - _base) + klab) >> _shift);
145   address highest_possible_klass = align_down(_klass_range_end - sizeof(Klass), klab);
146   _highest_valid_narrow_klass_id = (narrowKlass) ((uintptr_t)(highest_possible_klass - _base) >> _shift);
147 }
148 #endif // ASSERT
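A standalone sketch, not part of the file, of the lowest/highest narrowKlass arithmetic above, with made-up numbers; kKlassSize stands in for sizeof(Klass) and align_down is spelled out as a mask.

#include <cstddef>
#include <cstdint>

constexpr uintptr_t kBase       = 0;                           // zero-based encoding
constexpr int       kShift      = 3;
constexpr size_t    kKlab       = size_t(1) << kShift;         // Klass alignment in bytes
constexpr uintptr_t kRangeStart = uintptr_t(4) << 30;          // class range at the 4 GB mark
constexpr uintptr_t kRangeEnd   = kRangeStart + (uintptr_t(1) << 30);
constexpr size_t    kKlassSize  = 512;                         // stand-in for sizeof(Klass)

// narrowKlass 0 is reserved for null, so the first valid Klass sits one alignment unit
// past the range start; the last valid one must still leave room for a whole Klass.
constexpr uint32_t  kLowest     = uint32_t((kRangeStart - kBase + kKlab) >> kShift);
constexpr uintptr_t kHighestPos = (kRangeEnd - kKlassSize) & ~(uintptr_t(kKlab) - 1); // align_down
constexpr uint32_t  kHighest    = uint32_t((kHighestPos - kBase) >> kShift);
static_assert(0 < kLowest && kLowest < kHighest, "ids are ordered and null is excluded");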
149 
150 // Given a klass range [addr, addr+len) and a given encoding scheme, assert that this scheme covers the range, then
151 // set this encoding scheme. Used by CDS at runtime to re-instate the scheme used to pre-compute klass ids for
152 // archived heap objects.
153 void CompressedKlassPointers::initialize_for_given_encoding(address addr, size_t len, address requested_base, int requested_shift) {
154   address const end = addr + len;
155 
156   if (len > max_klass_range_size) {
157     // Class space size is limited to 3G. This can theoretically happen if the CDS archive
158     // is larger than 1G and class space size is set to the maximum possible 3G.
159     vm_exit_during_initialization("Sum of CDS archive size and class space size exceeds 4 GB");
160   }
161 
162   const size_t encoding_range_size = nth_bit(narrow_klass_pointer_bits() + requested_shift);
163   address encoding_range_end = requested_base + encoding_range_size;
164 
165   // Note: it would be technically valid for the encoding base to precede the start of the Klass range. But we only call
166   // this function from CDS, where the requested base always equals the range start.
167   assert(requested_base == addr, "Invalid requested base");
168 
169   _base = requested_base;
170   _shift = requested_shift;
171   _range = encoding_range_size;
172 
173 #ifdef ASSERT
174   _klass_range_start = addr;
175   _klass_range_end = addr + len;
176   calc_lowest_highest_narrow_klass_id();
177   sanity_check_after_initialization();
178 #endif
179 
181 }
182 
183 char* CompressedKlassPointers::reserve_address_space_X(uintptr_t from, uintptr_t to, size_t size, size_t alignment, bool aslr) {
184   alignment = MAX2(Metaspace::reserve_alignment(), alignment);
185   return os::attempt_reserve_memory_between((char*)from, (char*)to, size, alignment, aslr);
186 }
187 
188 char* CompressedKlassPointers::reserve_address_space_for_unscaled_encoding(size_t size, bool aslr) {
189   const size_t unscaled_max = nth_bit(narrow_klass_pointer_bits());
190   return reserve_address_space_X(0, unscaled_max, size, Metaspace::reserve_alignment(), aslr);
191 }
192 
193 char* CompressedKlassPointers::reserve_address_space_for_zerobased_encoding(size_t size, bool aslr) {
194   const size_t unscaled_max = nth_bit(narrow_klass_pointer_bits());
195   const size_t zerobased_max = nth_bit(narrow_klass_pointer_bits() + max_shift());
196   return reserve_address_space_X(unscaled_max, zerobased_max, size, Metaspace::reserve_alignment(), aslr);
197 }
198 
199 char* CompressedKlassPointers::reserve_address_space_for_16bit_move(size_t size, bool aslr) {
200   return reserve_address_space_X(nth_bit(32), nth_bit(48), size, nth_bit(32), aslr);
201 }
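A standalone sketch, not part of the file, of why the reservation above ([2^32, 2^48) at 2^32 alignment) is friendly to a 16-bit-immediate move: the resulting base is q << 32 for some 16-bit q.

#include <cstdint>

constexpr bool is_16bit_move_friendly(uint64_t base) {
  return base >= (uint64_t(1) << 32) &&                   // at or above 4 GB
         base <  (uint64_t(1) << 48) &&                   // below the 48-bit mark
         (base & ((uint64_t(1) << 32) - 1)) == 0;         // 2^32-aligned: low 32 bits are zero
}

// Such a base has non-zero bits only in positions 32..47, so it can be materialized
// with a single 16-bit immediate shifted into the third quadrant.
static_assert( is_16bit_move_friendly(uint64_t(0x7f) << 32), "example: base at the 508 GB mark");
static_assert(!is_16bit_move_friendly(uint64_t(1) << 31),    "below 4 GB: zero-based is used instead");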
202 
203 void CompressedKlassPointers::initialize(address addr, size_t len) {
204 
205   if (len > max_klass_range_size) {
206     // Class space size is limited to 3G. This can theoretically happen if the CDS archive
207     // is larger than 1G and class space size is set to the maximum possible 3G.
208     vm_exit_during_initialization("Sum of CDS archive size and class space size exceeds 4 GB");
209   }
210 
211   // Give the CPU-specific code a shot at a specialized init sequence
212 #ifndef ZERO
213   if (pd_initialize(addr, len)) {
214     return;
215   }
216 #endif
217 
218   if (tiny_classpointer_mode()) {
219 
220     // This handles the case where we - experimentally - reduce the number of
221     // class pointer bits further, such that (shift + num bits) < 32.
222     assert(len <= (size_t)nth_bit(narrow_klass_pointer_bits() + max_shift()),
223            "klass range size exceeds encoding");
224 
225     // In tiny classpointer mode, we don't attempt zero-based encoding.
226     // Instead, we set the base to the start of the klass range and then try
227     // for the smallest shift that still covers the whole range. The reason
228     // is that we want to avoid, if possible, a shift so large that the
229     // resulting Klass alignment exceeds a cache line.
230     _base = addr;
231     _range = len;
232 
233     if (TinyClassPointerShift != 0) {
234       _shift = TinyClassPointerShift;
235     } else {
236       constexpr int log_cacheline = 6;
237       int s = max_shift();
238       while (s > log_cacheline && ((size_t)nth_bit(narrow_klass_pointer_bits() + s - 1) > len)) {
239         s--;
240       }
241       _shift = s;
242     }
243 
244   } else {
245 
246     // In legacy mode, we try, in order of preference:
247     // -unscaled    (base=0 shift=0)
248     // -zero-based  (base=0 shift>0)
249     // -nonzero-base (base>0 shift=0)
250     // Note that base>0 shift>0 should never be needed, since the klass range will
251     // never exceed 4GB.
252     const uintptr_t unscaled_max = nth_bit(narrow_klass_pointer_bits());
253     const uintptr_t zerobased_max = nth_bit(narrow_klass_pointer_bits() + max_shift());
254 
255     address const end = addr + len;
256     if (end <= (address)unscaled_max) {
257       _base = nullptr;
258       _shift = 0;
259     } else {
260       if (end <= (address)zerobased_max) {
261         _base = nullptr;
262         _shift = max_shift();
263       } else {
264         _base = addr;
265         _shift = 0;
266       }
267     }
268     _range = end - _base;
269 
270   }
271 
272 #ifdef ASSERT
273   _klass_range_start = addr;
274   _klass_range_end = addr + len;
275   calc_lowest_highest_narrow_klass_id();
276   sanity_check_after_initialization();
277 #endif
278 }
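A standalone sketch, not part of the file, that mirrors the tiny-cp shift selection above with local stand-ins; the 22-bit width is asserted elsewhere in this file, while the maximum shift of 10 used in the example is an assumption.

#include <cstddef>
#include <cstdint>

// Start at the maximum shift and step down while the next-smaller shift would still
// span more than 'len', never going below log2 of the cache line size (6 for 64-byte lines).
static int choose_tiny_cp_shift(size_t len, int narrow_bits, int max_shift) {
  const int log_cacheline = 6;
  int s = max_shift;
  while (s > log_cacheline && (uint64_t(1) << (narrow_bits + s - 1)) > len) {
    s--;
  }
  return s;
}

int main() {
  // A 128 MB range comes down to shift 6; a full 4 GB range needs the maximum of 10.
  return (choose_tiny_cp_shift(size_t(128) << 20, 22, 10) == 6 &&
          choose_tiny_cp_shift(size_t(4)   << 30, 22, 10) == 10) ? 0 : 1;
}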
279 
280 void CompressedKlassPointers::print_mode(outputStream* st) {
281   st->print_cr("UseCompressedClassPointers %d, UseCompactObjectHeaders %d, "
282                "narrow klass pointer bits %d, max shift %d",
283                UseCompressedClassPointers, UseCompactObjectHeaders,
284                _narrow_klass_pointer_bits, _max_shift);
285   if (_base == (address)-1) {
286     st->print_cr("Narrow klass encoding not initialized");
287     return;
288   }
289   st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d, "
290                "Narrow klass range: " SIZE_FORMAT_X, p2i(base()), shift(),
291                range());
292 #ifdef ASSERT
293   st->print_cr("Klass range: [" PTR_FORMAT "," PTR_FORMAT ")",
294                p2i(_klass_range_start), p2i(_klass_range_end));
295   st->print_cr("Lowest valid nklass id: %u Highest valid nklass id: %u",
296                _lowest_valid_narrow_klass_id, _highest_valid_narrow_klass_id);
297 #endif
298 }
299 
300 #endif // _LP64