void CompressedOops::print_mode(outputStream* st) {
  st->print("Heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
            p2i(_heap_address_range.start()), _heap_address_range.byte_size()/M);

  st->print(", Compressed Oops mode: %s", mode_to_string(mode()));

  if (base() != 0) {
    st->print(": " PTR_FORMAT, p2i(base()));
  }

  if (shift() != 0) {
    st->print(", Oop shift amount: %d", shift());
  }

  if (!use_implicit_null_checks()) {
    st->print(", no protected page in front of the heap");
  }
  st->cr();
}

// For UseCompressedClassPointers.
NarrowPtrStruct CompressedKlassPointers::_narrow_klass = { nullptr, 0, true };

// CompressedClassSpaceSize is set to 1GB, but the class space may appear 3GB away from
// _narrow_ptrs_base during CDS dump.
// (Todo: we should #ifdef out CompressedKlassPointers for 32bit completely and fix all
//  call sites which are compiled for 32bit to LP64_ONLY).
size_t CompressedKlassPointers::_range = 0;

// Given an address range [addr, addr+len) which the encoding is supposed to
// cover, choose base, shift and range.
// The address range is the expected range of uncompressed Klass pointers we
// will encounter (and the implicit promise that there will be no Klass
// structures outside this range).
void CompressedKlassPointers::initialize(address addr, size_t len) {
#ifdef _LP64
  assert(is_valid_base(addr), "Address must be a valid encoding base");
  address const end = addr + len;

  address base;
  int shift;
  size_t range;

  if (UseSharedSpaces || DumpSharedSpaces) {

    // Special requirements if CDS is active:
    // Encoding base and shift must be the same between dump and run time.
    // CDS takes care that the SharedBaseAddress and CompressedClassSpaceSize
    // are the same. Archive size will probably differ at runtime, but it can
    // only be smaller than at dump time, never larger, since archives get
    // shrunk at the end of the dump process.
    // From that it follows that the range [addr, addr+len) we are handed at
    // runtime will start at the same address as at dump time, and its len
    // may be smaller at runtime than it was at dump time.
    //
    // To be very careful here, we avoid any optimizations and just keep using
    // the same address and shift value. Specifically, we avoid using zero-based
    // encoding. We also set the expected value range to 4G (the encoding range
    // cannot be larger than that).

    base = addr;

    // JDK-8265705
    // This is a temporary fix for aarch64: there, if the range-to-be-encoded is located
    // below 32g, either the encoding base should be zero, or the base should be aligned
    // to 4G and the shift should be zero. The simplest way to fix this for now is to
    // force the shift to zero for both runtime and dump time.
    // Note however that this is not a perfect solution. Ideally this whole function
    // should be CDS agnostic; that would simplify it - and testing - a lot. See JDK-8267141
    // for details.
    shift = 0;

    // This must be true since at dump time cds+ccs is 4G, and at runtime it can
    // only be smaller, see comment above.
    assert(len <= 4 * G, "Encoding range cannot be larger than 4G");
    range = 4 * G;

  } else {

    // Otherwise we attempt to use a zero base if the range fits into the lower 32G.
    if (end <= (address)KlassEncodingMetaspaceMax) {
      base = 0;
    } else {
      base = addr;
    }

    // Highest offset a Klass* can ever have in relation to base.
    range = end - base;

    // We may not even need a shift if the range fits into 32 bits:
    const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
    if (range < UnscaledClassSpaceMax) {
      shift = 0;
    } else {
      shift = LogKlassAlignmentInBytes;
    }

  }

  set_base(base);
  set_shift(shift);
  set_range(range);
#else
  fatal("64bit only.");
#endif
}
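
// Illustrative sketch (not part of the original sources): the arithmetic implied by the
// base/shift chosen in initialize() above. The real encode/decode helpers live in the
// CompressedKlassPointers inline header; the helper names below are hypothetical and use
// uint32_t where HotSpot uses narrowKlass. With CDS active (shift == 0, range == 4G) the
// raw offset from base must fit into 32 bits; with zero base and zero shift, decoding
// degenerates to a plain widening of the narrow value.
#ifdef _LP64
static inline uint32_t example_encode_klass_ptr(address k) {
  // Offset from the encoding base, scaled down by the shift.
  return (uint32_t)((uintptr_t)(k - CompressedKlassPointers::base()) >> CompressedKlassPointers::shift());
}
static inline address example_decode_klass_ptr(uint32_t nk) {
  // Reverse: scale the narrow value back up and rebase it.
  return CompressedKlassPointers::base() + ((uintptr_t)nk << CompressedKlassPointers::shift());
}
#endif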

// Given an address p, return true if p can be used as an encoding base.
// (Some platforms have restrictions on what constitutes a valid base address.)
bool CompressedKlassPointers::is_valid_base(address p) {
#ifdef AARCH64
  // Below 32G, base must be aligned to 4G.
  // Above that point, base must be aligned to 32G.
  if (p < (address)(32 * G)) {
    return is_aligned(p, 4 * G);
  }
  return is_aligned(p, (4 << LogKlassAlignmentInBytes) * G);
#else
  return true;
#endif
}
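
// Illustrative sketch (not part of the original sources): how a caller probing for a
// class-space attach point might use is_valid_base(). The helper name is hypothetical;
// the real reservation logic lives elsewhere. Since a 32G-aligned address is also
// 4G-aligned, rounding a candidate up to 32G always satisfies the aarch64 restrictions
// above (and is trivially valid on platforms where is_valid_base() always returns true).
// Overflow at the very top of the address space is ignored here.
#ifdef _LP64
static inline address example_next_valid_base(address candidate) {
  const uintptr_t alignment = 32 * G;
  const uintptr_t aligned = ((uintptr_t)candidate + alignment - 1) & ~(alignment - 1);
  assert(CompressedKlassPointers::is_valid_base((address)aligned), "must be a valid encoding base");
  return (address)aligned;
}
#endif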

void CompressedKlassPointers::print_mode(outputStream* st) {
  st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d, "
               "Narrow klass range: " SIZE_FORMAT_X, p2i(base()), shift(),
               range());
}

void CompressedKlassPointers::set_base(address base) {
  assert(UseCompressedClassPointers, "no compressed klass ptrs?");
  _narrow_klass._base = base;
}

void CompressedKlassPointers::set_shift(int shift) {
  assert(shift == 0 || shift == LogKlassAlignmentInBytes, "invalid shift for klass ptrs");
  _narrow_klass._shift = shift;
}

void CompressedKlassPointers::set_range(size_t range) {
  assert(UseCompressedClassPointers, "no compressed klass ptrs?");
  _range = range;
}
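
// Illustrative sketch (not part of the original sources): the invariant that ties base,
// shift and range together - every Klass* in [base, base + range) must map to a 32-bit
// narrow value after subtracting the base and shifting. The helper name is hypothetical;
// for the values chosen by initialize() above this holds as long as the caller keeps its
// promise that no Klass structures live outside the given range.
#ifdef _LP64
static inline bool example_klass_encoding_consistent() {
  const uint64_t max_encodable = (uint64_t(max_juint) + 1) << CompressedKlassPointers::shift();
  return CompressedKlassPointers::range() <= max_encodable;
}
#endif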