1 /*
2 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "logging/log.hpp"
26 #include "memory/memoryReserver.hpp"
27 #include "oops/compressedOops.hpp"
28 #include "oops/markWord.hpp"
29 #include "runtime/globals_extension.hpp"
30 #include "runtime/java.hpp"
31 #include "runtime/os.inline.hpp"
32 #include "utilities/formatBuffer.hpp"
33 #include "utilities/globalDefinitions.hpp"
34 #include "utilities/powerOfTwo.hpp"
35
36 static void sanity_check_size_and_alignment(size_t size, size_t alignment) {
37 assert(size > 0, "Precondition");
38
39 DEBUG_ONLY(const size_t granularity = os::vm_allocation_granularity());
40 assert(is_aligned(size, granularity), "size not aligned to os::vm_allocation_granularity()");
41
42 assert(alignment >= granularity, "Must be set");
43 assert(is_power_of_2(alignment), "not a power of 2");
44 assert(is_aligned(alignment, granularity), "alignment not aligned to os::vm_allocation_granularity()");
45 }
46
47 static void sanity_check_page_size(size_t page_size) {
48 assert(page_size >= os::vm_page_size(), "Invalid page size");
49 assert(is_power_of_2(page_size), "Invalid page size");
50 }
51
52 static void sanity_check_arguments(size_t size, size_t alignment, size_t page_size) {
53 sanity_check_size_and_alignment(size, alignment);
54 sanity_check_page_size(page_size);
55 }
56
57 static bool large_pages_requested() {
58 return UseLargePages &&
59 (!FLAG_IS_DEFAULT(UseLargePages) || !FLAG_IS_DEFAULT(LargePageSizeInBytes));
60 }
61
62 static void log_on_large_pages_failure(char* req_addr, size_t bytes) {
63 if (large_pages_requested()) {
64 // Compressed oops logging.
65 log_debug(gc, heap, coops)("Reserve regular memory without large pages");
66 // JVM style warning that we did not succeed in using large pages.
67 warning("Failed to reserve and commit memory using large pages. "
68 "req_addr: " PTR_FORMAT " bytes: %zu",
69 p2i(req_addr), bytes);
70 }
71 }
72
73 static bool use_explicit_large_pages(size_t page_size) {
74 return !os::can_commit_large_page_memory() &&
75 page_size != os::vm_page_size();
76 }
77
78 static char* reserve_memory_inner(char* requested_address,
79 size_t size,
80 size_t alignment,
81 bool exec,
82 MemTag mem_tag) {
83 // If the memory was requested at a particular address, use
84 // os::attempt_reserve_memory_at() to avoid mapping over something
85 // important. If the reservation fails, return null.
86 if (requested_address != nullptr) {
87 assert(is_aligned(requested_address, alignment),
88 "Requested address " PTR_FORMAT " must be aligned to %zu",
89 p2i(requested_address), alignment);
90 return os::attempt_reserve_memory_at(requested_address, size, mem_tag, exec);
91 }
92
93 // Optimistically assume that the OS returns an aligned base pointer.
94 // When reserving a large address range, most OSes seem to align to at
95 // least 64K.
96 char* base = os::reserve_memory(size, mem_tag, exec);
97 if (is_aligned(base, alignment)) {
98 return base;
99 }
100
101 // Base not aligned, retry.
102 os::release_memory(base, size);
103
104 // Map using the requested alignment.
105 return os::reserve_memory_aligned(size, alignment, mem_tag, exec);
106 }
107
108 ReservedSpace MemoryReserver::reserve_memory(char* requested_address,
109 size_t size,
110 size_t alignment,
111 size_t page_size,
112 bool exec,
113 MemTag mem_tag) {
114 char* base = reserve_memory_inner(requested_address, size, alignment, exec, mem_tag);
115
116 if (base != nullptr) {
117 return ReservedSpace(base, size, alignment, page_size, exec, false /* special */);
118 }
119
120 // Failed
121 return {};
122 }
123
124 ReservedSpace MemoryReserver::reserve_memory_special(char* requested_address,
125 size_t size,
126 size_t alignment,
127 size_t page_size,
128 bool exec) {
129 log_trace(pagesize)("Attempt special mapping: size: " EXACTFMT ", alignment: " EXACTFMT,
130 EXACTFMTARGS(size),
131 EXACTFMTARGS(alignment));
132
133 char* base = os::reserve_memory_special(size, alignment, page_size, requested_address, exec);
134
135 if (base != nullptr) {
136 assert(is_aligned(base, alignment),
137 "reserve_memory_special() returned an unaligned address, "
138 "base: " PTR_FORMAT " alignment: 0x%zx",
139 p2i(base), alignment);
140
141 return ReservedSpace(base, size, alignment, page_size, exec, true /* special */);
142 }
143
144 // Failed
145 return {};
146 }
147
148 ReservedSpace MemoryReserver::reserve(char* requested_address,
149 size_t size,
150 size_t alignment,
151 size_t page_size,
152 bool executable,
153 MemTag mem_tag) {
154 sanity_check_arguments(size, alignment, page_size);
155
156 // Reserve the memory.
157
158 // There are basically three different cases that we need to handle:
159 // 1. Mapping backed by a file
160 // 2. Mapping backed by explicit large pages
161 // 3. Mapping backed by normal pages or transparent huge pages
162 // The first two have restrictions that requires the whole mapping to be
163 // committed up front. To record this the ReservedSpace is marked 'special'.
164
165 // == Case 1 ==
166 // This case is contained within the HeapReserver
167
168 // == Case 2 ==
169 if (use_explicit_large_pages(page_size)) {
170 // System can't commit large pages i.e. use transparent huge pages and
171 // the caller requested large pages. To satisfy this request we use
172 // explicit large pages and these have to be committed up front to ensure
173 // no reservations are lost.
174 do {
175 ReservedSpace reserved = reserve_memory_special(requested_address, size, alignment, page_size, executable);
176 if (reserved.is_reserved()) {
177 // Successful reservation using large pages.
178 return reserved;
179 }
180 page_size = os::page_sizes().next_smaller(page_size);
181 } while (page_size > os::vm_page_size());
182
183 // Failed to reserve explicit large pages, do proper logging.
184 log_on_large_pages_failure(requested_address, size);
185 // Now fall back to normal reservation.
186 assert(page_size == os::vm_page_size(), "inv");
187 }
188
189 // == Case 3 ==
190 return reserve_memory(requested_address, size, alignment, page_size, executable, mem_tag);
191 }
192
193 ReservedSpace MemoryReserver::reserve(char* requested_address,
194 size_t size,
195 size_t alignment,
196 size_t page_size,
197 MemTag mem_tag) {
198 return reserve(requested_address,
199 size,
200 alignment,
201 page_size,
202 !ExecMem,
203 mem_tag);
204 }
205
206
207 ReservedSpace MemoryReserver::reserve(size_t size,
208 size_t alignment,
209 size_t page_size,
210 MemTag mem_tag) {
211 return reserve(nullptr /* requested_address */,
212 size,
213 alignment,
214 page_size,
215 mem_tag);
216 }
217
218 ReservedSpace MemoryReserver::reserve(size_t size,
219 MemTag mem_tag) {
220 // Want to use large pages where possible. If the size is
221 // not large page aligned the mapping will be a mix of
222 // large and normal pages.
223 size_t page_size = os::page_size_for_region_unaligned(size, 1);
224 size_t alignment = os::vm_allocation_granularity();
225
226 return reserve(size,
227 alignment,
228 page_size,
229 mem_tag);
230 }
231
232 void MemoryReserver::release(const ReservedSpace& reserved) {
233 assert(reserved.is_reserved(), "Precondition");
234 os::release_memory(reserved.base(), reserved.size());
235 }
236
237 static char* map_memory_to_file(char* requested_address,
238 size_t size,
239 size_t alignment,
240 int fd,
241 MemTag mem_tag) {
242 // If the memory was requested at a particular address, use
243 // os::attempt_reserve_memory_at() to avoid mapping over something
244 // important. If the reservation fails, return null.
245 if (requested_address != nullptr) {
246 assert(is_aligned(requested_address, alignment),
247 "Requested address " PTR_FORMAT " must be aligned to %zu",
248 p2i(requested_address), alignment);
249 return os::attempt_map_memory_to_file_at(requested_address, size, fd, mem_tag);
250 }
251
252 // Optimistically assume that the OS returns an aligned base pointer.
253 // When reserving a large address range, most OSes seem to align to at
254 // least 64K.
255 char* base = os::map_memory_to_file(size, fd, mem_tag);
256 if (is_aligned(base, alignment)) {
257 return base;
258 }
259
260
261 // Base not aligned, retry.
262 os::unmap_memory(base, size);
263
264 // Map using the requested alignment.
265 return os::map_memory_to_file_aligned(size, alignment, fd, mem_tag);
266 }
267
268 ReservedSpace FileMappedMemoryReserver::reserve(char* requested_address,
269 size_t size,
270 size_t alignment,
271 int fd,
272 MemTag mem_tag) {
273 sanity_check_size_and_alignment(size, alignment);
274
275 char* base = map_memory_to_file(requested_address, size, alignment, fd, mem_tag);
276
277 if (base != nullptr) {
278 return ReservedSpace(base, size, alignment, os::vm_page_size(), !ExecMem, true /* special */);
279 }
280
281 // Failed
282 return {};
283 }
284
285 ReservedSpace CodeMemoryReserver::reserve(size_t size,
286 size_t alignment,
287 size_t page_size) {
288 return MemoryReserver::reserve(nullptr /* requested_address */,
289 size,
290 alignment,
291 page_size,
292 ExecMem,
293 mtCode);
294 }
295
296 ReservedHeapSpace HeapReserver::Instance::reserve_uncompressed_oops_heap(size_t size,
297 size_t alignment,
298 size_t page_size) {
299 ReservedSpace reserved = reserve_memory(size, alignment, page_size);
300
301 if (reserved.is_reserved()) {
302 return ReservedHeapSpace(reserved, 0 /* noaccess_prefix */);
303 }
304
305 // Failed
306 return {};
307 }
308
309
310 static int maybe_create_file(const char* heap_allocation_directory) {
311 if (heap_allocation_directory == nullptr) {
312 return -1;
313 }
314
315 int fd = os::create_file_for_heap(heap_allocation_directory);
316 if (fd == -1) {
317 vm_exit_during_initialization(
318 err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
319 }
320
321 return fd;
322 }
323
// Opens the heap backing file (if a directory was given) and remembers its
// file descriptor; -1 means the heap is not file-backed.
HeapReserver::Instance::Instance(const char* heap_allocation_directory)
  : _fd(maybe_create_file(heap_allocation_directory)) {}
326
327 HeapReserver::Instance::~Instance() {
328 if (_fd != -1) {
329 ::close(_fd);
330 }
331 }
332
333 ReservedSpace HeapReserver::Instance::reserve_memory(size_t size,
334 size_t alignment,
335 size_t page_size,
336 char* requested_address) {
337
338 // There are basically three different cases that we need to handle below:
339 // 1. Mapping backed by a file
340 // 2. Mapping backed by explicit large pages
341 // 3. Mapping backed by normal pages or transparent huge pages
342 // The first two have restrictions that requires the whole mapping to be
343 // committed up front. To record this the ReservedSpace is marked 'special'.
344
345 // == Case 1 ==
346 if (_fd != -1) {
347 // When there is a backing file directory for this space then whether
348 // large pages are allocated is up to the filesystem of the backing file.
349 // So UseLargePages is not taken into account for this reservation.
350 //
351 // If requested, let the user know that explicit large pages can't be used.
352 if (use_explicit_large_pages(page_size) && large_pages_requested()) {
353 log_debug(gc, heap)("Cannot allocate explicit large pages for Java Heap when AllocateHeapAt option is set.");
354 }
355
356 // Always return, not possible to fall back to reservation not using a file.
357 return FileMappedMemoryReserver::reserve(requested_address, size, alignment, _fd, mtJavaHeap);
358 }
359
360 // == Case 2 & 3 ==
361 return MemoryReserver::reserve(requested_address, size, alignment, page_size, mtJavaHeap);
362 }
363
364 // Compressed oop support is not relevant in 32bit builds.
365 #ifdef _LP64
366
367 void HeapReserver::Instance::release(const ReservedSpace& reserved) {
368 if (reserved.is_reserved()) {
369 if (_fd == -1) {
370 os::release_memory(reserved.base(), reserved.size());
371 } else {
372 os::unmap_memory(reserved.base(), reserved.size());
373 }
374 }
375 }
376
377 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
378 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
379 // might still fulfill the wishes of the caller.
380 // Assures the memory is aligned to 'alignment'.
381 ReservedSpace HeapReserver::Instance::try_reserve_memory(size_t size,
382 size_t alignment,
383 size_t page_size,
384 char* requested_address) {
385 // Try to reserve the memory for the heap.
386 log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
387 " heap of size 0x%zx",
388 p2i(requested_address),
389 size);
390
391 ReservedSpace reserved = reserve_memory(size, alignment, page_size, requested_address);
392
393 if (reserved.is_reserved()) {
394 // Check alignment constraints.
395 assert(reserved.alignment() == alignment, "Unexpected");
396 assert(is_aligned(reserved.base(), alignment), "Unexpected");
397 return reserved;
398 }
399
400 // Failed
401 return {};
402 }
403
// Walks a range of candidate attach points from highest_start down to
// lowest_start, trying up to HeapSearchSteps reservations, and returns the
// first one that lands inside [aligned_heap_base_min_address, upper_bound).
// Returns an empty ReservedSpace when no attempt succeeds.
ReservedSpace HeapReserver::Instance::try_reserve_range(char *highest_start,
                                                        char *lowest_start,
                                                        size_t attach_point_alignment,
                                                        char *aligned_heap_base_min_address,
                                                        char *upper_bound,
                                                        size_t size,
                                                        size_t alignment,
                                                        size_t page_size) {
  assert(is_aligned(highest_start, attach_point_alignment), "precondition");
  assert(is_aligned(lowest_start, attach_point_alignment), "precondition");

  // Spread num_attempts_to_try probe addresses evenly across the attach
  // range, stepping by a multiple of the attach point alignment.
  const size_t attach_range = pointer_delta(highest_start, lowest_start, sizeof(char));
  // +1: highest_start itself is also a candidate (zero-length range => 1 attempt).
  const size_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
  const size_t num_attempts_to_try   = MIN2((size_t)HeapSearchSteps, num_attempts_possible);
  const size_t num_intervals = num_attempts_to_try - 1;
  // Guard against division by zero when only one attempt is made.
  const size_t stepsize = num_intervals == 0 ? 0 : align_down(attach_range / num_intervals, attach_point_alignment);

  // Probe from the top of the range downwards.
  for (size_t i = 0; i < num_attempts_to_try; ++i) {
    char* const attach_point = highest_start - stepsize * i;
    ReservedSpace reserved = try_reserve_memory(size, alignment, page_size, attach_point);

    if (reserved.is_reserved()) {
      // The OS may have placed us elsewhere; only accept reservations that
      // lie fully within [aligned_heap_base_min_address, upper_bound).
      if (reserved.base() >= aligned_heap_base_min_address &&
          size <= (size_t)(upper_bound - reserved.base())) {
        // Got a successful reservation.
        return reserved;
      }

      // Out of bounds - give it back and try the next attach point.
      release(reserved);
    }
  }

  // Failed
  return {};
}
439
// Size constants used to build the disjoint-base attach address table below.
#define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
#define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
#define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))

// Helper for heap allocation. Returns an array with addresses
// (OS-specific) which are suited for disjoint base mode. Array is
// null terminated.
static char** get_attach_addresses_for_disjoint_mode() {
  // NOTE: this static array is mutated below (entries truncated to honor
  // HeapSearchSteps), so repeated calls see the already-trimmed table.
  static uint64_t addresses[] = {
     2 * SIZE_32G,
     3 * SIZE_32G,
     4 * SIZE_32G,
     8 * SIZE_32G,
    10 * SIZE_32G,
     1 * SIZE_64K * SIZE_32G,
     2 * SIZE_64K * SIZE_32G,
     3 * SIZE_64K * SIZE_32G,
     4 * SIZE_64K * SIZE_32G,
    16 * SIZE_64K * SIZE_32G,
    32 * SIZE_64K * SIZE_32G,
    34 * SIZE_64K * SIZE_32G,
    0
  };

  // Sort out addresses smaller than HeapBaseMinAddress. This assumes
  // the array is sorted.
  // Skips entries below OopEncodingHeapMax too - disjoint base mode only
  // makes sense above the zerobased limit.
  uint i = 0;
  while (addresses[i] != 0 &&
         (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
    i++;
  }
  uint start = i;

  // Avoid more steps than requested.
  // Terminate the (logical) array early by writing a 0 sentinel in place.
  i = 0;
  while (addresses[start+i] != 0) {
    if (i == HeapSearchSteps) {
      addresses[start+i] = 0;
      break;
    }
    i++;
  }

  return (char**) &addresses[start];
}
485
// Create protection page at the beginning of the space, so that implicit
// null checks on compressed oops trap when they touch the heap base.
// Returns the usable part of the reservation (the prefix stripped off).
static ReservedSpace establish_noaccess_prefix(const ReservedSpace& reserved, size_t noaccess_prefix) {
  assert(reserved.alignment() >= os::vm_page_size(), "must be at least page size big");
  assert(reserved.is_reserved(), "should only be called on a reserved memory area");

  // Protection is only needed when the heap ends above the zerobased limit
  // (heap-based mode), or when a compatible layout is being enforced.
  if (reserved.end() > (char *)OopEncodingHeapMax || UseCompatibleCompressedOops) {
    assert((reserved.base() != nullptr), "sanity");
    // Platform restrictions: Win64 large pages and some AIX page setups
    // cannot protect a sub-range of the mapping.
    if (true
        WIN64_ONLY(&& !UseLargePages)
        AIX_ONLY(&& (os::Aix::supports_64K_mmap_pages() || os::vm_page_size() == 4*K))) {
      // Protect memory at the base of the allocated region.
      if (!os::protect_memory(reserved.base(), noaccess_prefix, os::MEM_PROT_NONE, reserved.special())) {
        fatal("cannot protect protection page");
      }
      log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
                                 PTR_FORMAT " / %zd bytes",
                                 p2i(reserved.base()),
                                 noaccess_prefix);
      assert(CompressedOops::use_implicit_null_checks() == true, "not initialized?");
    } else {
      // Could not protect the page: implicit null checks must not rely on
      // the noaccess prefix.
      CompressedOops::set_use_implicit_null_checks(false);
    }
  }

  // Hand back only the part after the prefix.
  return reserved.last_part(noaccess_prefix);
}
512
// Reserves a heap placed so that compressed oops can be decoded as cheaply
// as possible. Placement schemes are tried in order of decreasing
// optimization potential: unscaled, zerobased, disjoint base, and finally
// an arbitrary address with a noaccess prefix.
// Returns an empty ReservedHeapSpace on failure.
ReservedHeapSpace HeapReserver::Instance::reserve_compressed_oops_heap(const size_t size, size_t alignment, size_t page_size) {
  // The prefix must satisfy both the page size (for protection) and the
  // heap alignment (so the heap after the prefix stays aligned).
  const size_t noaccess_prefix_size = lcm(os::vm_page_size(), alignment);
  const size_t granularity = os::vm_allocation_granularity();

  assert(size + noaccess_prefix_size <= OopEncodingHeapMax, "can not allocate compressed oop heap for this size");
  assert(is_aligned(size, granularity), "size not aligned to os::vm_allocation_granularity()");

  assert(alignment >= os::vm_page_size(), "alignment too small");
  assert(is_aligned(alignment, granularity), "alignment not aligned to os::vm_allocation_granularity()");
  assert(is_power_of_2(alignment), "not a power of 2");

  // The necessary attach point alignment for generated wish addresses.
  // This is needed to increase the chance of attaching for mmap and shmat.
  // AIX is the only platform that uses System V shm for reserving virtual memory.
  // In this case, the required alignment of the allocated size (64K) and the alignment
  // of possible start points of the memory region (256M) differ.
  // This is not reflected by os_allocation_granularity().
  // The logic here is dual to the one in pd_reserve_memory in os_aix.cpp
  const size_t os_attach_point_alignment =
    AIX_ONLY(os::vm_page_size() == 4*K ? 4*K : 256*M)
    NOT_AIX(os::vm_allocation_granularity());

  const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);

  uintptr_t aligned_heap_base_min_address = align_up(MAX2(HeapBaseMinAddress, alignment), alignment);
  uintptr_t heap_end_address = aligned_heap_base_min_address + size;

  // Determine which encoding modes are theoretically reachable for a heap
  // starting at the minimum base address.
  bool unscaled  = false;
  bool zerobased = false;
  if (!UseCompatibleCompressedOops) { // heap base is not enforced
    unscaled  = (heap_end_address <= UnscaledOopHeapMax);
    zerobased = (heap_end_address <= OopEncodingHeapMax);
  }
  // Only non-zerobased heaps need the protection page for implicit null checks.
  size_t noaccess_prefix = !zerobased ? noaccess_prefix_size : 0;

  ReservedSpace reserved{};

  // Attempt to alloc at user-given address.
  if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) || UseCompatibleCompressedOops) {
    reserved = try_reserve_memory(size + noaccess_prefix, alignment, page_size, (char*)aligned_heap_base_min_address);
    if (reserved.base() != (char*)aligned_heap_base_min_address) { // Enforce this exact address.
      release(reserved);
      reserved = {};
    }
  }

  // Keep heap at HeapBaseMinAddress.
  if (!reserved.is_reserved()) {

    // Try to allocate the heap at addresses that allow efficient oop compression.
    // Different schemes are tried, in order of decreasing optimization potential.
    //
    // For this, try_reserve_heap() is called with the desired heap base addresses.
    // A call into the os layer to allocate at a given address can return memory
    // at a different address than requested. Still, this might be memory at a useful
    // address. try_reserve_heap() always returns this allocated memory, as only here
    // the criteria for a good heap are checked.

    // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
    // Give it several tries from top of range to bottom.
    if (unscaled) {

      // Calc address range within we try to attach (range of possible start addresses).
      uintptr_t const highest_start = align_down(UnscaledOopHeapMax - size, attach_point_alignment);
      uintptr_t const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
      assert(lowest_start <= highest_start, "lowest: " INTPTR_FORMAT " highest: " INTPTR_FORMAT ,
             lowest_start, highest_start);
      reserved = try_reserve_range((char*)highest_start, (char*)lowest_start, attach_point_alignment,
                                   (char*)aligned_heap_base_min_address, (char*)UnscaledOopHeapMax, size, alignment, page_size);
    }

    // zerobased: Attempt to allocate in the lower 32G.
    const uintptr_t zerobased_max = OopEncodingHeapMax;

    // Give it several tries from top of range to bottom.
    if (zerobased &&                          // Zerobased theoretical possible.
        ((!reserved.is_reserved()) ||         // No previous try succeeded.
         (reserved.end() > (char*)zerobased_max))) {  // Unscaled delivered an arbitrary address.

      // Release previous reservation
      release(reserved);

      // Calc address range within we try to attach (range of possible start addresses).
      uintptr_t const highest_start = align_down(zerobased_max - size, attach_point_alignment);
      // Need to be careful about size being guaranteed to be less
      // than UnscaledOopHeapMax due to type constraints.
      uintptr_t lowest_start = aligned_heap_base_min_address;
      if (size < UnscaledOopHeapMax) {
        // Don't overlap the unscaled range already probed above.
        lowest_start = MAX2<uintptr_t>(lowest_start, UnscaledOopHeapMax - size);
      }
      lowest_start = align_up(lowest_start, attach_point_alignment);
      assert(lowest_start <= highest_start, "lowest: " INTPTR_FORMAT " highest: " INTPTR_FORMAT,
             lowest_start, highest_start);
      reserved = try_reserve_range((char*)highest_start, (char*)lowest_start, attach_point_alignment,
                                   (char*)aligned_heap_base_min_address, (char*)zerobased_max, size, alignment, page_size);
    }

    // Now we go for heaps with base != 0. We need a noaccess prefix to efficiently
    // implement null checks.
    noaccess_prefix = noaccess_prefix_size;

    // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjointbase mode.
    char** addresses = get_attach_addresses_for_disjoint_mode();
    int i = 0;
    while ((addresses[i] != nullptr) &&           // End of array not yet reached.
           ((!reserved.is_reserved()) ||          // No previous try succeeded.
            (reserved.end() > (char*)zerobased_max &&   // Not zerobased or unscaled address.
             // Not disjoint address.
             !CompressedOops::is_disjoint_heap_base_address((address)reserved.base())))) {

      // Release previous reservation
      release(reserved);

      char* const attach_point = addresses[i];
      assert((uintptr_t)attach_point >= aligned_heap_base_min_address, "Flag support broken");
      reserved = try_reserve_memory(size + noaccess_prefix, alignment, page_size, attach_point);
      i++;
    }

    // Last, desperate try without any placement.
    if (!reserved.is_reserved()) {
      log_trace(gc, heap, coops)("Trying to allocate at address null heap of size 0x%zx", size + noaccess_prefix);
      assert(alignment >= os::vm_page_size(), "Unexpected");
      reserved = reserve_memory(size + noaccess_prefix, alignment, page_size);
    }
  }

  // No more reserve attempts

  if (reserved.is_reserved()) {
    // Successfully found and reserved memory for the heap.

    if (reserved.size() > size) {
      // We reserved heap memory with a noaccess prefix.

      assert(reserved.size() == size + noaccess_prefix, "Prefix should be included");
      // It can happen we get a zerobased/unscaled heap with noaccess prefix,
      // if we had to try at arbitrary address.
      reserved = establish_noaccess_prefix(reserved, noaccess_prefix);
      assert(reserved.size() == size, "Prefix should be gone");
      return ReservedHeapSpace(reserved, noaccess_prefix);
    }

    // We reserved heap memory without a noaccess prefix.
    assert(!UseCompatibleCompressedOops, "noaccess prefix is missing");
    return ReservedHeapSpace(reserved, 0 /* noaccess_prefix */);
  }

  // Failed
  return {};
}
664
665 #endif // _LP64
666
667 ReservedHeapSpace HeapReserver::Instance::reserve_heap(size_t size, size_t alignment, size_t page_size) {
668 if (UseCompressedOops) {
669 #ifdef _LP64
670 return reserve_compressed_oops_heap(size, alignment, page_size);
671 #endif
672 } else {
673 return reserve_uncompressed_oops_heap(size, alignment, page_size);
674 }
675 }
676
677 ReservedHeapSpace HeapReserver::reserve(size_t size, size_t alignment, size_t page_size, const char* heap_allocation_directory) {
678 sanity_check_arguments(size, alignment, page_size);
679
680 assert(alignment != 0, "Precondition");
681 assert(is_aligned(size, alignment), "Precondition");
682
683 Instance instance(heap_allocation_directory);
684
685 return instance.reserve_heap(size, alignment, page_size);
686 }