/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "jvm.h"
#include "logging/log.hpp"
#include "memory/memoryReserver.hpp"
#include "oops/compressedOops.hpp"
#include "oops/markWord.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "runtime/os.inline.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"

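// Validate that 'size' and 'alignment' are usable for an OS-level reservation:
// both must be multiples of os::vm_allocation_granularity(), and 'alignment'
// must be a power of 2 no smaller than the allocation granularity.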
static void sanity_check_size_and_alignment(size_t size, size_t alignment) {
  assert(size > 0, "Precondition");

  DEBUG_ONLY(const size_t granularity = os::vm_allocation_granularity());
  assert(is_aligned(size, granularity), "size not aligned to os::vm_allocation_granularity()");

  assert(alignment >= granularity, "alignment must be at least os::vm_allocation_granularity()");
  assert(is_power_of_2(alignment), "not a power of 2");
  assert(is_aligned(alignment, granularity), "alignment not aligned to os::vm_allocation_granularity()");
}

static void sanity_check_page_size(size_t page_size) {
  assert(page_size >= os::vm_page_size(), "Invalid page size");
  assert(is_power_of_2(page_size), "Invalid page size");
}

static void sanity_check_arguments(size_t size, size_t alignment, size_t page_size) {
  sanity_check_size_and_alignment(size, alignment);
  sanity_check_page_size(page_size);
}

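// Returns true if UseLargePages is enabled and large pages were explicitly
// requested on the command line (via UseLargePages or LargePageSizeInBytes),
// as opposed to being enabled by default.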
static bool large_pages_requested() {
  return UseLargePages &&
         (!FLAG_IS_DEFAULT(UseLargePages) || !FLAG_IS_DEFAULT(LargePageSizeInBytes));
}

static void log_on_large_pages_failure(char* req_addr, size_t bytes) {
  if (large_pages_requested()) {
    // Compressed oops logging.
    log_debug(gc, heap, coops)("Reserve regular memory without large pages");
    // JVM style warning that we did not succeed in using large pages.
    char msg[128];
    jio_snprintf(msg, sizeof(msg), "Failed to reserve and commit memory using large pages. "
                                   "req_addr: " PTR_FORMAT " bytes: %zu",
                                   req_addr, bytes);
    warning("%s", msg);
  }
}

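// Explicit (pre-committed) large pages are needed when the caller asked for a
// page size larger than the base page size but the OS cannot commit large
// pages on demand (transparent huge page style commit).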
static bool use_explicit_large_pages(size_t page_size) {
  return !os::can_commit_large_page_memory() &&
         page_size != os::vm_page_size();
}

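// Reserve 'size' bytes with the given alignment, either at 'requested_address'
// or anywhere. Returns the base address on success and null on failure.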
static char* reserve_memory_inner(char* requested_address,
                                  size_t size,
                                  size_t alignment,
                                  bool exec,
                                  MemTag mem_tag) {
  // If the memory was requested at a particular address, use
  // os::attempt_reserve_memory_at() to avoid mapping over something
  // important.  If the reservation fails, return null.
  if (requested_address != nullptr) {
    assert(is_aligned(requested_address, alignment),
           "Requested address " PTR_FORMAT " must be aligned to %zu",
           p2i(requested_address), alignment);
    return os::attempt_reserve_memory_at(requested_address, size, mem_tag, exec);
  }

  // Optimistically assume that the OS returns an aligned base pointer.
  // When reserving a large address range, most OSes seem to align to at
  // least 64K.
  char* base = os::reserve_memory(size, mem_tag, exec);
  if (is_aligned(base, alignment)) {
    return base;
  }

  // Base not aligned, retry.
  if (!os::release_memory(base, size)) {
    fatal("os::release_memory failed");
  }

  // Map using the requested alignment.
  return os::reserve_memory_aligned(size, alignment, mem_tag, exec);
}

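// Wraps reserve_memory_inner() and packages a successful reservation into a
// ReservedSpace backed by normal pages ('special' is false). Returns an empty
// ReservedSpace on failure.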
ReservedSpace MemoryReserver::reserve_memory(char* requested_address,
                                             size_t size,
                                             size_t alignment,
                                             bool exec,
                                             MemTag mem_tag) {
  char* base = reserve_memory_inner(requested_address, size, alignment, exec, mem_tag);

  if (base != nullptr) {
    return ReservedSpace(base, size, alignment, os::vm_page_size(), exec, false /* special */);
  }

  // Failed
  return {};
}

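// Reserve memory backed by explicit large pages. The whole mapping is
// committed up front, so the resulting ReservedSpace is marked 'special'.
// Returns an empty ReservedSpace on failure.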
ReservedSpace MemoryReserver::reserve_memory_special(char* requested_address,
                                                     size_t size,
                                                     size_t alignment,
                                                     size_t page_size,
                                                     bool exec) {
  log_trace(pagesize)("Attempt special mapping: size: " EXACTFMT ", alignment: " EXACTFMT,
                      EXACTFMTARGS(size),
                      EXACTFMTARGS(alignment));

  char* base = os::reserve_memory_special(size, alignment, page_size, requested_address, exec);

  if (base != nullptr) {
    assert(is_aligned(base, alignment),
           "reserve_memory_special() returned an unaligned address, "
           "base: " PTR_FORMAT " alignment: 0x%zx",
           p2i(base), alignment);

    return ReservedSpace(base, size, alignment, page_size, exec, true /* special */);
  }

  // Failed
  return {};
}

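// Main entry point for plain (non-heap, non-file-backed) reservations. Chooses
// between an explicit large page mapping and a normal mapping based on
// 'page_size' and the OS capabilities.
//
// Illustrative use only (hypothetical values, not copied from a call site):
//
//   ReservedSpace rs = MemoryReserver::reserve(64 * M,
//                                              os::vm_allocation_granularity(),
//                                              os::vm_page_size(),
//                                              mtInternal);
//   if (!rs.is_reserved()) {
//     // Handle the failed reservation.
//   }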
ReservedSpace MemoryReserver::reserve(char* requested_address,
                                      size_t size,
                                      size_t alignment,
                                      size_t page_size,
                                      bool executable,
                                      MemTag mem_tag) {
  sanity_check_arguments(size, alignment, page_size);

  // Reserve the memory.

  // There are basically three different cases that we need to handle:
  // 1. Mapping backed by a file
  // 2. Mapping backed by explicit large pages
  // 3. Mapping backed by normal pages or transparent huge pages
  // The first two have restrictions that require the whole mapping to be
  // committed up front. To record this, the ReservedSpace is marked 'special'.

  // == Case 1 ==
  // This case is handled within the HeapReserver.

  // == Case 2 ==
  if (use_explicit_large_pages(page_size)) {
    // The system can't commit large pages on demand (i.e. it can't use
    // transparent huge pages), and the caller requested large pages. To
    // satisfy the request we use explicit large pages, which have to be
    // committed up front to ensure no reservations are lost.
    do {
      ReservedSpace reserved = reserve_memory_special(requested_address, size, alignment, page_size, executable);
      if (reserved.is_reserved()) {
        // Successful reservation using large pages.
        return reserved;
      }
      page_size = os::page_sizes().next_smaller(page_size);
    } while (page_size > os::vm_page_size());

    // Failed to reserve explicit large pages, do proper logging.
    log_on_large_pages_failure(requested_address, size);
    // Now fall back to normal reservation.
    assert(page_size == os::vm_page_size(), "inv");
  }

  // == Case 3 ==
  return reserve_memory(requested_address, size, alignment, executable, mem_tag);
}

ReservedSpace MemoryReserver::reserve(char* requested_address,
                                      size_t size,
                                      size_t alignment,
                                      size_t page_size,
                                      MemTag mem_tag) {
  return reserve(requested_address,
                 size,
                 alignment,
                 page_size,
                 !ExecMem,
                 mem_tag);
}

ReservedSpace MemoryReserver::reserve(size_t size,
                                      size_t alignment,
                                      size_t page_size,
                                      MemTag mem_tag) {
  return reserve(nullptr /* requested_address */,
                 size,
                 alignment,
                 page_size,
                 mem_tag);
}

ReservedSpace MemoryReserver::reserve(size_t size,
                                      MemTag mem_tag) {
  // Want to use large pages where possible. If the size is not large-page
  // aligned, the mapping will be a mix of large and normal pages.
  size_t page_size = os::page_size_for_region_unaligned(size, 1);
  size_t alignment = os::vm_allocation_granularity();

  return reserve(size,
                 alignment,
                 page_size,
                 mem_tag);
}

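// Release a reservation made by MemoryReserver::reserve(). 'special'
// reservations (explicit large pages) go through os::release_memory_special(),
// everything else through os::release_memory().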
bool MemoryReserver::release(const ReservedSpace& reserved) {
  assert(reserved.is_reserved(), "Precondition");

  if (reserved.special()) {
    return os::release_memory_special(reserved.base(), reserved.size());
  } else {
    return os::release_memory(reserved.base(), reserved.size());
  }
}

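// File-backed analogue of reserve_memory_inner(): map 'size' bytes of the file
// 'fd' with the given alignment, either at 'requested_address' or anywhere.
// Returns the base address on success and null on failure.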
static char* map_memory_to_file(char* requested_address,
                                size_t size,
                                size_t alignment,
                                int fd,
                                MemTag mem_tag) {
  // If the memory was requested at a particular address, use
  // os::attempt_map_memory_to_file_at() to avoid mapping over something
  // important.  If the reservation fails, return null.
  if (requested_address != nullptr) {
    assert(is_aligned(requested_address, alignment),
           "Requested address " PTR_FORMAT " must be aligned to %zu",
           p2i(requested_address), alignment);
    return os::attempt_map_memory_to_file_at(requested_address, size, fd, mem_tag);
  }

  // Optimistically assume that the OS returns an aligned base pointer.
  // When reserving a large address range, most OSes seem to align to at
  // least 64K.
  char* base = os::map_memory_to_file(size, fd, mem_tag);
  if (is_aligned(base, alignment)) {
    return base;
  }

  // Base not aligned, retry.
  if (!os::unmap_memory(base, size)) {
    fatal("os::unmap_memory failed");
  }

  // Map using the requested alignment.
  return os::map_memory_to_file_aligned(size, alignment, fd, mem_tag);
}

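// Reserve memory backed by the file descriptor 'fd'. The whole mapping is
// treated as committed up front, so the resulting ReservedSpace is marked
// 'special' and is never executable.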
ReservedSpace FileMappedMemoryReserver::reserve(char* requested_address,
                                                size_t size,
                                                size_t alignment,
                                                int fd,
                                                MemTag mem_tag) {
  sanity_check_size_and_alignment(size, alignment);

  char* base = map_memory_to_file(requested_address, size, alignment, fd, mem_tag);

  if (base != nullptr) {
    return ReservedSpace(base, size, alignment, os::vm_page_size(), !ExecMem, true /* special */);
  }

  // Failed
  return {};
}

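// Reserve executable memory for the code cache, tagged mtCode and placed at no
// particular address.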
ReservedSpace CodeMemoryReserver::reserve(size_t size,
                                          size_t alignment,
                                          size_t page_size) {
  return MemoryReserver::reserve(nullptr /* requested_address */,
                                 size,
                                 alignment,
                                 page_size,
                                 ExecMem,
                                 mtCode);
}

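// Reserve a Java heap that does not use compressed oops. No placement
// constraints apply, so a single reservation attempt is enough.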
ReservedHeapSpace HeapReserver::Instance::reserve_uncompressed_oops_heap(size_t size,
                                                                         size_t alignment,
                                                                         size_t page_size) {
  ReservedSpace reserved = reserve_memory(size, alignment, page_size);

  if (reserved.is_reserved()) {
    return ReservedHeapSpace(reserved, 0 /* noaccess_prefix */);
  }

  // Failed
  return {};
}

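// Create the heap backing file if a directory was given. Returns -1 when no
// directory is set; exits the VM if the file cannot be created.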
static int maybe_create_file(const char* heap_allocation_directory) {
  if (heap_allocation_directory == nullptr) {
    return -1;
  }

  int fd = os::create_file_for_heap(heap_allocation_directory);
  if (fd == -1) {
    vm_exit_during_initialization(
        err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
  }

  return fd;
}

HeapReserver::Instance::Instance(const char* heap_allocation_directory)
  : _fd(maybe_create_file(heap_allocation_directory)) {}

HeapReserver::Instance::~Instance() {
  if (_fd != -1) {
    ::close(_fd);
  }
}

ReservedSpace HeapReserver::Instance::reserve_memory(size_t size,
                                                     size_t alignment,
                                                     size_t page_size,
                                                     char* requested_address) {

  // There are basically three different cases that we need to handle below:
  // 1. Mapping backed by a file
  // 2. Mapping backed by explicit large pages
  // 3. Mapping backed by normal pages or transparent huge pages
  // The first two have restrictions that require the whole mapping to be
  // committed up front. To record this, the ReservedSpace is marked 'special'.

  // == Case 1 ==
  if (_fd != -1) {
    // When a backing file directory is provided for this space, whether large
    // pages are allocated is up to the filesystem backing the file, so
    // UseLargePages is not taken into account for this reservation.
    //
    // If requested, let the user know that explicit large pages can't be used.
    if (use_explicit_large_pages(page_size) && large_pages_requested()) {
      log_debug(gc, heap)("Cannot allocate explicit large pages for Java Heap when AllocateHeapAt option is set.");
    }

    // Always return here; falling back to a reservation that does not use a file is not possible.
    return FileMappedMemoryReserver::reserve(requested_address, size, alignment, _fd, mtJavaHeap);
  }

  // == Case 2 & 3 ==
  return MemoryReserver::reserve(requested_address, size, alignment, page_size, mtJavaHeap);
}

// Compressed oop support is not relevant in 32-bit builds.
#ifdef _LP64

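// Release a heap reservation made by this Instance. File-backed reservations
// are unmapped; 'special' (explicit large page) and normal reservations are
// released through the corresponding os:: routines.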
void HeapReserver::Instance::release(const ReservedSpace& reserved) {
  if (reserved.is_reserved()) {
    if (_fd == -1) {
      if (reserved.special()) {
        os::release_memory_special(reserved.base(), reserved.size());
      } else {
        os::release_memory(reserved.base(), reserved.size());
      }
    } else {
      os::unmap_memory(reserved.base(), reserved.size());
    }
  }
}

// Tries to allocate memory of size 'size' at address 'requested_address' with alignment 'alignment'.
// Does not check whether the reserved memory actually is at requested_address, as the memory returned
// might still fulfill the wishes of the caller.
// Ensures the memory is aligned to 'alignment'.
ReservedSpace HeapReserver::Instance::try_reserve_memory(size_t size,
                                                         size_t alignment,
                                                         size_t page_size,
                                                         char* requested_address) {
  // Try to reserve the memory for the heap.
  log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
                             " heap of size 0x%zx",
                             p2i(requested_address),
                             size);

  ReservedSpace reserved = reserve_memory(size, alignment, page_size, requested_address);

  if (reserved.is_reserved()) {
    // Check alignment constraints.
    assert(reserved.alignment() == alignment, "Unexpected");
    assert(is_aligned(reserved.base(), alignment), "Unexpected");
    return reserved;
  }

  // Failed
  return {};
}

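// Walks candidate attach points from 'highest_start' down to 'lowest_start' in
// steps of 'attach_point_alignment' (at most HeapSearchSteps attempts) and
// returns the first reservation that lies within
// [aligned_heap_base_min_address, upper_bound).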
ReservedSpace HeapReserver::Instance::try_reserve_range(char *highest_start,
                                                        char *lowest_start,
                                                        size_t attach_point_alignment,
                                                        char *aligned_heap_base_min_address,
                                                        char *upper_bound,
                                                        size_t size,
                                                        size_t alignment,
                                                        size_t page_size) {
  assert(is_aligned(highest_start, attach_point_alignment), "precondition");
  assert(is_aligned(lowest_start, attach_point_alignment), "precondition");

  const size_t attach_range = pointer_delta(highest_start, lowest_start, sizeof(char));
  const size_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
  const size_t num_attempts_to_try   = MIN2((size_t)HeapSearchSteps, num_attempts_possible);
  const size_t num_intervals = num_attempts_to_try - 1;
  const size_t stepsize = num_intervals == 0 ? 0 : align_down(attach_range / num_intervals, attach_point_alignment);

  for (size_t i = 0; i < num_attempts_to_try; ++i) {
    char* const attach_point = highest_start - stepsize * i;
    ReservedSpace reserved = try_reserve_memory(size, alignment, page_size, attach_point);

    if (reserved.is_reserved()) {
      if (reserved.base() >= aligned_heap_base_min_address &&
          size <= (uintptr_t)(upper_bound - reserved.base())) {
        // Got a successful reservation.
        return reserved;
      }

      release(reserved);
    }
  }

  // Failed
  return {};
}

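// Size constants used to build the candidate attach addresses for disjoint
// base mode below.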
#define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
#define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
#define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))

// Helper for heap allocation. Returns an array with addresses
// (OS-specific) which are suited for disjoint base mode. Array is
// null terminated.
static char** get_attach_addresses_for_disjoint_mode() {
  static uint64_t addresses[] = {
     2 * SIZE_32G,
     3 * SIZE_32G,
     4 * SIZE_32G,
     8 * SIZE_32G,
    10 * SIZE_32G,
     1 * SIZE_64K * SIZE_32G,
     2 * SIZE_64K * SIZE_32G,
     3 * SIZE_64K * SIZE_32G,
     4 * SIZE_64K * SIZE_32G,
    16 * SIZE_64K * SIZE_32G,
    32 * SIZE_64K * SIZE_32G,
    34 * SIZE_64K * SIZE_32G,
    0
  };

  // Skip addresses that are too low to be useful: everything below
  // OopEncodingHeapMax or below HeapBaseMinAddress. This assumes the
  // array is sorted.
  uint i = 0;
  while (addresses[i] != 0 &&
         (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
    i++;
  }
  uint start = i;

  // Avoid more steps than requested.
  i = 0;
  while (addresses[start+i] != 0) {
    if (i == HeapSearchSteps) {
      addresses[start+i] = 0;
      break;
    }
    i++;
  }

  return (char**) &addresses[start];
}

// Create protection page at the beginning of the space.
static ReservedSpace establish_noaccess_prefix(const ReservedSpace& reserved, size_t noaccess_prefix) {
  assert(reserved.alignment() >= os::vm_page_size(), "must be at least page size big");
  assert(reserved.is_reserved(), "should only be called on a reserved memory area");

  if (reserved.end() > (char *)OopEncodingHeapMax || UseCompatibleCompressedOops) {
    assert((reserved.base() != nullptr), "sanity");
    if (true
        WIN64_ONLY(&& !UseLargePages)
        AIX_ONLY(&& (os::Aix::supports_64K_mmap_pages() || os::vm_page_size() == 4*K))) {
      // Protect memory at the base of the allocated region.
      if (!os::protect_memory(reserved.base(), noaccess_prefix, os::MEM_PROT_NONE, reserved.special())) {
        fatal("cannot protect protection page");
      }
      log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
                                 PTR_FORMAT " / %zd bytes",
                                 p2i(reserved.base()),
                                 noaccess_prefix);
      assert(CompressedOops::use_implicit_null_checks() == true, "not initialized?");
    } else {
      CompressedOops::set_use_implicit_null_checks(false);
    }
  }

  return reserved.last_part(noaccess_prefix);
}

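// Reserve a Java heap suitable for compressed oops. Placement is attempted in
// order of decreasing encoding quality: unscaled (32-bit), zero-based,
// disjoint base, and finally an arbitrary address with a noaccess prefix.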
ReservedHeapSpace HeapReserver::Instance::reserve_compressed_oops_heap(const size_t size, size_t alignment, size_t page_size) {
  const size_t noaccess_prefix_size = lcm(os::vm_page_size(), alignment);
  const size_t granularity = os::vm_allocation_granularity();

  assert(size + noaccess_prefix_size <= OopEncodingHeapMax, "cannot allocate compressed oop heap for this size");
  assert(is_aligned(size, granularity), "size not aligned to os::vm_allocation_granularity()");

  assert(alignment >= os::vm_page_size(), "alignment too small");
  assert(is_aligned(alignment, granularity), "alignment not aligned to os::vm_allocation_granularity()");
  assert(is_power_of_2(alignment), "not a power of 2");

  // The necessary attach point alignment for generated wish addresses.
  // This is needed to increase the chance of attaching for mmap and shmat.
  // AIX is the only platform that uses System V shm for reserving virtual memory.
  // In this case, the required alignment of the allocated size (64K) and the alignment
  // of possible start points of the memory region (256M) differ.
  // This is not reflected by os::vm_allocation_granularity().
  // The logic here mirrors the one in pd_reserve_memory in os_aix.cpp.
  const size_t os_attach_point_alignment =
    AIX_ONLY(os::vm_page_size() == 4*K ? 4*K : 256*M)
    NOT_AIX(os::vm_allocation_granularity());

  const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);

  char* aligned_heap_base_min_address = align_up((char*)HeapBaseMinAddress, alignment);
  char* heap_end_address = aligned_heap_base_min_address + size;

  bool unscaled  = false;
  bool zerobased = false;
  if (!UseCompatibleCompressedOops) { // heap base is not enforced
    unscaled  = (heap_end_address <= (char*)UnscaledOopHeapMax);
    zerobased = (heap_end_address <= (char*)OopEncodingHeapMax);
  }
  size_t noaccess_prefix = !zerobased ? noaccess_prefix_size : 0;

  ReservedSpace reserved{};

  // Attempt to alloc at user-given address.
  if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) || UseCompatibleCompressedOops) {
    reserved = try_reserve_memory(size + noaccess_prefix, alignment, page_size, aligned_heap_base_min_address);
    if (reserved.base() != aligned_heap_base_min_address) { // Enforce this exact address.
      release(reserved);
      reserved = {};
    }
  }

  // Keep heap at HeapBaseMinAddress.
  if (!reserved.is_reserved()) {

    // Try to allocate the heap at addresses that allow efficient oop compression.
    // Different schemes are tried, in order of decreasing optimization potential.
    //
    // For this, try_reserve_memory() is called with the desired heap base addresses.
    // A call into the os layer to allocate at a given address can return memory
    // at a different address than requested.  Still, this might be memory at a useful
    // address. try_reserve_memory() always returns this allocated memory, as only here
    // the criteria for a good heap are checked.

    // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
    // Give it several tries from top of range to bottom.
    if (unscaled) {

      // Calculate the address range within which we try to attach (range of possible start addresses).
      char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
      char* const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
      reserved = try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                                   aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, page_size);
    }

    // zerobased: Attempt to allocate in the lower 32G.
    char *zerobased_max = (char *)OopEncodingHeapMax;

    // Give it several tries from top of range to bottom.
    if (zerobased &&                          // Zerobased theoretically possible.
        ((!reserved.is_reserved()) ||         // No previous try succeeded.
         (reserved.end() > zerobased_max))) { // Unscaled delivered an arbitrary address.

      // Release previous reservation
      release(reserved);

      // Calculate the address range within which we try to attach (range of possible start addresses).
      char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
      // Note that 'size' is not guaranteed to be less than UnscaledOopHeapMax,
      // so the subtraction below may wrap around.
      char *lowest_start = aligned_heap_base_min_address;
      uint64_t unscaled_end = UnscaledOopHeapMax - size;
      if (unscaled_end < UnscaledOopHeapMax) { // No wrap-around, unscaled_end is usable.
        lowest_start = MAX2(lowest_start, (char*)unscaled_end);
      }
      lowest_start = align_up(lowest_start, attach_point_alignment);
      reserved = try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                                   aligned_heap_base_min_address, zerobased_max, size, alignment, page_size);
    }

    // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
    // implement null checks.
    noaccess_prefix = noaccess_prefix_size;

    // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjoint base mode.
    char** addresses = get_attach_addresses_for_disjoint_mode();
    int i = 0;
    while ((addresses[i] != nullptr) &&       // End of array not yet reached.
           ((!reserved.is_reserved()) ||      // No previous try succeeded.
           (reserved.end() > zerobased_max && // Not zerobased or unscaled address.
                                              // Not disjoint address.
            !CompressedOops::is_disjoint_heap_base_address((address)reserved.base())))) {

      // Release previous reservation
      release(reserved);

      char* const attach_point = addresses[i];
      assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
      reserved = try_reserve_memory(size + noaccess_prefix, alignment, page_size, attach_point);
      i++;
    }

    // Last, desperate try without any placement.
    if (!reserved.is_reserved()) {
      log_trace(gc, heap, coops)("Trying to allocate at address null heap of size 0x%zx", size + noaccess_prefix);
      assert(alignment >= os::vm_page_size(), "Unexpected");
      reserved = reserve_memory(size + noaccess_prefix, alignment, page_size);
    }
  }

  // No more reserve attempts

  if (reserved.is_reserved()) {
    // Successfully found and reserved memory for the heap.

    if (reserved.size() > size) {
      // We reserved heap memory with a noaccess prefix.

      assert(reserved.size() == size + noaccess_prefix, "Prefix should be included");
      // It can happen that we get a zerobased/unscaled heap with a noaccess prefix
      // if we had to try at an arbitrary address.
      reserved = establish_noaccess_prefix(reserved, noaccess_prefix);
      assert(reserved.size() == size, "Prefix should be gone");
      return ReservedHeapSpace(reserved, noaccess_prefix);
    }

    // We reserved heap memory without a noaccess prefix.
    assert(!UseCompatibleCompressedOops, "noaccess prefix is missing");
    return ReservedHeapSpace(reserved, 0 /* noaccess_prefix */);
  }

  // Failed
  return {};
}

#endif // _LP64

ReservedHeapSpace HeapReserver::Instance::reserve_heap(size_t size, size_t alignment, size_t page_size) {
  if (UseCompressedOops) {
#ifdef _LP64
    return reserve_compressed_oops_heap(size, alignment, page_size);
#endif
  } else {
    return reserve_uncompressed_oops_heap(size, alignment, page_size);
  }
}

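// Reserves the Java heap, optionally backed by a file created in
// 'heap_allocation_directory' (the AllocateHeapAt case).
//
// Illustrative use only (hypothetical values, not copied from a call site):
//
//   ReservedHeapSpace heap_rs = HeapReserver::reserve(heap_size, heap_alignment,
//                                                     page_size, AllocateHeapAt);
//   if (!heap_rs.is_reserved()) {
//     vm_exit_during_initialization("Could not reserve enough space for object heap");
//   }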
ReservedHeapSpace HeapReserver::reserve(size_t size, size_t alignment, size_t page_size, const char* heap_allocation_directory) {
  sanity_check_arguments(size, alignment, page_size);

  assert(alignment != 0, "Precondition");
  assert(is_aligned(size, alignment), "Precondition");

  Instance instance(heap_allocation_directory);

  return instance.reserve_heap(size, alignment, page_size);
}