/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "logging/log.hpp"
#include "memory/memoryReserver.hpp"
#include "oops/compressedOops.hpp"
#include "oops/markWord.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "runtime/os.inline.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"

static void sanity_check_size_and_alignment(size_t size, size_t alignment) {
  assert(size > 0, "Precondition");

  DEBUG_ONLY(const size_t granularity = os::vm_allocation_granularity());
  assert(is_aligned(size, granularity), "size not aligned to os::vm_allocation_granularity()");

  assert(alignment >= granularity, "Must be set");
  assert(is_power_of_2(alignment), "not a power of 2");
  assert(is_aligned(alignment, granularity), "alignment not aligned to os::vm_allocation_granularity()");
}

static void sanity_check_page_size(size_t page_size) {
  assert(page_size >= os::vm_page_size(), "Invalid page size");
  assert(is_power_of_2(page_size), "Invalid page size");
}

static void sanity_check_arguments(size_t size, size_t alignment, size_t page_size) {
  sanity_check_size_and_alignment(size, alignment);
  sanity_check_page_size(page_size);
}

static bool large_pages_requested() {
  return UseLargePages &&
         (!FLAG_IS_DEFAULT(UseLargePages) || !FLAG_IS_DEFAULT(LargePageSizeInBytes));
}

static void log_on_large_pages_failure(char* req_addr, size_t bytes) {
  if (large_pages_requested()) {
    // Compressed oops logging.
    log_debug(gc, heap, coops)("Reserve regular memory without large pages");
    // JVM style warning that we did not succeed in using large pages.
    warning("Failed to reserve and commit memory using large pages. "
            "req_addr: " PTR_FORMAT " bytes: %zu",
            p2i(req_addr), bytes);
  }
}

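// Explicit large pages (e.g. hugetlbfs or Windows large pages) cannot be
// committed piecemeal after reservation; the whole mapping has to be committed
// when it is reserved. os::can_commit_large_page_memory() reports whether the
// platform can commit large-page memory on demand (as transparent huge pages
// can), so explicit large pages are only needed when that capability is absent
// and a page size larger than the base page size was requested.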
static bool use_explicit_large_pages(size_t page_size) {
  return !os::can_commit_large_page_memory() &&
         page_size != os::vm_page_size();
}

static char* reserve_memory_inner(char* requested_address,
                                  size_t size,
                                  size_t alignment,
                                  bool exec,
                                  MemTag mem_tag) {
  // If the memory was requested at a particular address, use
  // os::attempt_reserve_memory_at() to avoid mapping over something
  // important.  If the reservation fails, return null.
  if (requested_address != nullptr) {
    assert(is_aligned(requested_address, alignment),
           "Requested address " PTR_FORMAT " must be aligned to %zu",
           p2i(requested_address), alignment);
    return os::attempt_reserve_memory_at(requested_address, size, mem_tag, exec);
  }

  // Optimistically assume that the OS returns an aligned base pointer.
  // When reserving a large address range, most OSes seem to align to at
  // least 64K.
  char* base = os::reserve_memory(size, mem_tag, exec);
  if (is_aligned(base, alignment)) {
    return base;
  }

  // Base not aligned, retry.
  if (!os::release_memory(base, size)) {
    fatal("os::release_memory failed");
  }

  // Map using the requested alignment.
  return os::reserve_memory_aligned(size, alignment, mem_tag, exec);
}

ReservedSpace MemoryReserver::reserve_memory(char* requested_address,
                                             size_t size,
                                             size_t alignment,
                                             size_t page_size,
                                             bool exec,
                                             MemTag mem_tag) {
  char* base = reserve_memory_inner(requested_address, size, alignment, exec, mem_tag);

  if (base != nullptr) {
    return ReservedSpace(base, size, alignment, page_size, exec, false /* special */);
  }

  // Failed
  return {};
}

ReservedSpace MemoryReserver::reserve_memory_special(char* requested_address,
                                                     size_t size,
                                                     size_t alignment,
                                                     size_t page_size,
                                                     bool exec) {
  log_trace(pagesize)("Attempt special mapping: size: " EXACTFMT ", alignment: " EXACTFMT,
                      EXACTFMTARGS(size),
                      EXACTFMTARGS(alignment));

  char* base = os::reserve_memory_special(size, alignment, page_size, requested_address, exec);

  if (base != nullptr) {
    assert(is_aligned(base, alignment),
           "reserve_memory_special() returned an unaligned address, "
           "base: " PTR_FORMAT " alignment: 0x%zx",
           p2i(base), alignment);

    return ReservedSpace(base, size, alignment, page_size, exec, true /* special */);
  }

  // Failed
  return {};
}

ReservedSpace MemoryReserver::reserve(char* requested_address,
                                      size_t size,
                                      size_t alignment,
                                      size_t page_size,
                                      bool executable,
                                      MemTag mem_tag) {
  sanity_check_arguments(size, alignment, page_size);

  // Reserve the memory.

  // There are basically three different cases that we need to handle:
  // 1. Mapping backed by a file
  // 2. Mapping backed by explicit large pages
  // 3. Mapping backed by normal pages or transparent huge pages
  // The first two have restrictions that require the whole mapping to be
  // committed up front. To record this, the ReservedSpace is marked 'special'.
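  // A 'special' ReservedSpace is already committed in its entirety, so callers
  // must not commit or uncommit parts of it on their own.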

  // == Case 1 ==
  // This case is handled within the HeapReserver.

  // == Case 2 ==
  if (use_explicit_large_pages(page_size)) {
    // The system can't commit large pages on demand (i.e. it can't use
    // transparent huge pages) and the caller requested large pages. To satisfy
    // this request we use explicit large pages, which have to be committed up
    // front to ensure that no reservations are lost.
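    // Walk down the supported large page sizes: retry the reservation with the
    // next smaller large page size until a reservation succeeds or only the
    // base page size remains, in which case we fall back to Case 3 below.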
    do {
      ReservedSpace reserved = reserve_memory_special(requested_address, size, alignment, page_size, executable);
      if (reserved.is_reserved()) {
        // Successful reservation using large pages.
        return reserved;
      }
      page_size = os::page_sizes().next_smaller(page_size);
    } while (page_size > os::vm_page_size());

    // Failed to reserve explicit large pages, do proper logging.
    log_on_large_pages_failure(requested_address, size);
    // Now fall back to normal reservation.
    assert(page_size == os::vm_page_size(), "inv");
  }

  // == Case 3 ==
  return reserve_memory(requested_address, size, alignment, page_size, executable, mem_tag);
}

ReservedSpace MemoryReserver::reserve(char* requested_address,
                                      size_t size,
                                      size_t alignment,
                                      size_t page_size,
                                      MemTag mem_tag) {
  return reserve(requested_address,
                 size,
                 alignment,
                 page_size,
                 !ExecMem,
                 mem_tag);
}


ReservedSpace MemoryReserver::reserve(size_t size,
                                      size_t alignment,
                                      size_t page_size,
                                      MemTag mem_tag) {
  return reserve(nullptr /* requested_address */,
                 size,
                 alignment,
                 page_size,
                 mem_tag);
}

ReservedSpace MemoryReserver::reserve(size_t size,
                                      MemTag mem_tag) {
  // We want to use large pages where possible. If the size is not
  // large-page aligned, the mapping will be a mix of large and normal pages.
  size_t page_size = os::page_size_for_region_unaligned(size, 1);
  size_t alignment = os::vm_allocation_granularity();

  return reserve(size,
                 alignment,
                 page_size,
                 mem_tag);
}
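
// Illustrative sketch (not a call site in HotSpot, just how the convenience
// overload above composes with release() below):
//
//   ReservedSpace rs = MemoryReserver::reserve(64 * M, mtInternal);
//   if (rs.is_reserved()) {
//     // ... commit and use the range ...
//     MemoryReserver::release(rs);
//   }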

bool MemoryReserver::release(const ReservedSpace& reserved) {
  assert(reserved.is_reserved(), "Precondition");

  if (reserved.special()) {
    return os::release_memory_special(reserved.base(), reserved.size());
  } else {
    return os::release_memory(reserved.base(), reserved.size());
  }
}

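// File-backed counterpart of reserve_memory_inner(): the same attempt-at-address
// and release-then-retry-aligned strategy, but built on the os::*map_memory_to_file*
// primitives so the reservation is backed by the given file descriptor.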
static char* map_memory_to_file(char* requested_address,
                                size_t size,
                                size_t alignment,
                                int fd,
                                MemTag mem_tag) {
  // If the memory was requested at a particular address, use
  // os::attempt_map_memory_to_file_at() to avoid mapping over something
  // important.  If the reservation fails, return null.
  if (requested_address != nullptr) {
    assert(is_aligned(requested_address, alignment),
           "Requested address " PTR_FORMAT " must be aligned to %zu",
           p2i(requested_address), alignment);
    return os::attempt_map_memory_to_file_at(requested_address, size, fd, mem_tag);
  }

  // Optimistically assume that the OS returns an aligned base pointer.
  // When reserving a large address range, most OSes seem to align to at
  // least 64K.
  char* base = os::map_memory_to_file(size, fd, mem_tag);
  if (is_aligned(base, alignment)) {
    return base;
  }


  // Base not aligned, retry.
  if (!os::unmap_memory(base, size)) {
    fatal("os::unmap_memory failed");
  }

  // Map using the requested alignment.
  return os::map_memory_to_file_aligned(size, alignment, fd, mem_tag);
}

ReservedSpace FileMappedMemoryReserver::reserve(char* requested_address,
                                                size_t size,
                                                size_t alignment,
                                                int fd,
                                                MemTag mem_tag) {
  sanity_check_size_and_alignment(size, alignment);

  char* base = map_memory_to_file(requested_address, size, alignment, fd, mem_tag);

  if (base != nullptr) {
    return ReservedSpace(base, size, alignment, os::vm_page_size(), !ExecMem, true /* special */);
  }

  // Failed
  return {};
}

ReservedSpace CodeMemoryReserver::reserve(size_t size,
                                          size_t alignment,
                                          size_t page_size) {
  return MemoryReserver::reserve(nullptr /* requested_address */,
                                 size,
                                 alignment,
                                 page_size,
                                 ExecMem,
                                 mtCode);
}

ReservedHeapSpace HeapReserver::Instance::reserve_uncompressed_oops_heap(size_t size,
                                                                         size_t alignment,
                                                                         size_t page_size) {
  ReservedSpace reserved = reserve_memory(size, alignment, page_size);

  if (reserved.is_reserved()) {
    return ReservedHeapSpace(reserved, 0 /* noaccess_prefix */);
  }

  // Failed
  return {};
}


static int maybe_create_file(const char* heap_allocation_directory) {
  if (heap_allocation_directory == nullptr) {
    return -1;
  }

  int fd = os::create_file_for_heap(heap_allocation_directory);
  if (fd == -1) {
    vm_exit_during_initialization(
        err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
  }

  return fd;
}

HeapReserver::Instance::Instance(const char* heap_allocation_directory)
  : _fd(maybe_create_file(heap_allocation_directory)) {}

HeapReserver::Instance::~Instance() {
  if (_fd != -1) {
    ::close(_fd);
  }
}

ReservedSpace HeapReserver::Instance::reserve_memory(size_t size,
                                                     size_t alignment,
                                                     size_t page_size,
                                                     char* requested_address) {

  // There are basically three different cases that we need to handle below:
  // 1. Mapping backed by a file
  // 2. Mapping backed by explicit large pages
  // 3. Mapping backed by normal pages or transparent huge pages
  // The first two have restrictions that require the whole mapping to be
  // committed up front. To record this, the ReservedSpace is marked 'special'.

  // == Case 1 ==
  if (_fd != -1) {
    // When a backing file directory is given for this space, whether large
    // pages are used is up to the filesystem backing the file, so UseLargePages
    // is not taken into account for this reservation.
    //
    // If requested, let the user know that explicit large pages can't be used.
    if (use_explicit_large_pages(page_size) && large_pages_requested()) {
      log_debug(gc, heap)("Cannot allocate explicit large pages for Java Heap when AllocateHeapAt option is set.");
    }

    // Always return here; it is not possible to fall back to a reservation
    // that does not use the file.
    return FileMappedMemoryReserver::reserve(requested_address, size, alignment, _fd, mtJavaHeap);
  }

  // == Case 2 & 3 ==
  return MemoryReserver::reserve(requested_address, size, alignment, page_size, mtJavaHeap);
}

// Compressed oop support is not relevant in 32-bit builds.
#ifdef _LP64

void HeapReserver::Instance::release(const ReservedSpace& reserved) {
  if (reserved.is_reserved()) {
    if (_fd == -1) {
      if (reserved.special()) {
        os::release_memory_special(reserved.base(), reserved.size());
      } else {
        os::release_memory(reserved.base(), reserved.size());
      }
    } else {
      os::unmap_memory(reserved.base(), reserved.size());
    }
  }
}

// Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
// Does not check whether the reserved memory actually is at requested_address, as the memory returned
// might still fulfill the wishes of the caller.
// Ensures that the memory is aligned to 'alignment'.
ReservedSpace HeapReserver::Instance::try_reserve_memory(size_t size,
                                                         size_t alignment,
                                                         size_t page_size,
                                                         char* requested_address) {
  // Try to reserve the memory for the heap.
  log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
                             " heap of size 0x%zx",
                             p2i(requested_address),
                             size);

  ReservedSpace reserved = reserve_memory(size, alignment, page_size, requested_address);

  if (reserved.is_reserved()) {
    // Check alignment constraints.
    assert(reserved.alignment() == alignment, "Unexpected");
    assert(is_aligned(reserved.base(), alignment), "Unexpected");
    return reserved;
  }

  // Failed
  return {};
}

ReservedSpace HeapReserver::Instance::try_reserve_range(char *highest_start,
                                                        char *lowest_start,
                                                        size_t attach_point_alignment,
                                                        char *aligned_heap_base_min_address,
                                                        char *upper_bound,
                                                        size_t size,
                                                        size_t alignment,
                                                        size_t page_size) {
  assert(is_aligned(highest_start, attach_point_alignment), "precondition");
  assert(is_aligned(lowest_start, attach_point_alignment), "precondition");

  const size_t attach_range = pointer_delta(highest_start, lowest_start, sizeof(char));
  const size_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
  const size_t num_attempts_to_try   = MIN2((size_t)HeapSearchSteps, num_attempts_possible);
  const size_t num_intervals = num_attempts_to_try - 1;
  const size_t stepsize = num_intervals == 0 ? 0 : align_down(attach_range / num_intervals, attach_point_alignment);

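  // Probe at most HeapSearchSteps attach points, starting at highest_start and
  // stepping down towards lowest_start in attach-point-aligned strides, e.g.
  // highest_start, highest_start - stepsize, highest_start - 2 * stepsize, ...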
  for (size_t i = 0; i < num_attempts_to_try; ++i) {
    char* const attach_point = highest_start - stepsize * i;
    ReservedSpace reserved = try_reserve_memory(size, alignment, page_size, attach_point);

    if (reserved.is_reserved()) {
      if (reserved.base() >= aligned_heap_base_min_address &&
          size <= (uintptr_t)(upper_bound - reserved.base())) {
        // Got a successful reservation.
        return reserved;
      }

      release(reserved);
    }
  }

  // Failed
  return {};
}

#define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
#define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
#define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))

// Helper for heap allocation. Returns an array with addresses
// (OS-specific) which are suited for disjoint base mode. Array is
// null terminated.
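// In disjoint base mode the heap base is a multiple of OopEncodingHeapMax, so
// the base bits and the shifted narrow-oop bits occupy disjoint bit ranges and
// decoding can combine them with a bitwise OR instead of an addition.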
static char** get_attach_addresses_for_disjoint_mode() {
  static uint64_t addresses[] = {
     2 * SIZE_32G,
     3 * SIZE_32G,
     4 * SIZE_32G,
     8 * SIZE_32G,
    10 * SIZE_32G,
     1 * SIZE_64K * SIZE_32G,
     2 * SIZE_64K * SIZE_32G,
     3 * SIZE_64K * SIZE_32G,
     4 * SIZE_64K * SIZE_32G,
    16 * SIZE_64K * SIZE_32G,
    32 * SIZE_64K * SIZE_32G,
    34 * SIZE_64K * SIZE_32G,
    0
  };

  // Skip addresses below OopEncodingHeapMax or HeapBaseMinAddress. This
  // assumes the array is sorted.
  uint i = 0;
  while (addresses[i] != 0 &&
         (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
    i++;
  }
  uint start = i;

  // Avoid more steps than requested.
  i = 0;
  while (addresses[start+i] != 0) {
    if (i == HeapSearchSteps) {
      addresses[start+i] = 0;
      break;
    }
    i++;
  }

  return (char**) &addresses[start];
}

// Create protection page at the beginning of the space.
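// With a non-zero heap base, decoding a null narrow oop (without a null check)
// yields the heap base address. Keeping the prefix at the base inaccessible
// turns such accesses into faults, which is what lets implicit null checks
// keep working; otherwise they are disabled below.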
static ReservedSpace establish_noaccess_prefix(const ReservedSpace& reserved, size_t noaccess_prefix) {
  assert(reserved.alignment() >= os::vm_page_size(), "must be at least page size big");
  assert(reserved.is_reserved(), "should only be called on a reserved memory area");

  if (reserved.end() > (char *)OopEncodingHeapMax || UseCompatibleCompressedOops) {
    assert((reserved.base() != nullptr), "sanity");
    if (true
        WIN64_ONLY(&& !UseLargePages)
        AIX_ONLY(&& (os::Aix::supports_64K_mmap_pages() || os::vm_page_size() == 4*K))) {
      // Protect memory at the base of the allocated region.
      if (!os::protect_memory(reserved.base(), noaccess_prefix, os::MEM_PROT_NONE, reserved.special())) {
        fatal("cannot protect protection page");
      }
      log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
                                 PTR_FORMAT " / %zd bytes",
                                 p2i(reserved.base()),
                                 noaccess_prefix);
      assert(CompressedOops::use_implicit_null_checks() == true, "not initialized?");
    } else {
      CompressedOops::set_use_implicit_null_checks(false);
    }
  }

  return reserved.last_part(noaccess_prefix);
}

ReservedHeapSpace HeapReserver::Instance::reserve_compressed_oops_heap(const size_t size, size_t alignment, size_t page_size) {
  const size_t noaccess_prefix_size = lcm(os::vm_page_size(), alignment);
  const size_t granularity = os::vm_allocation_granularity();

  assert(size + noaccess_prefix_size <= OopEncodingHeapMax,  "can not allocate compressed oop heap for this size");
  assert(is_aligned(size, granularity), "size not aligned to os::vm_allocation_granularity()");

  assert(alignment >= os::vm_page_size(), "alignment too small");
  assert(is_aligned(alignment, granularity), "alignment not aligned to os::vm_allocation_granularity()");
  assert(is_power_of_2(alignment), "not a power of 2");

  // The necessary attach point alignment for generated wish addresses.
  // This is needed to increase the chance of attaching for mmap and shmat.
  // AIX is the only platform that uses System V shm for reserving virtual memory.
  // In this case, the required alignment of the allocated size (64K) and the alignment
  // of possible start points of the memory region (256M) differ.
  // This is not reflected by os::vm_allocation_granularity().
  // The logic here is dual to the one in pd_reserve_memory in os_aix.cpp.
  const size_t os_attach_point_alignment =
    AIX_ONLY(os::vm_page_size() == 4*K ? 4*K : 256*M)
    NOT_AIX(os::vm_allocation_granularity());

  const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);

  char* aligned_heap_base_min_address = align_up((char*)HeapBaseMinAddress, alignment);
  char* heap_end_address = aligned_heap_base_min_address + size;

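  // "Unscaled" means a narrow oop is the address itself (no base, no shift),
  // which is possible while the heap ends below UnscaledOopHeapMax. "Zero-based"
  // means no base but a shift, possible while the heap ends below
  // OopEncodingHeapMax. Anything beyond that needs a heap base and, with it,
  // the noaccess prefix.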
  bool unscaled  = false;
  bool zerobased = false;
  if (!UseCompatibleCompressedOops) { // heap base is not enforced
    unscaled  = (heap_end_address <= (char*)UnscaledOopHeapMax);
    zerobased = (heap_end_address <= (char*)OopEncodingHeapMax);
  }
  size_t noaccess_prefix = !zerobased ? noaccess_prefix_size : 0;

  ReservedSpace reserved{};

  // Attempt to allocate at the user-given address.
  if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) || UseCompatibleCompressedOops) {
    reserved = try_reserve_memory(size + noaccess_prefix, alignment, page_size, aligned_heap_base_min_address);
    if (reserved.base() != aligned_heap_base_min_address) { // Enforce this exact address.
      release(reserved);
      reserved = {};
    }
  }

  // Keep heap at HeapBaseMinAddress.
  if (!reserved.is_reserved()) {

    // Try to allocate the heap at addresses that allow efficient oop compression.
    // Different schemes are tried, in order of decreasing optimization potential.
    //
    // For this, try_reserve_memory() is called with the desired heap base addresses.
    // A call into the os layer to allocate at a given address can return memory
    // at a different address than requested.  Still, this might be memory at a useful
    // address. try_reserve_memory() always returns this allocated memory, as only here
    // the criteria for a good heap are checked.
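    //
    // The attempts below are ordered: unscaled, then zero-based, then disjoint
    // base, and finally an unconstrained reservation with a noaccess prefix.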

    // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
    // Give it several tries from top of range to bottom.
    if (unscaled) {

      // Calculate the address range within which we try to attach (range of possible start addresses).
      char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
      char* const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
      reserved = try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                                   aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, page_size);
    }

    // zerobased: Attempt to allocate in the lower 32G.
    char *zerobased_max = (char *)OopEncodingHeapMax;

    // Give it several tries from top of range to bottom.
    if (zerobased &&                          // Zerobased theoretically possible.
        ((!reserved.is_reserved()) ||         // No previous try succeeded.
         (reserved.end() > zerobased_max))) { // Unscaled delivered an arbitrary address.

      // Release previous reservation
      release(reserved);

      // Calculate the address range within which we try to attach (range of possible start addresses).
      char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
      // Need to be careful: size is not guaranteed to be less than
      // UnscaledOopHeapMax due to type constraints, so the subtraction
      // below may wrap.
      char *lowest_start = aligned_heap_base_min_address;
      uint64_t unscaled_end = UnscaledOopHeapMax - size;
      if (unscaled_end < UnscaledOopHeapMax) { // Did not wrap: size does not exceed UnscaledOopHeapMax.
        lowest_start = MAX2(lowest_start, (char*)unscaled_end);
      }
      lowest_start = align_up(lowest_start, attach_point_alignment);
      reserved = try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                                   aligned_heap_base_min_address, zerobased_max, size, alignment, page_size);
    }

    // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
    // implement null checks.
    noaccess_prefix = noaccess_prefix_size;

    // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjoint base mode.
    char** addresses = get_attach_addresses_for_disjoint_mode();
    int i = 0;
    while ((addresses[i] != nullptr) &&       // End of array not yet reached.
           ((!reserved.is_reserved()) ||      // No previous try succeeded.
           (reserved.end() > zerobased_max && // Not zerobased or unscaled address.
                                              // Not disjoint address.
            !CompressedOops::is_disjoint_heap_base_address((address)reserved.base())))) {

      // Release previous reservation
      release(reserved);

      char* const attach_point = addresses[i];
      assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
      reserved = try_reserve_memory(size + noaccess_prefix, alignment, page_size, attach_point);
      i++;
    }

    // Last, desperate try without any placement.
    if (!reserved.is_reserved()) {
      log_trace(gc, heap, coops)("Trying to allocate at address null heap of size 0x%zx", size + noaccess_prefix);
      assert(alignment >= os::vm_page_size(), "Unexpected");
      reserved = reserve_memory(size + noaccess_prefix, alignment, page_size);
    }
  }

  // No more reserve attempts

  if (reserved.is_reserved()) {
    // Successfully found and reserved memory for the heap.

    if (reserved.size() > size) {
      // We reserved heap memory with a noaccess prefix.

      assert(reserved.size() == size + noaccess_prefix, "Prefix should be included");
      // It can happen that we get a zerobased/unscaled heap with a noaccess prefix,
      // if we had to try at an arbitrary address.
      reserved = establish_noaccess_prefix(reserved, noaccess_prefix);
      assert(reserved.size() == size, "Prefix should be gone");
      return ReservedHeapSpace(reserved, noaccess_prefix);
    }

    // We reserved heap memory without a noaccess prefix.
    assert(!UseCompatibleCompressedOops, "noaccess prefix is missing");
    return ReservedHeapSpace(reserved, 0 /* noaccess_prefix */);
  }

  // Failed
  return {};
}

#endif // _LP64

ReservedHeapSpace HeapReserver::Instance::reserve_heap(size_t size, size_t alignment, size_t page_size) {
  if (UseCompressedOops) {
#ifdef _LP64
    return reserve_compressed_oops_heap(size, alignment, page_size);
#endif
  } else {
    return reserve_uncompressed_oops_heap(size, alignment, page_size);
  }
}

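// Entry point for reserving the Java heap. Creates a short-lived Instance,
// which owns the backing file descriptor when a heap allocation directory
// (the AllocateHeapAt option) is given, and delegates the reservation to it.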
ReservedHeapSpace HeapReserver::reserve(size_t size, size_t alignment, size_t page_size, const char* heap_allocation_directory) {
  sanity_check_arguments(size, alignment, page_size);

  assert(alignment != 0, "Precondition");
  assert(is_aligned(size, alignment), "Precondition");

  Instance instance(heap_allocation_directory);

  return instance.reserve_heap(size, alignment, page_size);
}