/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "memory/virtualspace.hpp"
#include "nmt/memTracker.hpp"
#include "oops/compressedKlass.hpp"
#include "oops/compressedOops.hpp"
#include "oops/markWord.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/powerOfTwo.hpp"

// ReservedSpace

// Dummy constructor
ReservedSpace::ReservedSpace() : _base(nullptr), _size(0), _noaccess_prefix(0),
    _alignment(0), _special(false), _fd_for_heap(-1), _executable(false) {
}

ReservedSpace::ReservedSpace(size_t size) : _fd_for_heap(-1) {
  // Want to use large pages where possible. If the size is
  // not large page aligned the mapping will be a mix of
  // large and normal pages.
  size_t page_size = os::page_size_for_region_unaligned(size, 1);
  size_t alignment = os::vm_allocation_granularity();
  initialize(size, alignment, page_size, nullptr, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) {
  // When a page size is given we don't want to mix large
  // and normal pages. If the size is not a multiple of the
  // page size it will be aligned up to achieve this.
  size_t alignment = os::vm_allocation_granularity();
  if (preferred_page_size != os::vm_page_size()) {
    alignment = MAX2(preferred_page_size, alignment);
    size = align_up(size, alignment);
  }
  initialize(size, alignment, preferred_page_size, nullptr, false);
}

ReservedSpace::ReservedSpace(size_t size,
                             size_t alignment,
                             size_t page_size,
                             char* requested_address) : _fd_for_heap(-1) {
  initialize(size, alignment, page_size, requested_address, false);
}

ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment, size_t page_size,
                             bool special, bool executable) : _fd_for_heap(-1) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  initialize_members(base, size, alignment, page_size, special, executable);
}
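
// Usage sketch (illustrative only; sizes are arbitrary): ReservedSpace is
// typically paired with the VirtualSpace class below, reserving address
// space first and committing it incrementally:
//
//   ReservedSpace rs(1 * M);       // reserve 1M of address space
//   if (rs.is_reserved()) {
//     VirtualSpace vs;
//     vs.initialize(rs, 64 * K);   // commit the first 64K up front
//     // ... use [vs.low(), vs.high()) ...
//     vs.release();                // forgets commit state only
//     rs.release();                // actually unreserves the mapping
//   }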

// Helper method
static char* attempt_map_or_reserve_memory_at(char* base, size_t size, int fd, bool executable) {
  if (fd != -1) {
    return os::attempt_map_memory_to_file_at(base, size, fd);
  }
  return os::attempt_reserve_memory_at(base, size, executable);
}

// Helper method
static char* map_or_reserve_memory(size_t size, int fd, bool executable) {
  if (fd != -1) {
    return os::map_memory_to_file(size, fd);
  }
  return os::reserve_memory(size, executable);
}

// Helper method
static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int fd, bool executable) {
  if (fd != -1) {
    return os::map_memory_to_file_aligned(size, alignment, fd);
  }
  return os::reserve_memory_aligned(size, alignment, executable);
}

// Helper method
static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) {
  if (is_file_mapped) {
    if (!os::unmap_memory(base, size)) {
      fatal("os::unmap_memory failed");
    }
  } else if (!os::release_memory(base, size)) {
    fatal("os::release_memory failed");
  }
}

// Helper method
static bool failed_to_reserve_as_requested(char* base, char* requested_address) {
  if (base == requested_address || requested_address == nullptr) {
    return false; // did not fail
  }

  if (base != nullptr) {
    // A different reserve address may be acceptable in other cases,
    // but for compressed oops the heap should be at the requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
  }
  return true;
}

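// Explicit large pages are needed when the OS cannot commit large pages on
// demand (i.e. there is no transparent-huge-page style support) and the
// caller asked for a page size larger than the default page size.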
static bool use_explicit_large_pages(size_t page_size) {
  return !os::can_commit_large_page_memory() &&
         page_size != os::vm_page_size();
}

static bool large_pages_requested() {
  return UseLargePages &&
         (!FLAG_IS_DEFAULT(UseLargePages) || !FLAG_IS_DEFAULT(LargePageSizeInBytes));
}

static void log_on_large_pages_failure(char* req_addr, size_t bytes) {
  if (large_pages_requested()) {
    // Compressed oops logging.
    log_debug(gc, heap, coops)("Reserve regular memory without large pages");
    // JVM style warning that we did not succeed in using large pages.
    char msg[128];
    jio_snprintf(msg, sizeof(msg), "Failed to reserve and commit memory using large pages. "
                                   "req_addr: " PTR_FORMAT " bytes: " SIZE_FORMAT,
                                   p2i(req_addr), bytes);
    warning("%s", msg);
  }
}

static char* reserve_memory(char* requested_address, const size_t size,
                            const size_t alignment, int fd, bool exec) {
  char* base;
  // If the memory was requested at a particular address, use
  // os::attempt_reserve_memory_at() to avoid mapping over something
  // important.  If the reservation fails, return null.
  if (requested_address != nullptr) {
    assert(is_aligned(requested_address, alignment),
           "Requested address " PTR_FORMAT " must be aligned to " SIZE_FORMAT,
           p2i(requested_address), alignment);
    base = attempt_map_or_reserve_memory_at(requested_address, size, fd, exec);
  } else {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.
    base = map_or_reserve_memory(size, fd, exec);
    // Check alignment constraints. This is only needed when there is
    // no requested address.
    if (!is_aligned(base, alignment)) {
      // Base not aligned, retry.
      unmap_or_release_memory(base, size, fd != -1 /*is_file_mapped*/);
      // Map using the requested alignment.
      base = map_or_reserve_memory_aligned(size, alignment, fd, exec);
    }
  }

  return base;
}

static char* reserve_memory_special(char* requested_address, const size_t size,
                                    const size_t alignment, const size_t page_size, bool exec) {

  log_trace(pagesize)("Attempt special mapping: size: " SIZE_FORMAT "%s, "
                      "alignment: " SIZE_FORMAT "%s",
                      byte_size_in_exact_unit(size), exact_unit_for_byte_size(size),
                      byte_size_in_exact_unit(alignment), exact_unit_for_byte_size(alignment));

  char* base = os::reserve_memory_special(size, alignment, page_size, requested_address, exec);
  if (base != nullptr) {
    // Check alignment constraints.
    assert(is_aligned(base, alignment),
           "reserve_memory_special() returned an unaligned address, base: " PTR_FORMAT
           " alignment: " SIZE_FORMAT_X,
           p2i(base), alignment);
  }
  return base;
}

void ReservedSpace::clear_members() {
  initialize_members(nullptr, 0, 0, 0, false, false);
}

void ReservedSpace::initialize_members(char* base, size_t size, size_t alignment,
                                       size_t page_size, bool special, bool executable) {
  _base = base;
  _size = size;
  _alignment = alignment;
  _page_size = page_size;
  _special = special;
  _executable = executable;
  _noaccess_prefix = 0;
}

void ReservedSpace::reserve(size_t size,
                            size_t alignment,
                            size_t page_size,
                            char* requested_address,
                            bool executable) {
  assert(is_aligned(size, alignment), "Size must be aligned to the requested alignment");

  // There are basically three different cases that we need to handle below:
  // 1. Mapping backed by a file
  // 2. Mapping backed by explicit large pages
  // 3. Mapping backed by normal pages or transparent huge pages
  // The first two have restrictions that require the whole mapping to be
  // committed up front. To record this the ReservedSpace is marked 'special'.

  // == Case 1 ==
  if (_fd_for_heap != -1) {
    // When there is a backing file directory for this space then whether
    // large pages are allocated is up to the filesystem of the backing file.
    // So UseLargePages is not taken into account for this reservation.
    char* base = reserve_memory(requested_address, size, alignment, _fd_for_heap, executable);
    if (base != nullptr) {
      initialize_members(base, size, alignment, os::vm_page_size(), true, executable);
    }
    // Always return; it is not possible to fall back to a reservation not backed by a file.
    return;
  }

  // == Case 2 ==
  if (use_explicit_large_pages(page_size)) {
    // The system can't commit large pages on demand (i.e. it has no
    // transparent huge page support) and the caller requested large pages.
    // To satisfy the request we use explicit large pages, which have to be
    // committed up front to ensure no reservations are lost.
    do {
      char* base = reserve_memory_special(requested_address, size, alignment, page_size, executable);
      if (base != nullptr) {
        // Successful reservation using large pages.
        initialize_members(base, size, alignment, page_size, true, executable);
        return;
      }
      page_size = os::page_sizes().next_smaller(page_size);
    } while (page_size > os::vm_page_size());

    // Failed to reserve explicit large pages, do proper logging.
    log_on_large_pages_failure(requested_address, size);
    // Now fall back to normal reservation.
    assert(page_size == os::vm_page_size(), "inv");
  }

  // == Case 3 ==
  char* base = reserve_memory(requested_address, size, alignment, -1, executable);
  if (base != nullptr) {
    // Successful mapping.
    initialize_members(base, size, alignment, page_size, false, executable);
  }
}

void ReservedSpace::initialize(size_t size,
                               size_t alignment,
                               size_t page_size,
                               char* requested_address,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");
  assert(page_size >= os::vm_page_size(), "Invalid page size");
  assert(is_power_of_2(page_size), "Invalid page size");

  clear_members();

  if (size == 0) {
    return;
  }

  // Ensure alignment is at least a page size (and thus never 0).
  alignment = MAX2(alignment, os::vm_page_size());

  // Reserve the memory.
  reserve(size, alignment, page_size, requested_address, executable);

  // Check that the requested address is used if given.
  if (failed_to_reserve_as_requested(_base, requested_address)) {
    // OS ignored the requested address, release the reservation.
    release();
    return;
  }
}

ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base(), partition_size, alignment, page_size(), special(), executable());
  return result;
}

ReservedSpace ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, page_size(), special(), executable());
  return result;
}

ReservedSpace ReservedSpace::partition(size_t offset, size_t partition_size, size_t alignment) {
  assert(offset + partition_size <= size(), "partition failed");
  ReservedSpace result(base() + offset, partition_size, alignment, page_size(), special(), executable());
  return result;
}

size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_up(size, os::vm_allocation_granularity());
}

void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      if (_fd_for_heap != -1) {
        os::unmap_memory(real_base, real_size);
      } else {
        os::release_memory_special(real_base, real_size);
      }
    } else {
      os::release_memory(real_base, real_size);
    }
    clear_members();
  }
}

// Put a ReservedSpace over an existing range
ReservedSpace ReservedSpace::space_for_range(char* base, size_t size, size_t alignment,
                                             size_t page_size, bool special, bool executable) {
  assert(is_aligned(base, os::vm_allocation_granularity()), "Unaligned base");
  assert(is_aligned(size, os::vm_page_size()), "Unaligned size");
  assert(os::page_sizes().contains(page_size), "Invalid pagesize");
  ReservedSpace space;
  space.initialize_members(base, size, alignment, page_size, special, executable);
  return space;
}

// Compressed oop support is not relevant in 32bit builds.
#ifdef _LP64

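// The noaccess prefix must be a multiple of both the page size (so it can be
// protected page-wise) and the heap alignment (so the base shifted past the
// prefix stays aligned); lcm() yields the smallest size satisfying both.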
static size_t noaccess_prefix_size(size_t alignment) {
  return lcm(os::vm_page_size(), alignment);
}

void ReservedHeapSpace::establish_noaccess_prefix() {
  assert(_alignment >= os::vm_page_size(), "must be at least page size big");
  _noaccess_prefix = noaccess_prefix_size(_alignment);

  if (base() && base() + _size > (char *)OopEncodingHeapMax) {
    if (true
        WIN64_ONLY(&& !UseLargePages)
        AIX_ONLY(&& (os::Aix::supports_64K_mmap_pages() || os::vm_page_size() == 4*K))) {
      // Protect memory at the base of the allocated region.
      // If special, the page was committed (only matters on windows)
      if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
        fatal("cannot protect protection page");
      }
      log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
                                 PTR_FORMAT " / " SIZE_FORMAT " bytes",
                                 p2i(_base),
                                 _noaccess_prefix);
      assert(CompressedOops::use_implicit_null_checks() == true, "not initialized?");
    } else {
      CompressedOops::set_use_implicit_null_checks(false);
    }
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
}
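
// After establish_noaccess_prefix() the reservation is laid out as:
//
//   original base                          _base              _base + _size
//   |---- noaccess prefix (protected) ----|------ usable heap ------|
//
// Decoding a null narrow oop with a non-null heap base then lands in the
// protected prefix and faults, which is what allows the VM to keep using
// implicit null checks for compressed oops.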

// Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
// Does not check whether the reserved memory actually is at requested_address, as the memory returned
// might still fulfill the wishes of the caller.
// Assures the memory is aligned to 'alignment'.
// NOTE: If ReservedHeapSpace already points to some reserved memory, this is freed first.
void ReservedHeapSpace::try_reserve_heap(size_t size,
                                         size_t alignment,
                                         size_t page_size,
                                         char* requested_address) {
  if (_base != nullptr) {
    // We tried before, but we didn't like the address delivered.
    release();
  }

  // Try to reserve the memory for the heap.
  log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
                             " heap of size " SIZE_FORMAT_X,
                             p2i(requested_address),
                             size);

  reserve(size, alignment, page_size, requested_address, false);

  // Check alignment constraints.
  if (is_reserved() && !is_aligned(_base, _alignment)) {
    // Base not aligned, retry.
    release();
  }
}

void ReservedHeapSpace::try_reserve_range(char *highest_start,
                                          char *lowest_start,
                                          size_t attach_point_alignment,
                                          char *aligned_heap_base_min_address,
                                          char *upper_bound,
                                          size_t size,
                                          size_t alignment,
                                          size_t page_size) {
  const size_t attach_range = highest_start - lowest_start;
  // Cap num_attempts at possible number.
  // At least one attempt is possible even for a zero-sized attach range.
  const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
  const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);

  const size_t stepsize = (attach_range == 0) ? // Only one try.
    (size_t) highest_start : align_up(attach_range / num_attempts_to_try, attach_point_alignment);
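
  // Note: with a zero attach range there is exactly one probe, at highest_start.
  // Using highest_start itself as the step then drops the next candidate below
  // lowest_start (which is above zero in practice), ending the loop below.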

  // Try attach points from top to bottom.
  char* attach_point = highest_start;
  while (attach_point >= lowest_start  &&
         attach_point <= highest_start &&  // Avoid wrap around.
         ((_base == nullptr) ||
          (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
    try_reserve_heap(size, alignment, page_size, attach_point);
    attach_point -= stepsize;
  }
}

#define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
#define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
#define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))

// Helper for heap allocation. Returns an array with addresses
// (OS-specific) which are suited for disjoint base mode. Array is
// null terminated.
static char** get_attach_addresses_for_disjoint_mode() {
  static uint64_t addresses[] = {
     2 * SIZE_32G,
     3 * SIZE_32G,
     4 * SIZE_32G,
     8 * SIZE_32G,
    10 * SIZE_32G,
     1 * SIZE_64K * SIZE_32G,
     2 * SIZE_64K * SIZE_32G,
     3 * SIZE_64K * SIZE_32G,
     4 * SIZE_64K * SIZE_32G,
    16 * SIZE_64K * SIZE_32G,
    32 * SIZE_64K * SIZE_32G,
    34 * SIZE_64K * SIZE_32G,
    0
  };
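
  // Every entry is a multiple of 32G, i.e. of OopEncodingHeapMax with the
  // default object alignment, so a heap based at one of these addresses keeps
  // its base bits disjoint from the shifted-oop bits, which is what disjoint
  // base mode relies on.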

  // Skip addresses below OopEncodingHeapMax or HeapBaseMinAddress. This
  // assumes the array is sorted.
  uint i = 0;
  while (addresses[i] != 0 &&
         (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
    i++;
  }
  uint start = i;

  // Avoid more steps than requested.
  i = 0;
  while (addresses[start+i] != 0) {
    if (i == HeapSearchSteps) {
      addresses[start+i] = 0;
      break;
    }
    i++;
  }

  return (char**) &addresses[start];
}

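// The search below tries heap placements in order of decreasing benefit for
// compressed oops: (1) below UnscaledOopHeapMax (4G by default) for unscaled
// decoding, (2) below OopEncodingHeapMax (32G by default) for zero-based
// decoding, (3) at an address suited for disjoint base mode, and finally
// (4) anywhere, falling back to heap-based decoding with a noaccess prefix.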
void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, size_t page_size) {
  guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
            "can not allocate compressed oop heap for this size");
  guarantee(alignment == MAX2(alignment, os::vm_page_size()), "alignment too small");

  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  // The necessary attach point alignment for generated wish addresses.
  // This is needed to increase the chance of attaching for mmap and shmat.
  // AIX is the only platform that uses System V shm for reserving virtual memory.
  // In this case, the required alignment of the allocated size (64K) and the alignment
  // of possible start points of the memory region (256M) differ.
  // This is not reflected by os_allocation_granularity().
  // The logic here is dual to the one in pd_reserve_memory in os_aix.cpp
  const size_t os_attach_point_alignment =
    AIX_ONLY(os::vm_page_size() == 4*K ? 4*K : 256*M)
    NOT_AIX(os::vm_allocation_granularity());

  const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);

  char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment);
  size_t noaccess_prefix = (((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) LP64_ONLY(|| UseCompatibleCompressedOops)) ?
    noaccess_prefix_size(alignment) : 0;

  // Attempt to alloc at user-given address.
  if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) LP64_ONLY(|| UseCompatibleCompressedOops)) {
    try_reserve_heap(size + noaccess_prefix, alignment, page_size, aligned_heap_base_min_address);
    if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
      release();
    }
  }

  // Keep heap at HeapBaseMinAddress.
  if (_base == nullptr) {

    // Try to allocate the heap at addresses that allow efficient oop compression.
    // Different schemes are tried, in order of decreasing optimization potential.
    //
    // For this, try_reserve_heap() is called with the desired heap base addresses.
    // A call into the os layer to allocate at a given address can return memory
    // at a different address than requested.  Still, this might be memory at a useful
    // address. try_reserve_heap() always returns this allocated memory, as only here
    // the criteria for a good heap are checked.

    // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
    // Give it several tries from top of range to bottom.
    if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax LP64_ONLY(&& !UseCompatibleCompressedOops)) {

      // Calculate the address range within which we try to attach (range of possible start addresses).
      char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
      char* const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
      try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                        aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, page_size);
    }

    // zerobased: Attempt to allocate in the lower 32G.
    char *zerobased_max = (char *)OopEncodingHeapMax;

    // Give it several tries from top of range to bottom.
    if (LP64_ONLY(!UseCompatibleCompressedOops &&)
        aligned_heap_base_min_address + size <= zerobased_max &&  // Zerobased theoretically possible.
        ((_base == nullptr) ||                                    // No previous try succeeded.
         (_base + size > zerobased_max))) {                       // Unscaled delivered an arbitrary address.

      // Calculate the address range within which we try to attach (range of possible start addresses).
      char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
      // Need to be careful about size being guaranteed to be less
      // than UnscaledOopHeapMax due to type constraints.
      char *lowest_start = aligned_heap_base_min_address;
      uint64_t unscaled_end = UnscaledOopHeapMax - size;
      if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
        lowest_start = MAX2(lowest_start, (char*)unscaled_end);
      }
      lowest_start = align_up(lowest_start, attach_point_alignment);
      try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                        aligned_heap_base_min_address, zerobased_max, size, alignment, page_size);
    }

    // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
    // implement null checks.
    noaccess_prefix = noaccess_prefix_size(alignment);

    // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjoint base mode.
    char** addresses = get_attach_addresses_for_disjoint_mode();
    int i = 0;
    while ((addresses[i] != nullptr) &&                    // End of array not yet reached.
           ((_base == nullptr) ||                          // No previous try succeeded.
            (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
             !CompressedOops::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
      char* const attach_point = addresses[i];
      assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
      try_reserve_heap(size + noaccess_prefix, alignment, page_size, attach_point);
      i++;
    }

    // Last, desperate try without any placement.
    if (_base == nullptr) {
      log_trace(gc, heap, coops)("Trying to allocate at address null heap of size " SIZE_FORMAT_X, size + noaccess_prefix);
      initialize(size + noaccess_prefix, alignment, page_size, nullptr, false);
    }
  }
}

#endif // _LP64

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, size_t page_size, const char* heap_allocation_directory) : ReservedSpace() {

  if (size == 0) {
    return;
  }

  if (heap_allocation_directory != nullptr) {
    _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
    if (_fd_for_heap == -1) {
      vm_exit_during_initialization(
        err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
    }
    // When there is a backing file directory for this space then whether
    // large pages are allocated is up to the filesystem of the backing file.
    // If requested, let the user know that explicit large pages can't be used.
    if (use_explicit_large_pages(page_size) && large_pages_requested()) {
      log_debug(gc, heap)("Cannot allocate explicit large pages for Java Heap when AllocateHeapAt option is set.");
    }
  }

  // Heap size should be aligned to alignment, too.
  guarantee(is_aligned(size, alignment), "set by caller");

  if (UseCompressedOops) {
#ifdef _LP64
    initialize_compressed_heap(size, alignment, page_size);
    if (_size > size LP64_ONLY(|| UseCompatibleCompressedOops)) {
      // We allocated heap with noaccess prefix.
      // It can happen we get a zerobased/unscaled heap with noaccess prefix,
      // if we had to try at arbitrary address.
      establish_noaccess_prefix();
    }
#else
    ShouldNotReachHere();
#endif // _LP64
  } else {
    initialize(size, alignment, page_size, nullptr, false);
  }

  assert(markWord::encode_pointer_as_mark(_base).decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markWord::encode_pointer_as_mark(&_base[size]).decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");

  if (base() != nullptr) {
    MemTracker::record_virtual_memory_tag((address)base(), mtJavaHeap);
  }

  if (_fd_for_heap != -1) {
    ::close(_fd_for_heap);
  }
}

MemRegion ReservedHeapSpace::region() const {
  return MemRegion((HeapWord*)base(), (HeapWord*)end());
}

// Reserve space for code segment.  Same as Java heap only we mark this as
// executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     size_t rs_page_size) : ReservedSpace() {
  initialize(r_size, rs_align, rs_page_size, /*requested address*/ nullptr, /*executable*/ true);
  MemTracker::record_virtual_memory_tag((address)base(), mtCode);
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary           = nullptr;
  _high_boundary          = nullptr;
  _low                    = nullptr;
  _high                   = nullptr;
  _lower_high             = nullptr;
  _middle_high            = nullptr;
  _upper_high             = nullptr;
  _lower_high_boundary    = nullptr;
  _middle_high_boundary   = nullptr;
  _upper_high_boundary    = nullptr;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
  return initialize_with_granularity(rs, committed_size, max_commit_granularity);
}

bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if (!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == nullptr, "VirtualSpace already initialized");
  assert(max_commit_granularity > 0, "Granularity must be non-zero.");

  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
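  //
  // The resulting three-region layout (addresses grow to the right):
  //
  //   low_boundary()                                      high_boundary()
  //   |-- lower --|------------- middle -------------|-- upper --|
  //               ^                                  ^
  //      lower_high_boundary()             middle_high_boundary()
  //
  // lower and upper are committed with the default page size, while middle
  // is committed with max_commit_granularity (e.g. the large page size).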
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = max_commit_granularity;
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = align_up(low_boundary(), middle_alignment());
  _middle_high_boundary = align_down(high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}


VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // This does not release memory it reserved.
  // Caller must release via rs.release();
  _low_boundary           = nullptr;
  _high_boundary          = nullptr;
  _low                    = nullptr;
  _high                   = nullptr;
  _lower_high             = nullptr;
  _middle_high            = nullptr;
  _upper_high             = nullptr;
  _lower_high_boundary    = nullptr;
  _middle_high_boundary   = nullptr;
  _upper_high_boundary    = nullptr;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}

size_t VirtualSpace::actual_committed_size() const {
  // Special VirtualSpaces commit all reserved space up front.
  if (special()) {
    return reserved_size();
  }

  size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
  size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
  size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));

#ifdef ASSERT
  size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
  size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
  size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));

  if (committed_high > 0) {
    assert(committed_low == lower, "Must be");
    assert(committed_middle == middle, "Must be");
  }

  if (committed_middle > 0) {
    assert(committed_low == lower, "Must be");
  }
  if (committed_middle < middle) {
    assert(committed_high == 0, "Must be");
  }

  if (committed_low < lower) {
    assert(committed_high == 0, "Must be");
    assert(committed_middle == 0, "Must be");
  }
#endif

  return committed_low + committed_middle + committed_high;
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

static void pretouch_expanded_memory(void* start, void* end) {
  assert(is_aligned(start, os::vm_page_size()), "Unexpected alignment");
  assert(is_aligned(end,   os::vm_page_size()), "Unexpected alignment");

  os::pretouch_memory(start, end);
}

static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
  if (os::commit_memory(start, size, alignment, executable)) {
    if (pre_touch || AlwaysPreTouch) {
      pretouch_expanded_memory(start, start + size);
    }
    return true;
  }

  debug_only(warning(
      "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT
      " size=" SIZE_FORMAT ", executable=%d) failed",
      p2i(start), p2i(start + size), size, executable);)

  return false;
}

/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done in the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading up to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
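
// For example (illustrative): with 4K default pages and a 2M commit
// granularity, an expand_by() that crosses the region boundaries commits the
// unaligned head of the space with 4K pages, the 2M-aligned interior with
// large pages, and the unaligned tail with 4K pages again.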
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) {
    return false;
  }

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(), "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =  MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high = MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =  MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on the regions' alignment.  lower and upper
  // alignment will always be default page size.  middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs = pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs = pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs = pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() && lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() && middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() && upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region");
    if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable)) {
      return false;
    }
    _lower_high += lower_needs;
  }

  if (middle_needs > 0) {
    assert(middle_high() + middle_needs <= middle_high_boundary(), "must not expand beyond region");
    if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable)) {
      return false;
    }
    _middle_high += middle_needs;
  }

  if (upper_needs > 0) {
    assert(upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region");
    if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable)) {
      return false;
    }
    _upper_high += upper_needs;
  }

  _high += bytes;
  return true;
}

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary, in
// which case that particular page can now be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs, _executable)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs, _executable)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs, _executable)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print_on(outputStream* out) const {
  out->print   ("Virtual space:");
  if (special()) out->print(" (pinned in memory)");
  out->cr();
  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  out->print_cr(" - [low, high]:     [" PTR_FORMAT ", " PTR_FORMAT "]",  p2i(low()), p2i(high()));
  out->print_cr(" - [low_b, high_b]: [" PTR_FORMAT ", " PTR_FORMAT "]",  p2i(low_boundary()), p2i(high_boundary()));
}

void VirtualSpace::print() const {
  print_on(tty);
}

#endif