/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "memory/virtualspace.hpp"
#include "nmt/memTracker.hpp"
#include "oops/compressedKlass.hpp"
#include "oops/compressedOops.hpp"
#include "oops/markWord.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/powerOfTwo.hpp"

// ReservedSpace

// Dummy constructor
ReservedSpace::ReservedSpace() : _base(nullptr), _size(0), _noaccess_prefix(0),
    _alignment(0), _special(false), _fd_for_heap(-1), _executable(false) {
}

ReservedSpace::ReservedSpace(size_t size) : _fd_for_heap(-1) {
  // Want to use large pages where possible. If the size is
  // not large page aligned the mapping will be a mix of
  // large and normal pages.
  size_t page_size = os::page_size_for_region_unaligned(size, 1);
  size_t alignment = os::vm_allocation_granularity();
  initialize(size, alignment, page_size, nullptr, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) {
  // When a page size is given we don't want to mix large
  // and normal pages. If the size is not a multiple of the
  // page size it will be aligned up to achieve this.
  size_t alignment = os::vm_allocation_granularity();
  if (preferred_page_size != os::vm_page_size()) {
    alignment = MAX2(preferred_page_size, alignment);
    size = align_up(size, alignment);
  }
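  // For example (illustrative numbers): with a preferred page size of 2M,
  // a 5M request is aligned up to 6M so the mapping can be built from 2M
  // pages only.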
  initialize(size, alignment, preferred_page_size, nullptr, false);
}

ReservedSpace::ReservedSpace(size_t size,
                             size_t alignment,
                             size_t page_size,
                             char* requested_address) : _fd_for_heap(-1) {
  initialize(size, alignment, page_size, requested_address, false);
}

ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment, size_t page_size,
                             bool special, bool executable) : _fd_for_heap(-1) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  initialize_members(base, size, alignment, page_size, special, executable);
}

// Helper method
static char* attempt_map_or_reserve_memory_at(char* base, size_t size, int fd, bool executable) {
  if (fd != -1) {
    return os::attempt_map_memory_to_file_at(base, size, fd);
  }
  return os::attempt_reserve_memory_at(base, size, executable);
}

// Helper method
static char* map_or_reserve_memory(size_t size, int fd, bool executable) {
  if (fd != -1) {
    return os::map_memory_to_file(size, fd);
  }
  return os::reserve_memory(size, executable);
}

// Helper method
static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int fd, bool executable) {
  if (fd != -1) {
    return os::map_memory_to_file_aligned(size, alignment, fd);
  }
  return os::reserve_memory_aligned(size, alignment, executable);
}

// Helper method
static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) {
  if (is_file_mapped) {
    if (!os::unmap_memory(base, size)) {
      fatal("os::unmap_memory failed");
    }
  } else if (!os::release_memory(base, size)) {
    fatal("os::release_memory failed");
  }
}

// Helper method
static bool failed_to_reserve_as_requested(char* base, char* requested_address) {
  if (base == requested_address || requested_address == nullptr) {
    return false; // did not fail
  }

  if (base != nullptr) {
    // A different reserve address may be acceptable in other cases, but for
    // a compressed oops heap the memory should be at the requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
  }
  return true;
}

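// Explicit large pages are needed when the OS cannot commit large pages on
// demand (i.e. there is no transparent-huge-pages-style commit). Reserving
// with a non-default page size then implies an up-front, 'special' mapping;
// see ReservedSpace::reserve() below.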
static bool use_explicit_large_pages(size_t page_size) {
  return !os::can_commit_large_page_memory() &&
         page_size != os::vm_page_size();
}

static bool large_pages_requested() {
  return UseLargePages &&
         (!FLAG_IS_DEFAULT(UseLargePages) || !FLAG_IS_DEFAULT(LargePageSizeInBytes));
}

static void log_on_large_pages_failure(char* req_addr, size_t bytes) {
  if (large_pages_requested()) {
    // Compressed oops logging.
    log_debug(gc, heap, coops)("Reserve regular memory without large pages");
    // JVM style warning that we did not succeed in using large pages.
    char msg[128];
    jio_snprintf(msg, sizeof(msg), "Failed to reserve and commit memory using large pages. "
                                   "req_addr: " PTR_FORMAT " bytes: " SIZE_FORMAT,
                                   p2i(req_addr), bytes);
    warning("%s", msg);
  }
}

static char* reserve_memory(char* requested_address, const size_t size,
                            const size_t alignment, int fd, bool exec) {
  char* base;
  // If the memory was requested at a particular address, use
  // os::attempt_reserve_memory_at() to avoid mapping over something
  // important.  If the reservation fails, return null.
  if (requested_address != nullptr) {
    assert(is_aligned(requested_address, alignment),
           "Requested address " PTR_FORMAT " must be aligned to " SIZE_FORMAT,
           p2i(requested_address), alignment);
    base = attempt_map_or_reserve_memory_at(requested_address, size, fd, exec);
  } else {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.
    base = map_or_reserve_memory(size, fd, exec);
    // Check alignment constraints. This is only needed when there is
    // no requested address.
    if (!is_aligned(base, alignment)) {
      // Base not aligned, retry.
      unmap_or_release_memory(base, size, fd != -1 /*is_file_mapped*/);
      // Map using the requested alignment.
      base = map_or_reserve_memory_aligned(size, alignment, fd, exec);
    }
  }

  return base;
}

static char* reserve_memory_special(char* requested_address, const size_t size,
                                    const size_t alignment, const size_t page_size, bool exec) {

  log_trace(pagesize)("Attempt special mapping: size: " SIZE_FORMAT "%s, "
                      "alignment: " SIZE_FORMAT "%s",
                      byte_size_in_exact_unit(size), exact_unit_for_byte_size(size),
                      byte_size_in_exact_unit(alignment), exact_unit_for_byte_size(alignment));

  char* base = os::reserve_memory_special(size, alignment, page_size, requested_address, exec);
  if (base != nullptr) {
    // Check alignment constraints.
    assert(is_aligned(base, alignment),
           "reserve_memory_special() returned an unaligned address, base: " PTR_FORMAT
           " alignment: " SIZE_FORMAT_X,
           p2i(base), alignment);
  }
  return base;
}

void ReservedSpace::clear_members() {
  initialize_members(nullptr, 0, 0, 0, false, false);
}

void ReservedSpace::initialize_members(char* base, size_t size, size_t alignment,
                                       size_t page_size, bool special, bool executable) {
  _base = base;
  _size = size;
  _alignment = alignment;
  _page_size = page_size;
  _special = special;
  _executable = executable;
  _noaccess_prefix = 0;
}

void ReservedSpace::reserve(size_t size,
                            size_t alignment,
                            size_t page_size,
                            char* requested_address,
                            bool executable) {
  assert(is_aligned(size, alignment), "Size must be aligned to the requested alignment");

  // There are basically three different cases that we need to handle below:
  // 1. Mapping backed by a file
  // 2. Mapping backed by explicit large pages
  // 3. Mapping backed by normal pages or transparent huge pages
  // The first two have restrictions that require the whole mapping to be
  // committed up front. To record this, the ReservedSpace is marked 'special'.

  // == Case 1 ==
  if (_fd_for_heap != -1) {
    // When there is a backing file directory for this space then whether
    // large pages are allocated is up to the filesystem of the backing file.
    // So UseLargePages is not taken into account for this reservation.
    char* base = reserve_memory(requested_address, size, alignment, _fd_for_heap, executable);
    if (base != nullptr) {
      initialize_members(base, size, alignment, os::vm_page_size(), true, executable);
    }
    // Always return, not possible to fall back to reservation not using a file.
    return;
  }

  // == Case 2 ==
  if (use_explicit_large_pages(page_size)) {
    // The system can't commit large pages on demand (i.e. cannot use
    // transparent huge pages), and the caller requested large pages. To
    // satisfy the request we use explicit large pages, which have to be
    // committed up front to ensure no reservations are lost.
    do {
      char* base = reserve_memory_special(requested_address, size, alignment, page_size, executable);
      if (base != nullptr) {
        // Successful reservation using large pages.
        initialize_members(base, size, alignment, page_size, true, executable);
        return;
      }
      page_size = os::page_sizes().next_smaller(page_size);
    } while (page_size > os::vm_page_size());

    // Failed to reserve explicit large pages, do proper logging.
    log_on_large_pages_failure(requested_address, size);
    // Now fall back to normal reservation.
    assert(page_size == os::vm_page_size(), "inv");
  }

  // == Case 3 ==
  char* base = reserve_memory(requested_address, size, alignment, -1, executable);
  if (base != nullptr) {
    // Successful mapping.
    initialize_members(base, size, alignment, page_size, false, executable);
  }
}

void ReservedSpace::initialize(size_t size,
                               size_t alignment,
                               size_t page_size,
                               char* requested_address,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");
  assert(page_size >= os::vm_page_size(), "Invalid page size");
  assert(is_power_of_2(page_size), "Invalid page size");

  clear_members();

  if (size == 0) {
    return;
  }

  // Adjust alignment to not be 0.
  alignment = MAX2(alignment, os::vm_page_size());

  // Reserve the memory.
  reserve(size, alignment, page_size, requested_address, executable);

  // Check that the requested address is used if given.
  if (failed_to_reserve_as_requested(_base, requested_address)) {
    // OS ignored the requested address, release the reservation.
    release();
    return;
  }
}

ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base(), partition_size, alignment, page_size(), special(), executable());
  return result;
}

ReservedSpace ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, page_size(), special(), executable());
  return result;
}

ReservedSpace ReservedSpace::partition(size_t offset, size_t partition_size, size_t alignment) {
  assert(offset + partition_size <= size(), "partition failed");
  ReservedSpace result(base() + offset, partition_size, alignment, page_size(), special(), executable());
  return result;
}

size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_up(size, os::vm_allocation_granularity());
}

void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      if (_fd_for_heap != -1) {
        os::unmap_memory(real_base, real_size);
      } else {
        os::release_memory_special(real_base, real_size);
      }
    } else {
      os::release_memory(real_base, real_size);
    }
    clear_members();
  }
}

// Put a ReservedSpace over an existing range
ReservedSpace ReservedSpace::space_for_range(char* base, size_t size, size_t alignment,
                                             size_t page_size, bool special, bool executable) {
  assert(is_aligned(base, os::vm_allocation_granularity()), "Unaligned base");
  assert(is_aligned(size, os::vm_page_size()), "Unaligned size");
  assert(os::page_sizes().contains(page_size), "Invalid pagesize");
  ReservedSpace space;
  space.initialize_members(base, size, alignment, page_size, special, executable);
  return space;
}

// Compressed oop support is not relevant in 32bit builds.
#ifdef _LP64

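// The noaccess prefix has to be page aligned (so it can be protected) and
// heap-alignment aligned (so the heap base that follows it stays aligned),
// hence the lcm; e.g. with a 4K page size and a 4M heap alignment the
// prefix is lcm(4K, 4M) = 4M.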
static size_t noaccess_prefix_size(size_t alignment) {
  return lcm(os::vm_page_size(), alignment);
}

void ReservedHeapSpace::establish_noaccess_prefix() {
  assert(_alignment >= os::vm_page_size(), "must be at least page size big");
  _noaccess_prefix = noaccess_prefix_size(_alignment);

  if (base() && base() + _size > (char *)OopEncodingHeapMax) {
    if (true
        WIN64_ONLY(&& !UseLargePages)
        AIX_ONLY(&& (os::Aix::supports_64K_mmap_pages() || os::vm_page_size() == 4*K))) {
      // Protect memory at the base of the allocated region.
      // If special, the page was committed (only matters on windows)
      if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
        fatal("cannot protect protection page");
      }
      log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
                                 PTR_FORMAT " / " INTX_FORMAT " bytes",
                                 p2i(_base),
                                 _noaccess_prefix);
      assert(CompressedOops::use_implicit_null_checks() == true, "not initialized?");
    } else {
      CompressedOops::set_use_implicit_null_checks(false);
    }
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
}

// Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
// Does not check whether the reserved memory actually is at requested_address, as the memory returned
// might still fulfill the wishes of the caller.
// Assures the memory is aligned to 'alignment'.
// NOTE: If ReservedHeapSpace already points to some reserved memory, this is freed first.
void ReservedHeapSpace::try_reserve_heap(size_t size,
                                         size_t alignment,
                                         size_t page_size,
                                         char* requested_address) {
  if (_base != nullptr) {
    // We tried before, but we didn't like the address delivered.
    release();
  }

  // Try to reserve the memory for the heap.
  log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
                             " heap of size " SIZE_FORMAT_X,
                             p2i(requested_address),
                             size);

  reserve(size, alignment, page_size, requested_address, false);

  // Check alignment constraints.
  if (is_reserved() && !is_aligned(_base, _alignment)) {
    // Base not aligned, retry.
    release();
  }
}

void ReservedHeapSpace::try_reserve_range(char *highest_start,
                                          char *lowest_start,
                                          size_t attach_point_alignment,
                                          char *aligned_heap_base_min_address,
                                          char *upper_bound,
                                          size_t size,
                                          size_t alignment,
                                          size_t page_size) {
  const size_t attach_range = highest_start - lowest_start;
  // Cap num_attempts at possible number.
  // At least one is possible even for 0 sized attach range.
  const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
  const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);

  const size_t stepsize = (attach_range == 0) ? // Only one try.
    (size_t) highest_start : align_up(attach_range / num_attempts_to_try, attach_point_alignment);
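  // Illustrative example: with attach_range = 28G, attach_point_alignment = 64K
  // and the default HeapSearchSteps = 3, stepsize is align_up(28G / 3, 64K),
  // so roughly 9.3G; the attach points tried are then highest_start,
  // highest_start - 9.3G and highest_start - 18.7G.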

  // Try attach points from top to bottom.
  char* attach_point = highest_start;
  while (attach_point >= lowest_start  &&
         attach_point <= highest_start &&  // Avoid wrap around.
         ((_base == nullptr) ||
          (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
    try_reserve_heap(size, alignment, page_size, attach_point);
    attach_point -= stepsize;
  }
}

#define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
#define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
#define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))

// Helper for heap allocation. Returns an array with addresses
// (OS-specific) which are suited for disjoint base mode. Array is
// null terminated.
static char** get_attach_addresses_for_disjoint_mode() {
  static uint64_t addresses[] = {
     2 * SIZE_32G,
     3 * SIZE_32G,
     4 * SIZE_32G,
     8 * SIZE_32G,
    10 * SIZE_32G,
     1 * SIZE_64K * SIZE_32G,
     2 * SIZE_64K * SIZE_32G,
     3 * SIZE_64K * SIZE_32G,
     4 * SIZE_64K * SIZE_32G,
    16 * SIZE_64K * SIZE_32G,
    32 * SIZE_64K * SIZE_32G,
    34 * SIZE_64K * SIZE_32G,
    0
  };
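
  // Note that all entries are multiples of 32G, the default OopEncodingHeapMax,
  // matching the caller's strategy of attaching at addresses aligned to
  // OopEncodingHeapMax for disjoint base mode.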

  // Skip addresses below OopEncodingHeapMax or HeapBaseMinAddress. This
  // assumes the array is sorted.
  uint i = 0;
  while (addresses[i] != 0 &&
         (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
    i++;
  }
  uint start = i;

  // Avoid more steps than requested.
  i = 0;
  while (addresses[start+i] != 0) {
    if (i == HeapSearchSteps) {
      addresses[start+i] = 0;
      break;
    }
    i++;
  }

  return (char**) &addresses[start];
}

void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, size_t page_size) {
  guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
            "can not allocate compressed oop heap for this size");
  guarantee(alignment == MAX2(alignment, os::vm_page_size()), "alignment too small");

  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  // The necessary attach point alignment for generated wish addresses.
  // This is needed to increase the chance of attaching for mmap and shmat.
  // AIX is the only platform that uses System V shm for reserving virtual memory.
  // In this case, the required alignment of the allocated size (64K) and the alignment
  // of possible start points of the memory region (256M) differ.
  // This is not reflected by os_allocation_granularity().
  // The logic here is dual to the one in pd_reserve_memory in os_aix.cpp
  const size_t os_attach_point_alignment =
    AIX_ONLY(os::vm_page_size() == 4*K ? 4*K : 256*M)
    NOT_AIX(os::vm_allocation_granularity());

  const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);

  char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment);
  size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
    noaccess_prefix_size(alignment) : 0;
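  // E.g. with the default 8-byte object alignment, OopEncodingHeapMax is 32G;
  // for HeapBaseMinAddress = 2G and a 31G heap, 2G + 31G exceeds 32G, so room
  // for a noaccess prefix must be reserved up front.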

  // Attempt to alloc at user-given address.
  if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
    try_reserve_heap(size + noaccess_prefix, alignment, page_size, aligned_heap_base_min_address);
    if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
      release();
    }
  }

  // Keep heap at HeapBaseMinAddress.
  if (_base == nullptr) {

    // Try to allocate the heap at addresses that allow efficient oop compression.
    // Different schemes are tried, in order of decreasing optimization potential.
    //
    // For this, try_reserve_heap() is called with the desired heap base addresses.
    // A call into the os layer to allocate at a given address can return memory
    // at a different address than requested.  Still, this might be memory at a useful
    // address. try_reserve_heap() always returns this allocated memory, as only here
    // the criteria for a good heap are checked.

    // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
    // Give it several tries from top of range to bottom.
    if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {

      // Calc address range within which we try to attach (range of possible start addresses).
      char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
      char* const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
      try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                        aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, page_size);
    }

    // zerobased: Attempt to allocate in the lower 32G.
    char *zerobased_max = (char *)OopEncodingHeapMax;

    // Give it several tries from top of range to bottom.
    if (aligned_heap_base_min_address + size <= zerobased_max &&  // Zerobased theoretically possible.
        ((_base == nullptr) ||                                    // No previous try succeeded.
         (_base + size > zerobased_max))) {                       // Unscaled delivered an arbitrary address.

      // Calc address range within which we try to attach (range of possible start addresses).
      char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
      // Need to be careful about size being guaranteed to be less
      // than UnscaledOopHeapMax due to type constraints.
      char *lowest_start = aligned_heap_base_min_address;
      uint64_t unscaled_end = UnscaledOopHeapMax - size;
      if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
        lowest_start = MAX2(lowest_start, (char*)unscaled_end);
      }
      lowest_start = align_up(lowest_start, attach_point_alignment);
      try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                        aligned_heap_base_min_address, zerobased_max, size, alignment, page_size);
    }

    // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
    // implement null checks.
    noaccess_prefix = noaccess_prefix_size(alignment);

    // Try to attach at addresses that are aligned to OopEncodingHeapMax (disjoint base mode).
    char** addresses = get_attach_addresses_for_disjoint_mode();
    int i = 0;
    while ((addresses[i] != nullptr) &&                    // End of array not yet reached.
           ((_base == nullptr) ||                          // No previous try succeeded.
            (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
             !CompressedOops::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
      char* const attach_point = addresses[i];
      assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
      try_reserve_heap(size + noaccess_prefix, alignment, page_size, attach_point);
      i++;
    }

    // Last, desperate try without any placement.
    if (_base == nullptr) {
      log_trace(gc, heap, coops)("Trying to allocate at address null heap of size " SIZE_FORMAT_X, size + noaccess_prefix);
      initialize(size + noaccess_prefix, alignment, page_size, nullptr, false);
    }
  }
}

#endif // _LP64

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, size_t page_size, const char* heap_allocation_directory) : ReservedSpace() {

  if (size == 0) {
    return;
  }

  if (heap_allocation_directory != nullptr) {
    _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
    if (_fd_for_heap == -1) {
      vm_exit_during_initialization(
        err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
    }
    // When there is a backing file directory for this space then whether
    // large pages are allocated is up to the filesystem of the backing file.
    // If requested, let the user know that explicit large pages can't be used.
    if (use_explicit_large_pages(page_size) && large_pages_requested()) {
      log_debug(gc, heap)("Cannot allocate explicit large pages for Java Heap when AllocateHeapAt option is set.");
    }
  }

  // Heap size should be aligned to alignment, too.
  guarantee(is_aligned(size, alignment), "set by caller");

  if (UseCompressedOops) {
#ifdef _LP64
    initialize_compressed_heap(size, alignment, page_size);
    if (_size > size) {
      // We allocated heap with noaccess prefix.
      // It can happen that we get a zerobased/unscaled heap with noaccess prefix,
      // if we had to try at an arbitrary address.
      establish_noaccess_prefix();
    }
#else
    ShouldNotReachHere();
#endif // _LP64
  } else {
    initialize(size, alignment, page_size, nullptr, false);
  }

  assert(markWord::encode_pointer_as_mark(_base).decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markWord::encode_pointer_as_mark(&_base[size]).decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");

  if (base() != nullptr) {
    MemTracker::record_virtual_memory_tag((address)base(), mtJavaHeap);
  }

  if (_fd_for_heap != -1) {
    ::close(_fd_for_heap);
  }
}

MemRegion ReservedHeapSpace::region() const {
  return MemRegion((HeapWord*)base(), (HeapWord*)end());
}

// Reserve space for code segment.  Same as Java heap only we mark this as
// executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     size_t rs_page_size) : ReservedSpace() {
  initialize(r_size, rs_align, rs_page_size, /*requested address*/ nullptr, /*executable*/ true);
  MemTracker::record_virtual_memory_tag((address)base(), mtCode);
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary           = nullptr;
  _high_boundary          = nullptr;
  _low                    = nullptr;
  _high                   = nullptr;
  _lower_high             = nullptr;
  _middle_high            = nullptr;
  _upper_high             = nullptr;
  _lower_high_boundary    = nullptr;
  _middle_high_boundary   = nullptr;
  _upper_high_boundary    = nullptr;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
  return initialize_with_granularity(rs, committed_size, max_commit_granularity);
}

bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if (!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == nullptr, "VirtualSpace already initialized");
  assert(max_commit_granularity > 0, "Granularity must be non-zero.");

  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
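  //
  // The resulting layout, when the boundaries are not already aligned to the
  // commit granularity, is:
  //
  //   low_boundary()  lower_high_boundary()  middle_high_boundary()  high_boundary()
  //        |---- lower ----|-------- middle --------|---- upper ----|
  //          small pages     middle_alignment()-       small pages
  //                          sized (e.g. large) pages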
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = max_commit_granularity;
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = align_up(low_boundary(), middle_alignment());
  _middle_high_boundary = align_down(high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}


VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // This does not release memory it reserved.
  // Caller must release via rs.release();
  _low_boundary           = nullptr;
  _high_boundary          = nullptr;
  _low                    = nullptr;
  _high                   = nullptr;
  _lower_high             = nullptr;
  _middle_high            = nullptr;
  _upper_high             = nullptr;
  _lower_high_boundary    = nullptr;
  _middle_high_boundary   = nullptr;
  _upper_high_boundary    = nullptr;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}

size_t VirtualSpace::actual_committed_size() const {
  // Special VirtualSpaces commit all reserved space up front.
  if (special()) {
    return reserved_size();
  }

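  // Commits fill the lower region first, then the middle, then the upper,
  // so a partially committed space has at most one partially filled region;
  // the debug checks below rely on this ordering.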
  size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
  size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
  size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));

#ifdef ASSERT
  size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
  size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
  size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));

  if (committed_high > 0) {
    assert(committed_low == lower, "Must be");
    assert(committed_middle == middle, "Must be");
  }

  if (committed_middle > 0) {
    assert(committed_low == lower, "Must be");
  }
  if (committed_middle < middle) {
    assert(committed_high == 0, "Must be");
  }

  if (committed_low < lower) {
    assert(committed_high == 0, "Must be");
    assert(committed_middle == 0, "Must be");
  }
#endif

  return committed_low + committed_middle + committed_high;
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

static void pretouch_expanded_memory(void* start, void* end) {
  assert(is_aligned(start, os::vm_page_size()), "Unexpected alignment");
  assert(is_aligned(end,   os::vm_page_size()), "Unexpected alignment");

  os::pretouch_memory(start, end);
}

static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
  if (os::commit_memory(start, size, alignment, executable)) {
    if (pre_touch || AlwaysPreTouch) {
      pretouch_expanded_memory(start, start + size);
    }
    return true;
  }

  debug_only(warning(
      "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT
      " size=" SIZE_FORMAT ", executable=%d) failed",
      p2i(start), p2i(start + size), size, executable);)

  return false;
}

/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done at the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading up to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
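// For example, assume a 4K base page size, a 2M commit granularity and a
// space spanning [3M, 13M). Then lower_high_boundary() is 4M (3M aligned up
// to 2M) and middle_high_boundary() is 12M (13M aligned down), so the first
// and last megabyte are committed with 4K pages while the middle 8M can use
// 2M pages.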
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) {
    return false;
  }

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(), "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =  MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high = MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =  MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on the region's alignment.  lower and upper
  // alignment will always be default page size.  middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs = pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs = pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs = pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() && lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() && middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() && upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region");
    if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable)) {
      return false;
    }
    _lower_high += lower_needs;
  }

  if (middle_needs > 0) {
    assert(middle_high() + middle_needs <= middle_high_boundary(), "must not expand beyond region");
    if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable)) {
      return false;
    }
    _middle_high += middle_needs;
  }

  if (upper_needs > 0) {
    assert(upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region");
    if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable)) {
      return false;
    }
    _upper_high += upper_needs;
  }

  _high += bytes;
  return true;
}

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary,
// in which case that particular page can now be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size) {
    fatal("Cannot shrink virtual space to negative size");
  }

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs, _executable)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs, _executable)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs, _executable)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print_on(outputStream* out) const {
  out->print   ("Virtual space:");
  if (special()) out->print(" (pinned in memory)");
  out->cr();
  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  out->print_cr(" - [low, high]:     [" PTR_FORMAT ", " PTR_FORMAT "]",  p2i(low()), p2i(high()));
  out->print_cr(" - [low_b, high_b]: [" PTR_FORMAT ", " PTR_FORMAT "]",  p2i(low_boundary()), p2i(high_boundary()));
}

void VirtualSpace::print() const {
  print_on(tty);
}

#endif