/*
 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "memory/virtualspace.hpp"
#include "oops/compressedOops.hpp"
#include "oops/markWord.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/powerOfTwo.hpp"

// ReservedSpace

// Dummy constructor
ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
    _alignment(0), _special(false), _fd_for_heap(-1), _executable(false) {
}

ReservedSpace::ReservedSpace(size_t size) : _fd_for_heap(-1) {
  // Want to use large pages where possible. If the size is
  // not large page aligned the mapping will be a mix of
  // large and normal pages.
  size_t page_size = os::page_size_for_region_unaligned(size, 1);
  size_t alignment = os::vm_allocation_granularity();
  initialize(size, alignment, page_size, NULL, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) {
  // When a page size is given we don't want to mix large
  // and normal pages. If the size is not a multiple of the
  // page size it will be aligned up to achieve this.
  size_t alignment = os::vm_allocation_granularity();
  if (preferred_page_size != (size_t)os::vm_page_size()) {
    alignment = MAX2(preferred_page_size, alignment);
    size = align_up(size, alignment);
  }
  initialize(size, alignment, preferred_page_size, NULL, false);
}

ReservedSpace::ReservedSpace(size_t size,
                             size_t alignment,
                             size_t page_size,
                             char* requested_address) : _fd_for_heap(-1) {
  initialize(size, alignment, page_size, requested_address, false);
}

ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment, size_t page_size,
                             bool special, bool executable) : _fd_for_heap(-1) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  initialize_members(base, size, alignment, page_size, special, executable);
}

// Helper method
static char* attempt_map_or_reserve_memory_at(char* base, size_t size, int fd, bool executable) {
  if (fd != -1) {
    return os::attempt_map_memory_to_file_at(base, size, fd);
  }
  return os::attempt_reserve_memory_at(base, size, executable);
}

// Helper method
static char* map_or_reserve_memory(size_t size, int fd, bool executable) {
  if (fd != -1) {
    return os::map_memory_to_file(size, fd);
  }
  return os::reserve_memory(size, executable);
}

// Helper method
static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int fd, bool executable) {
  if (fd != -1) {
    return os::map_memory_to_file_aligned(size, alignment, fd);
  }
  return os::reserve_memory_aligned(size, alignment, executable);
}
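
// Usage note for the three helpers above: they all dispatch on the heap
// backing file descriptor. A short sketch (the fd value is hypothetical and
// for illustration only):
//
//   char* p = map_or_reserve_memory(4 * M, -1, false); // anonymous mapping via
//                                                      // os::reserve_memory()
//   char* q = map_or_reserve_memory(4 * M, fd, false); // fd from e.g. a file created
//                                                      // for -XX:AllocateHeapAt, uses
//                                                      // os::map_memory_to_file()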

// Helper method
static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) {
  if (is_file_mapped) {
    if (!os::unmap_memory(base, size)) {
      fatal("os::unmap_memory failed");
    }
  } else if (!os::release_memory(base, size)) {
    fatal("os::release_memory failed");
  }
}

// Helper method
static bool failed_to_reserve_as_requested(char* base, char* requested_address) {
  if (base == requested_address || requested_address == NULL) {
    return false; // did not fail
  }

  if (base != NULL) {
    // A different reserve address may be acceptable in other cases,
    // but for compressed oops the heap should be at the requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
  }
  return true;
}

static bool use_explicit_large_pages(size_t page_size) {
  return !os::can_commit_large_page_memory() &&
         page_size != (size_t) os::vm_page_size();
}
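
// Background (informal): os::can_commit_large_page_memory() is true when the
// OS can back a normal reservation with large pages at commit time (for
// example, Linux transparent huge pages). When it is false, large pages come
// from a pinned, explicit mechanism (for example, hugetlbfs on Linux), so a
// request for a non-default page size can only be honored by a 'special'
// mapping that is committed up front in reserve() below.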

static bool large_pages_requested() {
  return UseLargePages &&
         (!FLAG_IS_DEFAULT(UseLargePages) || !FLAG_IS_DEFAULT(LargePageSizeInBytes));
}

static char* reserve_memory(char* requested_address, const size_t size,
                            const size_t alignment, int fd, bool exec) {
  char* base;
  // If the memory was requested at a particular address, use
  // os::attempt_reserve_memory_at() to avoid mapping over something
  // important.  If the reservation fails, return NULL.
  if (requested_address != 0) {
    assert(is_aligned(requested_address, alignment),
           "Requested address " PTR_FORMAT " must be aligned to " SIZE_FORMAT,
           p2i(requested_address), alignment);
    base = attempt_map_or_reserve_memory_at(requested_address, size, fd, exec);
  } else {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.
    base = map_or_reserve_memory(size, fd, exec);
    // Check alignment constraints. This is only needed when there is
    // no requested address.
    if (!is_aligned(base, alignment)) {
      // Base not aligned, retry.
      unmap_or_release_memory(base, size, fd != -1 /*is_file_mapped*/);
      // Map using the requested alignment.
      base = map_or_reserve_memory_aligned(size, alignment, fd, exec);
    }
  }

  return base;
}
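
// Worked example for the retry above (all numbers illustrative): asking for
// size = 64M with alignment = 32M and no requested address, the first
// map_or_reserve_memory() call might return 0x7f1234560000, which is only
// 64K-aligned. That mapping is released and map_or_reserve_memory_aligned()
// is called instead, which may over-reserve (roughly size + alignment) and
// trim the excess so the returned base is guaranteed 32M-aligned.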

static char* reserve_memory_special(char* requested_address, const size_t size,
                                    const size_t alignment, const size_t page_size, bool exec) {

  log_trace(pagesize)("Attempt special mapping: size: " SIZE_FORMAT "%s, "
                      "alignment: " SIZE_FORMAT "%s",
                      byte_size_in_exact_unit(size), exact_unit_for_byte_size(size),
                      byte_size_in_exact_unit(alignment), exact_unit_for_byte_size(alignment));

  char* base = os::reserve_memory_special(size, alignment, page_size, requested_address, exec);
  if (base != NULL) {
    // Check alignment constraints.
    assert(is_aligned(base, alignment),
           "reserve_memory_special() returned an unaligned address, base: " PTR_FORMAT
           " alignment: " SIZE_FORMAT_HEX,
           p2i(base), alignment);
  } else {
    if (large_pages_requested()) {
      log_debug(gc, heap, coops)("Reserve regular memory without large pages");
    }
  }
  return base;
}

void ReservedSpace::clear_members() {
  initialize_members(NULL, 0, 0, 0, false, false);
}

void ReservedSpace::initialize_members(char* base, size_t size, size_t alignment,
                                       size_t page_size, bool special, bool executable) {
  _base = base;
  _size = size;
  _alignment = alignment;
  _page_size = page_size;
  _special = special;
  _executable = executable;
  _noaccess_prefix = 0;
}

void ReservedSpace::reserve(size_t size,
                            size_t alignment,
                            size_t page_size,
                            char* requested_address,
                            bool executable) {
  assert(is_aligned(size, alignment), "Size must be aligned to the requested alignment");

  // There are basically three different cases that we need to handle below:
  // - Mapping backed by a file
  // - Mapping backed by explicit large pages
  // - Mapping backed by normal pages or transparent huge pages
  // The first two have restrictions that require the whole mapping to be
  // committed up front. To record this the ReservedSpace is marked 'special'.

  if (_fd_for_heap != -1) {
    // When there is a backing file directory for this space then whether
    // large pages are allocated is up to the filesystem of the backing file.
    // So UseLargePages is not taken into account for this reservation.
    char* base = reserve_memory(requested_address, size, alignment, _fd_for_heap, executable);
    if (base != NULL) {
      initialize_members(base, size, alignment, os::vm_page_size(), true, executable);
    }
    // Always return; it is not possible to fall back to a reservation not using a file.
    return;
  } else if (use_explicit_large_pages(page_size)) {
    // The system can't commit large pages (i.e. can't use transparent huge
    // pages) and the caller requested large pages. To satisfy this request we
    // use explicit large pages, and these have to be committed up front to
    // ensure no reservations are lost.

    char* base = reserve_memory_special(requested_address, size, alignment, page_size, executable);
    if (base != NULL) {
      // Successful reservation using large pages.
      initialize_members(base, size, alignment, page_size, true, executable);
      return;
    }
    // Failed to reserve explicit large pages, fall back to normal reservation.
    page_size = os::vm_page_size();
  }

  // Not a 'special' reservation.
  char* base = reserve_memory(requested_address, size, alignment, -1, executable);
  if (base != NULL) {
    // Successful mapping.
    initialize_members(base, size, alignment, page_size, false, executable);
  }
}

void ReservedSpace::initialize(size_t size,
                               size_t alignment,
                               size_t page_size,
                               char* requested_address,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");
  assert(page_size >= (size_t) os::vm_page_size(), "Invalid page size");
  assert(is_power_of_2(page_size), "Invalid page size");

  clear_members();

  if (size == 0) {
    return;
  }

  // Adjust alignment to not be 0.
  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  // Reserve the memory.
  reserve(size, alignment, page_size, requested_address, executable);

  // Check that the requested address is used if given.
  if (failed_to_reserve_as_requested(_base, requested_address)) {
    // OS ignored the requested address, release the reservation.
    release();
    return;
  }
}

ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base(), partition_size, alignment, page_size(), special(), executable());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, page_size(), special(), executable());
  return result;
}


size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_up(size, os::vm_allocation_granularity());
}

void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      if (_fd_for_heap != -1) {
        os::unmap_memory(real_base, real_size);
      } else {
        os::release_memory_special(real_base, real_size);
      }
    } else {
      os::release_memory(real_base, real_size);
    }
    clear_members();
  }
}

static size_t noaccess_prefix_size(size_t alignment) {
  return lcm(os::vm_page_size(), alignment);
}
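
// Example: the alignment is a power of two and at least one page (asserted in
// establish_noaccess_prefix() below), so the lcm normally collapses to the
// alignment itself, e.g. lcm(4K page size, 8M alignment) = 8M. The prefix is
// therefore page-aligned (so it can be protected) and skipping it keeps the
// heap base at the required alignment.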

void ReservedHeapSpace::establish_noaccess_prefix() {
  assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
  _noaccess_prefix = noaccess_prefix_size(_alignment);

  if (base() && base() + _size > (char *)OopEncodingHeapMax) {
    if (true
        WIN64_ONLY(&& !UseLargePages)
        AIX_ONLY(&& os::vm_page_size() != 64*K)) {
      // Protect memory at the base of the allocated region.
      // If special, the page was committed (only matters on windows)
      if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
        fatal("cannot protect protection page");
      }
      log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
                                 PTR_FORMAT " / " INTX_FORMAT " bytes",
                                 p2i(_base),
                                 _noaccess_prefix);
      assert(CompressedOops::use_implicit_null_checks() == true, "not initialized?");
    } else {
      CompressedOops::set_use_implicit_null_checks(false);
    }
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
}
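
// Resulting layout, with illustrative addresses and prefix = 8M:
//
//   reserved:  [0x800000000, 0x800000000 + 8M + heap size)
//   protected: [0x800000000, 0x800000000 + 8M)              MEM_PROT_NONE
//   heap:      [0x800000000 + 8M, ...)                      new _base
//
// Decoding a NULL narrow oop plus a small field offset then yields an address
// inside the protected prefix, so the access faults and the VM can rely on
// implicit null checks instead of explicit ones.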

// Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
// Does not check whether the reserved memory actually is at requested_address, as the memory returned
// might still fulfill the wishes of the caller.
// Assures the memory is aligned to 'alignment'.
// NOTE: If ReservedHeapSpace already points to some reserved memory, it is freed first.
void ReservedHeapSpace::try_reserve_heap(size_t size,
                                         size_t alignment,
                                         size_t page_size,
                                         char* requested_address) {
  if (_base != NULL) {
    // We tried before, but we didn't like the address delivered.
    release();
  }

  // Try to reserve the memory for the heap.
  log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
                             " heap of size " SIZE_FORMAT_HEX,
                             p2i(requested_address),
                             size);

  reserve(size, alignment, page_size, requested_address, false);

  // Check alignment constraints.
  if (is_reserved() && !is_aligned(_base, _alignment)) {
    // Base not aligned, retry.
    release();
  }
}

void ReservedHeapSpace::try_reserve_range(char *highest_start,
                                          char *lowest_start,
                                          size_t attach_point_alignment,
                                          char *aligned_heap_base_min_address,
                                          char *upper_bound,
                                          size_t size,
                                          size_t alignment,
                                          size_t page_size) {
  const size_t attach_range = highest_start - lowest_start;
  // Cap num_attempts at the number of possible attach points.
  // At least one is possible even for a 0 sized attach range.
  const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
  const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);

  const size_t stepsize = (attach_range == 0) ? // Only one try.
    (size_t) highest_start : align_up(attach_range / num_attempts_to_try, attach_point_alignment);

  // Try attach points from top to bottom.
  char* attach_point = highest_start;
  while (attach_point >= lowest_start  &&
         attach_point <= highest_start &&  // Avoid wrap around.
         ((_base == NULL) ||
          (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
    try_reserve_heap(size, alignment, page_size, attach_point);
    attach_point -= stepsize;
  }
}
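
// Worked example (illustrative values): with an attach range of 4G,
// attach_point_alignment = 64K and HeapSearchSteps = 3, stepsize becomes
// align_up(4G / 3, 64K) and the loop probes highest_start,
// highest_start - stepsize, highest_start - 2 * stepsize, ..., stopping early
// once some probe delivers a base inside
// [aligned_heap_base_min_address, upper_bound - size].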

#define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
#define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
#define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))

// Helper for heap allocation. Returns an array with addresses
// (OS-specific) which are suited for disjoint base mode. Array is
// NULL terminated.
static char** get_attach_addresses_for_disjoint_mode() {
  static uint64_t addresses[] = {
     2 * SIZE_32G,
     3 * SIZE_32G,
     4 * SIZE_32G,
     8 * SIZE_32G,
    10 * SIZE_32G,
     1 * SIZE_64K * SIZE_32G,
     2 * SIZE_64K * SIZE_32G,
     3 * SIZE_64K * SIZE_32G,
     4 * SIZE_64K * SIZE_32G,
    16 * SIZE_64K * SIZE_32G,
    32 * SIZE_64K * SIZE_32G,
    34 * SIZE_64K * SIZE_32G,
    0
  };

  // Sort out addresses smaller than HeapBaseMinAddress. This assumes
  // the array is sorted.
  uint i = 0;
  while (addresses[i] != 0 &&
         (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
    i++;
  }
  uint start = i;

  // Avoid more steps than requested.
  i = 0;
  while (addresses[start+i] != 0) {
    if (i == HeapSearchSteps) {
      addresses[start+i] = 0;
      break;
    }
    i++;
  }

  return (char**) &addresses[start];
}
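
// Background on disjoint base mode: if the heap base is a multiple of
// OopEncodingHeapMax (32G with the default 3-bit compressed oop shift), the
// base bits and the shifted narrow oop bits do not overlap, so decoding can
// set the base bits (an OR, in effect) instead of performing a full add. The
// table above therefore lists multiples of 32G, thinned out at higher
// addresses.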

void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, size_t page_size) {
  guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
            "cannot allocate compressed oop heap for this size");
  guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");

  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  // The necessary attach point alignment for generated wish addresses.
  // This is needed to increase the chance of attaching for mmap and shmat.
  const size_t os_attach_point_alignment =
    AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
    NOT_AIX(os::vm_allocation_granularity());
  const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);

  char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment);
  size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
    noaccess_prefix_size(alignment) : 0;

  // Attempt to allocate at the user-given address.
  if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
    try_reserve_heap(size + noaccess_prefix, alignment, page_size, aligned_heap_base_min_address);
    if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
      release();
    }
  }

  // Keep heap at HeapBaseMinAddress.
  if (_base == NULL) {

    // Try to allocate the heap at addresses that allow efficient oop compression.
    // Different schemes are tried, in order of decreasing optimization potential.
    //
    // For this, try_reserve_heap() is called with the desired heap base addresses.
    // A call into the os layer to allocate at a given address can return memory
    // at a different address than requested.  Still, this might be memory at a useful
    // address. try_reserve_heap() always returns this allocated memory, as only here
    // the criteria for a good heap are checked.

    // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
    // Give it several tries from top of range to bottom.
    if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {

      // Calculate the address range within which we try to attach (range of possible start addresses).
      char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
      char* const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
      try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                        aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, page_size);
    }

    // zerobased: Attempt to allocate in the lower 32G.
    // But leave room for the compressed class space, which is allocated above
    // the heap.
    // Note Lilliput: the advantages of this strategy were questionable before
    //  (since CDS=off + Compressed oops + heap large enough to suffocate us out of lower 32g
    //  is rare) and with Lilliput the encoding range drastically shrank. We may just do away
    //  with this altogether.
    char *zerobased_max = (char *)OopEncodingHeapMax;
    const size_t class_space = align_up(CompressedClassSpaceSize, alignment);
    // For small heaps, save some space for compressed class pointer
    // space so it can be decoded with no base.
    if (UseCompressedClassPointers && !UseSharedSpaces &&
        OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
        (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
      zerobased_max = (char *)OopEncodingHeapMax - class_space;
    }

    // Give it several tries from top of range to bottom.
    if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretically possible.
        ((_base == NULL) ||                        // No previous try succeeded.
         (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.

      // Calculate the address range within which we try to attach (range of possible start addresses).
      char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
      // Need to be careful about size being guaranteed to be less
      // than UnscaledOopHeapMax due to type constraints.
      char *lowest_start = aligned_heap_base_min_address;
      uint64_t unscaled_end = UnscaledOopHeapMax - size;
      if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
        lowest_start = MAX2(lowest_start, (char*)unscaled_end);
      }
      lowest_start = align_up(lowest_start, attach_point_alignment);
      try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                        aligned_heap_base_min_address, zerobased_max, size, alignment, page_size);
    }

    // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
    // implement null checks.
    noaccess_prefix = noaccess_prefix_size(alignment);

    // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjoint base mode.
    char** addresses = get_attach_addresses_for_disjoint_mode();
    int i = 0;
    while (addresses[i] &&                                 // End of array not yet reached.
           ((_base == NULL) ||                             // No previous try succeeded.
            (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
             !CompressedOops::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
      char* const attach_point = addresses[i];
      assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
      try_reserve_heap(size + noaccess_prefix, alignment, page_size, attach_point);
      i++;
    }

    // Last, desperate try without any placement.
    if (_base == NULL) {
      log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
      initialize(size + noaccess_prefix, alignment, page_size, NULL, false);
    }
  }
}
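
// Summary of the placement strategies tried above, in decreasing order of
// decode efficiency (shift is 3 for the default 8-byte object alignment):
//
//   unscaled:  oop =         (uintptr_t)narrow_oop           heap end <= 4G
//   zerobased: oop =         (uintptr_t)narrow_oop << shift  heap end <= 32G
//   disjoint:  oop = base | ((uintptr_t)narrow_oop << shift) base multiple of 32G
//   heapbased: oop = base + ((uintptr_t)narrow_oop << shift) anywhere, needs noaccess prefix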

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, size_t page_size, const char* heap_allocation_directory) : ReservedSpace() {

  if (size == 0) {
    return;
  }

  if (heap_allocation_directory != NULL) {
    _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
    if (_fd_for_heap == -1) {
      vm_exit_during_initialization(
        err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
    }
    // When there is a backing file directory for this space then whether
    // large pages are allocated is up to the filesystem of the backing file.
    // If requested, let the user know that explicit large pages can't be used.
    if (use_explicit_large_pages(page_size) && large_pages_requested()) {
      log_debug(gc, heap)("Cannot allocate explicit large pages for Java Heap when AllocateHeapAt option is set.");
    }
  }

  // Heap size should be aligned to alignment, too.
  guarantee(is_aligned(size, alignment), "set by caller");

  if (UseCompressedOops) {
    initialize_compressed_heap(size, alignment, page_size);
    if (_size > size) {
      // We allocated heap with noaccess prefix.
      // It can happen we get a zerobased/unscaled heap with noaccess prefix,
      // if we had to try at an arbitrary address.
      establish_noaccess_prefix();
    }
  } else {
    initialize(size, alignment, page_size, NULL, false);
  }

  assert(markWord::encode_pointer_as_mark(_base).decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markWord::encode_pointer_as_mark(&_base[size]).decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");

  if (base() != NULL) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  if (_fd_for_heap != -1) {
    os::close(_fd_for_heap);
  }
}

MemRegion ReservedHeapSpace::region() const {
  return MemRegion((HeapWord*)base(), (HeapWord*)end());
}

// Reserve space for the code segment.  Same as the Java heap, except we mark
// this as executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     size_t rs_page_size) : ReservedSpace() {
  initialize(r_size, rs_align, rs_page_size, /*requested address*/ NULL, /*executable*/ true);
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
  return initialize_with_granularity(rs, committed_size, max_commit_granularity);
}

bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if (!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  assert(max_commit_granularity > 0, "Granularity must be non-zero.");

  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = max_commit_granularity;
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = align_up(low_boundary(), middle_alignment());
  _middle_high_boundary = align_down(high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}
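
// Worked example of the three-region split above (illustrative numbers): a
// space reserved at 0x10001000 with size 0x40000000 and a 2M commit
// granularity is carved up as
//
//   lower:  [0x10001000, 0x10200000)  committed with default (4K) pages
//   middle: [0x10200000, 0x50000000)  committed in 2M steps
//   upper:  [0x50000000, 0x50001000)  committed with default (4K) pages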


VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // This does not release memory it reserved.
  // Caller must release via rs.release();
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}

size_t VirtualSpace::actual_committed_size() const {
  // Special VirtualSpaces commit all reserved space up front.
  if (special()) {
    return reserved_size();
  }

  size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
  size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
  size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));

#ifdef ASSERT
  size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
  size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
  size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));

  if (committed_high > 0) {
    assert(committed_low == lower, "Must be");
    assert(committed_middle == middle, "Must be");
  }

  if (committed_middle > 0) {
    assert(committed_low == lower, "Must be");
  }
  if (committed_middle < middle) {
    assert(committed_high == 0, "Must be");
  }

  if (committed_low < lower) {
    assert(committed_high == 0, "Must be");
    assert(committed_middle == 0, "Must be");
  }
#endif

  return committed_low + committed_middle + committed_high;
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

static void pretouch_expanded_memory(void* start, void* end) {
  assert(is_aligned(start, os::vm_page_size()), "Unexpected alignment");
  assert(is_aligned(end,   os::vm_page_size()), "Unexpected alignment");

  os::pretouch_memory(start, end);
}

static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
  if (os::commit_memory(start, size, alignment, executable)) {
    if (pre_touch || AlwaysPreTouch) {
      pretouch_expanded_memory(start, start + size);
    }
    return true;
  }

  debug_only(warning(
      "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT
      " size=" SIZE_FORMAT ", executable=%d) failed",
      p2i(start), p2i(start + size), size, executable);)

  return false;
}

/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done at the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading up to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
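// For instance (continuing the illustrative split sketched after
// initialize_with_granularity()): expanding that space by 3M from a committed
// size of 0 commits the whole ~2M lower region with default pages plus one
// 2M chunk of the middle region; a later expand_by() whose new high stays
// below the already committed (2M-aligned) middle high commits nothing and
// only bumps _high.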
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) {
    return false;
  }

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(), "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =  MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high = MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =  MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on the region's alignment.  lower and upper
  // alignment will always be the default page size.  middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs = pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs = pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs = pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() && lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() && middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() && upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region");
    if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable)) {
      return false;
    }
    _lower_high += lower_needs;
  }

  if (middle_needs > 0) {
    assert(middle_high() + middle_needs <= middle_high_boundary(), "must not expand beyond region");
    if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable)) {
      return false;
    }
    _middle_high += middle_needs;
  }

  if (upper_needs > 0) {
    assert(upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region");
    if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable)) {
      return false;
    }
    _upper_high += upper_needs;
  }

  _high += bytes;
  return true;
}

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary
// in which case that particular page can now be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs, _executable)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs, _executable)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs, _executable)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print_on(outputStream* out) const {
  out->print   ("Virtual space:");
  if (special()) out->print(" (pinned in memory)");
  out->cr();
  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low()), p2i(high()));
  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low_boundary()), p2i(high_boundary()));
}

void VirtualSpace::print() const {
  print_on(tty);
}

#endif