/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "memory/virtualspace.hpp"
#include "nmt/memTracker.hpp"
#include "oops/compressedKlass.hpp"
#include "oops/compressedOops.hpp"
#include "oops/markWord.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/powerOfTwo.hpp"

// ReservedSpace

// Dummy constructor
ReservedSpace::ReservedSpace() : _base(nullptr), _size(0), _noaccess_prefix(0),
    _alignment(0), _special(false), _fd_for_heap(-1), _executable(false) {
}

ReservedSpace::ReservedSpace(size_t size) : _fd_for_heap(-1) {
  // Want to use large pages where possible. If the size is
  // not large page aligned the mapping will be a mix of
  // large and normal pages.
  size_t page_size = os::page_size_for_region_unaligned(size, 1);
  size_t alignment = os::vm_allocation_granularity();
  initialize(size, alignment, page_size, nullptr, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) {
  // When a page size is given we don't want to mix large
  // and normal pages. If the size is not a multiple of the
  // page size it will be aligned up to achieve this.
  size_t alignment = os::vm_allocation_granularity();
  if (preferred_page_size != os::vm_page_size()) {
    alignment = MAX2(preferred_page_size, alignment);
    size = align_up(size, alignment);
  }
  initialize(size, alignment, preferred_page_size, nullptr, false);
}

ReservedSpace::ReservedSpace(size_t size,
                             size_t alignment,
                             size_t page_size,
                             char* requested_address) : _fd_for_heap(-1) {
  initialize(size, alignment, page_size, requested_address, false);
}

ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment, size_t page_size,
                             bool special, bool executable) : _fd_for_heap(-1) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  initialize_members(base, size, alignment, page_size, special, executable);
}

// Helper method
static char* attempt_map_or_reserve_memory_at(char* base, size_t size, int fd, bool executable) {
  if (fd != -1) {
    return os::attempt_map_memory_to_file_at(base, size, fd);
  }
  return os::attempt_reserve_memory_at(base, size, executable);
}

// Helper method
static char* map_or_reserve_memory(size_t size, int fd, bool executable) {
  if (fd != -1) {
    return os::map_memory_to_file(size, fd);
  }
  return os::reserve_memory(size, executable);
}

// Helper method
static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int fd, bool executable) {
  if (fd != -1) {
    return os::map_memory_to_file_aligned(size, alignment, fd);
  }
  return os::reserve_memory_aligned(size, alignment, executable);
}

// Helper method
static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) {
  if (is_file_mapped) {
    if (!os::unmap_memory(base, size)) {
      fatal("os::unmap_memory failed");
    }
  } else if (!os::release_memory(base, size)) {
    fatal("os::release_memory failed");
  }
}

// Helper method
static bool failed_to_reserve_as_requested(char* base, char* requested_address) {
  if (base == requested_address || requested_address == nullptr) {
    return false; // did not fail
  }

  if (base != nullptr) {
    // A different reserve address may be acceptable in other cases
    // but for compressed oops the heap should be at the requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
  }
  return true;
}

static bool use_explicit_large_pages(size_t page_size) {
  return !os::can_commit_large_page_memory() &&
         page_size != os::vm_page_size();
}

static bool large_pages_requested() {
  return UseLargePages &&
         (!FLAG_IS_DEFAULT(UseLargePages) || !FLAG_IS_DEFAULT(LargePageSizeInBytes));
}

static void log_on_large_pages_failure(char* req_addr, size_t bytes) {
  if (large_pages_requested()) {
    // Compressed oops logging.
    log_debug(gc, heap, coops)("Reserve regular memory without large pages");
    // JVM style warning that we did not succeed in using large pages.
    char msg[128];
    jio_snprintf(msg, sizeof(msg), "Failed to reserve and commit memory using large pages. "
                                   "req_addr: " PTR_FORMAT " bytes: " SIZE_FORMAT,
                                   req_addr, bytes);
    warning("%s", msg);
  }
}

static char* reserve_memory(char* requested_address, const size_t size,
                            const size_t alignment, int fd, bool exec) {
  char* base;
  // If the memory was requested at a particular address, use
  // os::attempt_reserve_memory_at() to avoid mapping over something
  // important. If the reservation fails, return null.
  if (requested_address != nullptr) {
    assert(is_aligned(requested_address, alignment),
           "Requested address " PTR_FORMAT " must be aligned to " SIZE_FORMAT,
           p2i(requested_address), alignment);
    base = attempt_map_or_reserve_memory_at(requested_address, size, fd, exec);
  } else {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.
    base = map_or_reserve_memory(size, fd, exec);
    // Check alignment constraints. This is only needed when there is
    // no requested address.
    if (!is_aligned(base, alignment)) {
      // Base not aligned, retry.
      unmap_or_release_memory(base, size, fd != -1 /*is_file_mapped*/);
      // Map using the requested alignment.
      base = map_or_reserve_memory_aligned(size, alignment, fd, exec);
    }
  }

  return base;
}

static char* reserve_memory_special(char* requested_address, const size_t size,
                                    const size_t alignment, const size_t page_size, bool exec) {

  log_trace(pagesize)("Attempt special mapping: size: " SIZE_FORMAT "%s, "
                      "alignment: " SIZE_FORMAT "%s",
                      byte_size_in_exact_unit(size), exact_unit_for_byte_size(size),
                      byte_size_in_exact_unit(alignment), exact_unit_for_byte_size(alignment));

  char* base = os::reserve_memory_special(size, alignment, page_size, requested_address, exec);
  if (base != nullptr) {
    // Check alignment constraints.
    assert(is_aligned(base, alignment),
           "reserve_memory_special() returned an unaligned address, base: " PTR_FORMAT
           " alignment: " SIZE_FORMAT_X,
           p2i(base), alignment);
  }
  return base;
}

void ReservedSpace::clear_members() {
  initialize_members(nullptr, 0, 0, 0, false, false);
}

void ReservedSpace::initialize_members(char* base, size_t size, size_t alignment,
                                       size_t page_size, bool special, bool executable) {
  _base = base;
  _size = size;
  _alignment = alignment;
  _page_size = page_size;
  _special = special;
  _executable = executable;
  _noaccess_prefix = 0;
}

void ReservedSpace::reserve(size_t size,
                            size_t alignment,
                            size_t page_size,
                            char* requested_address,
                            bool executable) {
  assert(is_aligned(size, alignment), "Size must be aligned to the requested alignment");

  // There are basically three different cases that we need to handle below:
  // 1. Mapping backed by a file
  // 2. Mapping backed by explicit large pages
  // 3. Mapping backed by normal pages or transparent huge pages
  // The first two have restrictions that require the whole mapping to be
  // committed up front. To record this the ReservedSpace is marked 'special'.

  // == Case 1 ==
  if (_fd_for_heap != -1) {
    // When there is a backing file directory for this space then whether
    // large pages are allocated is up to the filesystem of the backing file.
    // So UseLargePages is not taken into account for this reservation.
    char* base = reserve_memory(requested_address, size, alignment, _fd_for_heap, executable);
    if (base != nullptr) {
      initialize_members(base, size, alignment, os::vm_page_size(), true, executable);
    }
    // Always return, not possible to fall back to reservation not using a file.
    return;
  }

  // == Case 2 ==
  if (use_explicit_large_pages(page_size)) {
    // System can't commit large pages i.e. use transparent huge pages and
    // the caller requested large pages. To satisfy this request we use
    // explicit large pages and these have to be committed up front to ensure
    // no reservations are lost.
    do {
      char* base = reserve_memory_special(requested_address, size, alignment, page_size, executable);
      if (base != nullptr) {
        // Successful reservation using large pages.
        initialize_members(base, size, alignment, page_size, true, executable);
        return;
      }
      page_size = os::page_sizes().next_smaller(page_size);
    } while (page_size > os::vm_page_size());

    // Failed to reserve explicit large pages, do proper logging.
    log_on_large_pages_failure(requested_address, size);
    // Now fall back to normal reservation.
    assert(page_size == os::vm_page_size(), "inv");
  }

  // == Case 3 ==
  char* base = reserve_memory(requested_address, size, alignment, -1, executable);
  if (base != nullptr) {
    // Successful mapping.
    initialize_members(base, size, alignment, page_size, false, executable);
  }
}

void ReservedSpace::initialize(size_t size,
                               size_t alignment,
                               size_t page_size,
                               char* requested_address,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");
  assert(page_size >= os::vm_page_size(), "Invalid page size");
  assert(is_power_of_2(page_size), "Invalid page size");

  clear_members();

  if (size == 0) {
    return;
  }

  // Adjust alignment to not be 0.
  alignment = MAX2(alignment, os::vm_page_size());

  // Reserve the memory.
  reserve(size, alignment, page_size, requested_address, executable);

  // Check that the requested address is used if given.
  if (failed_to_reserve_as_requested(_base, requested_address)) {
    // OS ignored the requested address, release the reservation.
    release();
    return;
  }
}

ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base(), partition_size, alignment, page_size(), special(), executable());
  return result;
}

ReservedSpace ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, page_size(), special(), executable());
  return result;
}

ReservedSpace ReservedSpace::partition(size_t offset, size_t partition_size, size_t alignment) {
  assert(offset + partition_size <= size(), "partition failed");
  ReservedSpace result(base() + offset, partition_size, alignment, page_size(), special(), executable());
  return result;
}

size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_up(size, os::vm_allocation_granularity());
}

void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      if (_fd_for_heap != -1) {
        os::unmap_memory(real_base, real_size);
      } else {
        os::release_memory_special(real_base, real_size);
      }
    } else {
      os::release_memory(real_base, real_size);
    }
    clear_members();
  }
}

// Put a ReservedSpace over an existing range
ReservedSpace ReservedSpace::space_for_range(char* base, size_t size, size_t alignment,
                                             size_t page_size, bool special, bool executable) {
  assert(is_aligned(base, os::vm_allocation_granularity()), "Unaligned base");
  assert(is_aligned(size, os::vm_page_size()), "Unaligned size");
  assert(os::page_sizes().contains(page_size), "Invalid pagesize");
  ReservedSpace space;
  space.initialize_members(base, size, alignment, page_size, special, executable);
  return space;
}

static size_t noaccess_prefix_size(size_t alignment) {
  return lcm(os::vm_page_size(), alignment);
}

void ReservedHeapSpace::establish_noaccess_prefix() {
  assert(_alignment >= os::vm_page_size(), "must be at least page size big");
  _noaccess_prefix = noaccess_prefix_size(_alignment);

  if (base() && base() + _size > (char *)OopEncodingHeapMax) {
    if (true
        WIN64_ONLY(&& !UseLargePages)
        AIX_ONLY(&& (os::Aix::supports_64K_mmap_pages() || os::vm_page_size() == 4*K))) {
      // Protect memory at the base of the allocated region.
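      // Note: protecting this first page is what allows heap-based compressed oops
      // to keep using implicit null checks: decoding a null narrow oop yields the
      // (now inaccessible) heap base, so a dereference faults instead of silently
      // reading memory. The else branch below disables implicit null checks when
      // the page cannot be protected.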
      // If special, the page was committed (only matters on windows)
      if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
        fatal("cannot protect protection page");
      }
      log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
                                 PTR_FORMAT " / " INTX_FORMAT " bytes",
                                 p2i(_base),
                                 _noaccess_prefix);
      assert(CompressedOops::use_implicit_null_checks() == true, "not initialized?");
    } else {
      CompressedOops::set_use_implicit_null_checks(false);
    }
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
}

// Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
// Does not check whether the reserved memory actually is at requested_address, as the memory returned
// might still fulfill the wishes of the caller.
// Assures the memory is aligned to 'alignment'.
// NOTE: If ReservedHeapSpace already points to some reserved memory this is freed first.
void ReservedHeapSpace::try_reserve_heap(size_t size,
                                         size_t alignment,
                                         size_t page_size,
                                         char* requested_address) {
  if (_base != nullptr) {
    // We tried before, but we didn't like the address delivered.
    release();
  }

  // Try to reserve the memory for the heap.
  log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
                             " heap of size " SIZE_FORMAT_X,
                             p2i(requested_address),
                             size);

  reserve(size, alignment, page_size, requested_address, false);

  // Check alignment constraints.
  if (is_reserved() && !is_aligned(_base, _alignment)) {
    // Base not aligned, retry.
    release();
  }
}

void ReservedHeapSpace::try_reserve_range(char *highest_start,
                                          char *lowest_start,
                                          size_t attach_point_alignment,
                                          char *aligned_heap_base_min_address,
                                          char *upper_bound,
                                          size_t size,
                                          size_t alignment,
                                          size_t page_size) {
  const size_t attach_range = highest_start - lowest_start;
  // Cap num_attempts at possible number.
  // At least one is possible even for 0 sized attach range.
  const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
  const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);

  const size_t stepsize = (attach_range == 0) ? // Only one try.
    (size_t) highest_start : align_up(attach_range / num_attempts_to_try, attach_point_alignment);

  // Try attach points from top to bottom.
  char* attach_point = highest_start;
  while (attach_point >= lowest_start &&
         attach_point <= highest_start &&  // Avoid wrap around.
         ((_base == nullptr) ||
          (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
    try_reserve_heap(size, alignment, page_size, attach_point);
    attach_point -= stepsize;
  }
}

#define SIZE_64K  ((uint64_t) UCONST64(     0x10000))
#define SIZE_256M ((uint64_t) UCONST64(  0x10000000))
#define SIZE_32G  ((uint64_t) UCONST64( 0x800000000))

// Helper for heap allocation. Returns an array with addresses
// (OS-specific) which are suited for disjoint base mode. Array is
// null terminated.
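// Note: the entries below are multiples of 32G, i.e. of OopEncodingHeapMax with the
// default 8-byte object alignment (3-bit shift), so a heap attached at one of them
// keeps its base bits disjoint from the shifted narrow-oop bits. This is the property
// that CompressedOops::is_disjoint_heap_base_address() checks for in the search loop
// in initialize_compressed_heap() below.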
static char** get_attach_addresses_for_disjoint_mode() {
  static uint64_t addresses[] = {
     2 * SIZE_32G,
     3 * SIZE_32G,
     4 * SIZE_32G,
     8 * SIZE_32G,
    10 * SIZE_32G,
     1 * SIZE_64K * SIZE_32G,
     2 * SIZE_64K * SIZE_32G,
     3 * SIZE_64K * SIZE_32G,
     4 * SIZE_64K * SIZE_32G,
    16 * SIZE_64K * SIZE_32G,
    32 * SIZE_64K * SIZE_32G,
    34 * SIZE_64K * SIZE_32G,
    0
  };

  // Sort out addresses smaller than HeapBaseMinAddress. This assumes
  // the array is sorted.
  uint i = 0;
  while (addresses[i] != 0 &&
         (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
    i++;
  }
  uint start = i;

  // Avoid more steps than requested.
  i = 0;
  while (addresses[start+i] != 0) {
    if (i == HeapSearchSteps) {
      addresses[start+i] = 0;
      break;
    }
    i++;
  }

  return (char**) &addresses[start];
}

void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, size_t page_size) {
  guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
            "can not allocate compressed oop heap for this size");
  guarantee(alignment == MAX2(alignment, os::vm_page_size()), "alignment too small");

  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  // The necessary attach point alignment for generated wish addresses.
  // This is needed to increase the chance of attaching for mmap and shmat.
  // AIX is the only platform that uses System V shm for reserving virtual memory.
  // In this case, the required alignment of the allocated size (64K) and the alignment
  // of possible start points of the memory region (256M) differ.
  // This is not reflected by os_allocation_granularity().
  // The logic here is dual to the one in pd_reserve_memory in os_aix.cpp
  const size_t os_attach_point_alignment =
    AIX_ONLY(os::vm_page_size() == 4*K ? 4*K : 256*M)
    NOT_AIX(os::vm_allocation_granularity());

  const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);

  char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment);
  size_t noaccess_prefix = (((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) LP64_ONLY(|| UseCompatibleCompressedOops)) ?
    noaccess_prefix_size(alignment) : 0;

  // Attempt to alloc at user-given address.
  if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) LP64_ONLY(|| UseCompatibleCompressedOops)) {
    try_reserve_heap(size + noaccess_prefix, alignment, page_size, aligned_heap_base_min_address);
    if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
      release();
    }
  }

  // Keep heap at HeapBaseMinAddress.
  if (_base == nullptr) {

    // Try to allocate the heap at addresses that allow efficient oop compression.
    // Different schemes are tried, in order of decreasing optimization potential.
    //
    // For this, try_reserve_heap() is called with the desired heap base addresses.
    // A call into the os layer to allocate at a given address can return memory
    // at a different address than requested. Still, this might be memory at a useful
    // address.
    // try_reserve_heap() always returns this allocated memory, as only here
    // the criteria for a good heap are checked.

    // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
    // Give it several tries from top of range to bottom.
    if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax LP64_ONLY(&& !UseCompatibleCompressedOops)) {

      // Calc address range within which we try to attach (range of possible start addresses).
      char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
      char* const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
      try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                        aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, page_size);
    }

    // zerobased: Attempt to allocate in the lower 32G.
    char *zerobased_max = (char *)OopEncodingHeapMax;

    // Give it several tries from top of range to bottom.
    if (LP64_ONLY(!UseCompatibleCompressedOops &&)
        aligned_heap_base_min_address + size <= zerobased_max &&  // Zerobased theoretically possible.
        ((_base == nullptr) ||                                    // No previous try succeeded.
         (_base + size > zerobased_max))) {                       // Unscaled delivered an arbitrary address.

      // Calc address range within which we try to attach (range of possible start addresses).
      char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
      // Need to be careful about size being guaranteed to be less
      // than UnscaledOopHeapMax due to type constraints.
      char *lowest_start = aligned_heap_base_min_address;
      uint64_t unscaled_end = UnscaledOopHeapMax - size;
      if (unscaled_end < UnscaledOopHeapMax) {  // unscaled_end wrapped if size is large
        lowest_start = MAX2(lowest_start, (char*)unscaled_end);
      }
      lowest_start = align_up(lowest_start, attach_point_alignment);
      try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                        aligned_heap_base_min_address, zerobased_max, size, alignment, page_size);
    }

    // Now we go for heaps with base != 0. We need a noaccess prefix to efficiently
    // implement null checks.
    noaccess_prefix = noaccess_prefix_size(alignment);

    // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjointbase mode.
    char** addresses = get_attach_addresses_for_disjoint_mode();
    int i = 0;
    while (addresses[i] &&                                 // End of array not yet reached.
           ((_base == nullptr) ||                          // No previous try succeeded.
            (_base + size > (char *)OopEncodingHeapMax &&  // Not zerobased or unscaled address.
             !CompressedOops::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
      char* const attach_point = addresses[i];
      assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
      try_reserve_heap(size + noaccess_prefix, alignment, page_size, attach_point);
      i++;
    }

    // Last, desperate try without any placement.
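    // (Even at an arbitrary base the heap still gets the noaccess prefix: we request
    //  size + noaccess_prefix here, and the ReservedHeapSpace constructor calls
    //  establish_noaccess_prefix() whenever the reserved size exceeds the requested one.)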
    if (_base == nullptr) {
      log_trace(gc, heap, coops)("Trying to allocate at address null heap of size " SIZE_FORMAT_X, size + noaccess_prefix);
      initialize(size + noaccess_prefix, alignment, page_size, nullptr, false);
    }
  }
}

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, size_t page_size, const char* heap_allocation_directory) : ReservedSpace() {

  if (size == 0) {
    return;
  }

  if (heap_allocation_directory != nullptr) {
    _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
    if (_fd_for_heap == -1) {
      vm_exit_during_initialization(
        err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
    }
    // When there is a backing file directory for this space then whether
    // large pages are allocated is up to the filesystem of the backing file.
    // If requested, let the user know that explicit large pages can't be used.
    if (use_explicit_large_pages(page_size) && large_pages_requested()) {
      log_debug(gc, heap)("Cannot allocate explicit large pages for Java Heap when AllocateHeapAt option is set.");
    }
  }

  // Heap size should be aligned to alignment, too.
  guarantee(is_aligned(size, alignment), "set by caller");

  if (UseCompressedOops) {
    initialize_compressed_heap(size, alignment, page_size);
    if (_size > size LP64_ONLY(|| UseCompatibleCompressedOops)) {
      // We allocated heap with noaccess prefix.
      // It can happen we get a zerobased/unscaled heap with noaccess prefix,
      // if we had to try at arbitrary address.
      establish_noaccess_prefix();
    }
  } else {
    initialize(size, alignment, page_size, nullptr, false);
  }

  assert(markWord::encode_pointer_as_mark(_base).decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markWord::encode_pointer_as_mark(&_base[size]).decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");

  if (base() != nullptr) {
    MemTracker::record_virtual_memory_tag((address)base(), mtJavaHeap);
  }

  if (_fd_for_heap != -1) {
    ::close(_fd_for_heap);
  }
}

MemRegion ReservedHeapSpace::region() const {
  return MemRegion((HeapWord*)base(), (HeapWord*)end());
}

// Reserve space for code segment. Same as Java heap only we mark this as
// executable.
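// Note: the executable flag set here is threaded through ReservedSpace::reserve()
// down to os::reserve_memory()/os::reserve_memory_special(), so the underlying
// mapping can be created with execute permission for the generated code it will hold.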
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     size_t rs_page_size) : ReservedSpace() {
  initialize(r_size, rs_align, rs_page_size, /*requested address*/ nullptr, /*executable*/ true);
  MemTracker::record_virtual_memory_tag((address)base(), mtCode);
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary         = nullptr;
  _high_boundary        = nullptr;
  _low                  = nullptr;
  _high                 = nullptr;
  _lower_high           = nullptr;
  _middle_high          = nullptr;
  _upper_high           = nullptr;
  _lower_high_boundary  = nullptr;
  _middle_high_boundary = nullptr;
  _upper_high_boundary  = nullptr;
  _lower_alignment      = 0;
  _middle_alignment     = 0;
  _upper_alignment      = 0;
  _special              = false;
  _executable           = false;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
  return initialize_with_granularity(rs, committed_size, max_commit_granularity);
}

bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if (!rs.is_reserved()) return false; // allocation failed.
  assert(_low_boundary == nullptr, "VirtualSpace already initialized");
  assert(max_commit_granularity > 0, "Granularity must be non-zero.");

  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages. This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system. Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = max_commit_granularity;
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary  = align_up(low_boundary(), middle_alignment());
  _middle_high_boundary = align_down(high_boundary(), middle_alignment());
  _upper_high_boundary  = high_boundary();

  // High address of each region
  _lower_high  = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high  = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}


VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // This does not release memory it reserved.
  // Caller must release via rs.release();
  _low_boundary         = nullptr;
  _high_boundary        = nullptr;
  _low                  = nullptr;
  _high                 = nullptr;
  _lower_high           = nullptr;
  _middle_high          = nullptr;
  _upper_high           = nullptr;
  _lower_high_boundary  = nullptr;
  _middle_high_boundary = nullptr;
  _upper_high_boundary  = nullptr;
  _lower_alignment      = 0;
  _middle_alignment     = 0;
  _upper_alignment      = 0;
  _special              = false;
  _executable           = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}

size_t VirtualSpace::actual_committed_size() const {
  // Special VirtualSpaces commit all reserved space up front.
  if (special()) {
    return reserved_size();
  }

  size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
  size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
  size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));

#ifdef ASSERT
  size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
  size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
  size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));

  if (committed_high > 0) {
    assert(committed_low == lower, "Must be");
    assert(committed_middle == middle, "Must be");
  }

  if (committed_middle > 0) {
    assert(committed_low == lower, "Must be");
  }
  if (committed_middle < middle) {
    assert(committed_high == 0, "Must be");
  }

  if (committed_low < lower) {
    assert(committed_high == 0, "Must be");
    assert(committed_middle == 0, "Must be");
  }
#endif

  return committed_low + committed_middle + committed_high;
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

static void pretouch_expanded_memory(void* start, void* end) {
  assert(is_aligned(start, os::vm_page_size()), "Unexpected alignment");
  assert(is_aligned(end,   os::vm_page_size()), "Unexpected alignment");

  os::pretouch_memory(start, end);
}

static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
  if (os::commit_memory(start, size, alignment, executable)) {
    if (pre_touch || AlwaysPreTouch) {
      pretouch_expanded_memory(start, start + size);
    }
    return true;
  }

  debug_only(warning(
      "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT
      " size=" SIZE_FORMAT ", executable=%d) failed",
      p2i(start), p2i(start + size), size, executable);)

  return false;
}

/*
   First we need to determine if a particular virtual space is using large
   pages. This is done at the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages. Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks. If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.
   In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) {
    return false;
  }

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(), "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be. If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high  = MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high = MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high  = MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on the regions alignment. lower and upper
  // alignment will always be default page size. middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high  = align_up(unaligned_lower_new_high,  lower_alignment());
  char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high  = align_up(unaligned_upper_new_high,  upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high(). For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high(). By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs = pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs = pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs = pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() && lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() && middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() && upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region");
    if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable)) {
      return false;
    }
    _lower_high += lower_needs;
  }

  if (middle_needs > 0) {
    assert(middle_high() + middle_needs <= middle_high_boundary(), "must not expand beyond region");
    if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable)) {
      return false;
    }
    _middle_high += middle_needs;
  }

  if (upper_needs > 0) {
    assert(upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region");
    if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable)) {
      return false;
    }
    _upper_high += upper_needs;
  }

  _high += bytes;
  return true;
}

// A page is uncommitted if the contents of the entire page is deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary
// in which case that particular page can now be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high  = align_up(unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high  = align_up(unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs, _executable)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs, _executable)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs, _executable)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print_on(outputStream* out) const {
  out->print   ("Virtual space:");
  if (special()) out->print(" (pinned in memory)");
  out->cr();
  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  out->print_cr(" - [low, high]:     [" PTR_FORMAT ", " PTR_FORMAT "]", p2i(low()), p2i(high()));
  out->print_cr(" - [low_b, high_b]: [" PTR_FORMAT ", " PTR_FORMAT "]", p2i(low_boundary()), p2i(high_boundary()));
}

void VirtualSpace::print() const {
  print_on(tty);
}

#endif