1 /* 2 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "logging/log.hpp" 27 #include "memory/resourceArea.hpp" 28 #include "memory/virtualspace.hpp" 29 #include "nmt/memTracker.hpp" 30 #include "oops/compressedKlass.hpp" 31 #include "oops/compressedOops.hpp" 32 #include "oops/markWord.hpp" 33 #include "oops/oop.inline.hpp" 34 #include "runtime/globals_extension.hpp" 35 #include "runtime/java.hpp" 36 #include "runtime/os.hpp" 37 #include "utilities/align.hpp" 38 #include "utilities/formatBuffer.hpp" 39 #include "utilities/powerOfTwo.hpp" 40 41 // ReservedSpace 42 43 // Dummy constructor 44 ReservedSpace::ReservedSpace() : _base(nullptr), _size(0), _noaccess_prefix(0), 45 _alignment(0), _special(false), _fd_for_heap(-1), _executable(false) { 46 } 47 48 ReservedSpace::ReservedSpace(size_t size) : _fd_for_heap(-1) { 49 // Want to use large pages where possible. If the size is 50 // not large page aligned the mapping will be a mix of 51 // large and normal pages. 52 size_t page_size = os::page_size_for_region_unaligned(size, 1); 53 size_t alignment = os::vm_allocation_granularity(); 54 initialize(size, alignment, page_size, nullptr, false); 55 } 56 57 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) { 58 // When a page size is given we don't want to mix large 59 // and normal pages. If the size is not a multiple of the 60 // page size it will be aligned up to achieve this. 61 size_t alignment = os::vm_allocation_granularity();; 62 if (preferred_page_size != os::vm_page_size()) { 63 alignment = MAX2(preferred_page_size, alignment); 64 size = align_up(size, alignment); 65 } 66 initialize(size, alignment, preferred_page_size, nullptr, false); 67 } 68 69 ReservedSpace::ReservedSpace(size_t size, 70 size_t alignment, 71 size_t page_size, 72 char* requested_address) : _fd_for_heap(-1) { 73 initialize(size, alignment, page_size, requested_address, false); 74 } 75 76 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment, size_t page_size, 77 bool special, bool executable) : _fd_for_heap(-1) { 78 assert((size % os::vm_allocation_granularity()) == 0, 79 "size not allocation aligned"); 80 initialize_members(base, size, alignment, page_size, special, executable); 81 } 82 83 // Helper method 84 static char* attempt_map_or_reserve_memory_at(char* base, size_t size, int fd, bool executable) { 85 if (fd != -1) { 86 return os::attempt_map_memory_to_file_at(base, size, fd); 87 } 88 return os::attempt_reserve_memory_at(base, size, executable); 89 } 90 91 // Helper method 92 static char* map_or_reserve_memory(size_t size, int fd, bool executable) { 93 if (fd != -1) { 94 return os::map_memory_to_file(size, fd); 95 } 96 return os::reserve_memory(size, executable); 97 } 98 99 // Helper method 100 static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int fd, bool executable) { 101 if (fd != -1) { 102 return os::map_memory_to_file_aligned(size, alignment, fd); 103 } 104 return os::reserve_memory_aligned(size, alignment, executable); 105 } 106 107 // Helper method 108 static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) { 109 if (is_file_mapped) { 110 if (!os::unmap_memory(base, size)) { 111 fatal("os::unmap_memory failed"); 112 } 113 } else if (!os::release_memory(base, size)) { 114 fatal("os::release_memory failed"); 115 } 116 } 117 118 // Helper method 119 static bool failed_to_reserve_as_requested(char* base, char* requested_address) { 120 if (base == requested_address || requested_address == nullptr) { 121 return false; // did not fail 122 } 123 124 if (base != nullptr) { 125 // Different reserve address may be acceptable in other cases 126 // but for compressed oops heap should be at requested address. 127 assert(UseCompressedOops, "currently requested address used only for compressed oops"); 128 log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address)); 129 } 130 return true; 131 } 132 133 static bool use_explicit_large_pages(size_t page_size) { 134 return !os::can_commit_large_page_memory() && 135 page_size != os::vm_page_size(); 136 } 137 138 static bool large_pages_requested() { 139 return UseLargePages && 140 (!FLAG_IS_DEFAULT(UseLargePages) || !FLAG_IS_DEFAULT(LargePageSizeInBytes)); 141 } 142 143 static void log_on_large_pages_failure(char* req_addr, size_t bytes) { 144 if (large_pages_requested()) { 145 // Compressed oops logging. 146 log_debug(gc, heap, coops)("Reserve regular memory without large pages"); 147 // JVM style warning that we did not succeed in using large pages. 148 char msg[128]; 149 jio_snprintf(msg, sizeof(msg), "Failed to reserve and commit memory using large pages. " 150 "req_addr: " PTR_FORMAT " bytes: " SIZE_FORMAT, 151 req_addr, bytes); 152 warning("%s", msg); 153 } 154 } 155 156 static char* reserve_memory(char* requested_address, const size_t size, 157 const size_t alignment, int fd, bool exec) { 158 char* base; 159 // If the memory was requested at a particular address, use 160 // os::attempt_reserve_memory_at() to avoid mapping over something 161 // important. If the reservation fails, return null. 162 if (requested_address != nullptr) { 163 assert(is_aligned(requested_address, alignment), 164 "Requested address " PTR_FORMAT " must be aligned to " SIZE_FORMAT, 165 p2i(requested_address), alignment); 166 base = attempt_map_or_reserve_memory_at(requested_address, size, fd, exec); 167 } else { 168 // Optimistically assume that the OS returns an aligned base pointer. 169 // When reserving a large address range, most OSes seem to align to at 170 // least 64K. 171 base = map_or_reserve_memory(size, fd, exec); 172 // Check alignment constraints. This is only needed when there is 173 // no requested address. 174 if (!is_aligned(base, alignment)) { 175 // Base not aligned, retry. 176 unmap_or_release_memory(base, size, fd != -1 /*is_file_mapped*/); 177 // Map using the requested alignment. 178 base = map_or_reserve_memory_aligned(size, alignment, fd, exec); 179 } 180 } 181 182 return base; 183 } 184 185 static char* reserve_memory_special(char* requested_address, const size_t size, 186 const size_t alignment, const size_t page_size, bool exec) { 187 188 log_trace(pagesize)("Attempt special mapping: size: " SIZE_FORMAT "%s, " 189 "alignment: " SIZE_FORMAT "%s", 190 byte_size_in_exact_unit(size), exact_unit_for_byte_size(size), 191 byte_size_in_exact_unit(alignment), exact_unit_for_byte_size(alignment)); 192 193 char* base = os::reserve_memory_special(size, alignment, page_size, requested_address, exec); 194 if (base != nullptr) { 195 // Check alignment constraints. 196 assert(is_aligned(base, alignment), 197 "reserve_memory_special() returned an unaligned address, base: " PTR_FORMAT 198 " alignment: " SIZE_FORMAT_X, 199 p2i(base), alignment); 200 } 201 return base; 202 } 203 204 void ReservedSpace::clear_members() { 205 initialize_members(nullptr, 0, 0, 0, false, false); 206 } 207 208 void ReservedSpace::initialize_members(char* base, size_t size, size_t alignment, 209 size_t page_size, bool special, bool executable) { 210 _base = base; 211 _size = size; 212 _alignment = alignment; 213 _page_size = page_size; 214 _special = special; 215 _executable = executable; 216 _noaccess_prefix = 0; 217 } 218 219 void ReservedSpace::reserve(size_t size, 220 size_t alignment, 221 size_t page_size, 222 char* requested_address, 223 bool executable) { 224 assert(is_aligned(size, alignment), "Size must be aligned to the requested alignment"); 225 226 // There are basically three different cases that we need to handle below: 227 // 1. Mapping backed by a file 228 // 2. Mapping backed by explicit large pages 229 // 3. Mapping backed by normal pages or transparent huge pages 230 // The first two have restrictions that requires the whole mapping to be 231 // committed up front. To record this the ReservedSpace is marked 'special'. 232 233 // == Case 1 == 234 if (_fd_for_heap != -1) { 235 // When there is a backing file directory for this space then whether 236 // large pages are allocated is up to the filesystem of the backing file. 237 // So UseLargePages is not taken into account for this reservation. 238 char* base = reserve_memory(requested_address, size, alignment, _fd_for_heap, executable); 239 if (base != nullptr) { 240 initialize_members(base, size, alignment, os::vm_page_size(), true, executable); 241 } 242 // Always return, not possible to fall back to reservation not using a file. 243 return; 244 } 245 246 // == Case 2 == 247 if (use_explicit_large_pages(page_size)) { 248 // System can't commit large pages i.e. use transparent huge pages and 249 // the caller requested large pages. To satisfy this request we use 250 // explicit large pages and these have to be committed up front to ensure 251 // no reservations are lost. 252 do { 253 char* base = reserve_memory_special(requested_address, size, alignment, page_size, executable); 254 if (base != nullptr) { 255 // Successful reservation using large pages. 256 initialize_members(base, size, alignment, page_size, true, executable); 257 return; 258 } 259 page_size = os::page_sizes().next_smaller(page_size); 260 } while (page_size > os::vm_page_size()); 261 262 // Failed to reserve explicit large pages, do proper logging. 263 log_on_large_pages_failure(requested_address, size); 264 // Now fall back to normal reservation. 265 assert(page_size == os::vm_page_size(), "inv"); 266 } 267 268 // == Case 3 == 269 char* base = reserve_memory(requested_address, size, alignment, -1, executable); 270 if (base != nullptr) { 271 // Successful mapping. 272 initialize_members(base, size, alignment, page_size, false, executable); 273 } 274 } 275 276 void ReservedSpace::initialize(size_t size, 277 size_t alignment, 278 size_t page_size, 279 char* requested_address, 280 bool executable) { 281 const size_t granularity = os::vm_allocation_granularity(); 282 assert((size & (granularity - 1)) == 0, 283 "size not aligned to os::vm_allocation_granularity()"); 284 assert((alignment & (granularity - 1)) == 0, 285 "alignment not aligned to os::vm_allocation_granularity()"); 286 assert(alignment == 0 || is_power_of_2((intptr_t)alignment), 287 "not a power of 2"); 288 assert(page_size >= os::vm_page_size(), "Invalid page size"); 289 assert(is_power_of_2(page_size), "Invalid page size"); 290 291 clear_members(); 292 293 if (size == 0) { 294 return; 295 } 296 297 // Adjust alignment to not be 0. 298 alignment = MAX2(alignment, os::vm_page_size()); 299 300 // Reserve the memory. 301 reserve(size, alignment, page_size, requested_address, executable); 302 303 // Check that the requested address is used if given. 304 if (failed_to_reserve_as_requested(_base, requested_address)) { 305 // OS ignored the requested address, release the reservation. 306 release(); 307 return; 308 } 309 } 310 311 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment) { 312 assert(partition_size <= size(), "partition failed"); 313 ReservedSpace result(base(), partition_size, alignment, page_size(), special(), executable()); 314 return result; 315 } 316 317 ReservedSpace ReservedSpace::last_part(size_t partition_size, size_t alignment) { 318 assert(partition_size <= size(), "partition failed"); 319 ReservedSpace result(base() + partition_size, size() - partition_size, 320 alignment, page_size(), special(), executable()); 321 return result; 322 } 323 324 ReservedSpace ReservedSpace::partition(size_t offset, size_t partition_size, size_t alignment) { 325 assert(offset + partition_size <= size(), "partition failed"); 326 ReservedSpace result(base() + offset, partition_size, alignment, page_size(), special(), executable()); 327 return result; 328 } 329 330 size_t ReservedSpace::page_align_size_up(size_t size) { 331 return align_up(size, os::vm_page_size()); 332 } 333 334 335 size_t ReservedSpace::page_align_size_down(size_t size) { 336 return align_down(size, os::vm_page_size()); 337 } 338 339 340 size_t ReservedSpace::allocation_align_size_up(size_t size) { 341 return align_up(size, os::vm_allocation_granularity()); 342 } 343 344 void ReservedSpace::release() { 345 if (is_reserved()) { 346 char *real_base = _base - _noaccess_prefix; 347 const size_t real_size = _size + _noaccess_prefix; 348 if (special()) { 349 if (_fd_for_heap != -1) { 350 os::unmap_memory(real_base, real_size); 351 } else { 352 os::release_memory_special(real_base, real_size); 353 } 354 } else{ 355 os::release_memory(real_base, real_size); 356 } 357 clear_members(); 358 } 359 } 360 361 // Put a ReservedSpace over an existing range 362 ReservedSpace ReservedSpace::space_for_range(char* base, size_t size, size_t alignment, 363 size_t page_size, bool special, bool executable) { 364 assert(is_aligned(base, os::vm_allocation_granularity()), "Unaligned base"); 365 assert(is_aligned(size, os::vm_page_size()), "Unaligned size"); 366 assert(os::page_sizes().contains(page_size), "Invalid pagesize"); 367 ReservedSpace space; 368 space.initialize_members(base, size, alignment, page_size, special, executable); 369 return space; 370 } 371 372 static size_t noaccess_prefix_size(size_t alignment) { 373 return lcm(os::vm_page_size(), alignment); 374 } 375 376 void ReservedHeapSpace::establish_noaccess_prefix() { 377 assert(_alignment >= os::vm_page_size(), "must be at least page size big"); 378 _noaccess_prefix = noaccess_prefix_size(_alignment); 379 380 if (base() && base() + _size > (char *)OopEncodingHeapMax) { 381 if (true 382 WIN64_ONLY(&& !UseLargePages) 383 AIX_ONLY(&& (os::Aix::supports_64K_mmap_pages() || os::vm_page_size() == 4*K))) { 384 // Protect memory at the base of the allocated region. 385 // If special, the page was committed (only matters on windows) 386 if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) { 387 fatal("cannot protect protection page"); 388 } 389 log_debug(gc, heap, coops)("Protected page at the reserved heap base: " 390 PTR_FORMAT " / " INTX_FORMAT " bytes", 391 p2i(_base), 392 _noaccess_prefix); 393 assert(CompressedOops::use_implicit_null_checks() == true, "not initialized?"); 394 } else { 395 CompressedOops::set_use_implicit_null_checks(false); 396 } 397 } 398 399 _base += _noaccess_prefix; 400 _size -= _noaccess_prefix; 401 assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment"); 402 } 403 404 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'. 405 // Does not check whether the reserved memory actually is at requested_address, as the memory returned 406 // might still fulfill the wishes of the caller. 407 // Assures the memory is aligned to 'alignment'. 408 // NOTE: If ReservedHeapSpace already points to some reserved memory this is freed, first. 409 void ReservedHeapSpace::try_reserve_heap(size_t size, 410 size_t alignment, 411 size_t page_size, 412 char* requested_address) { 413 if (_base != nullptr) { 414 // We tried before, but we didn't like the address delivered. 415 release(); 416 } 417 418 // Try to reserve the memory for the heap. 419 log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT 420 " heap of size " SIZE_FORMAT_X, 421 p2i(requested_address), 422 size); 423 424 reserve(size, alignment, page_size, requested_address, false); 425 426 // Check alignment constraints. 427 if (is_reserved() && !is_aligned(_base, _alignment)) { 428 // Base not aligned, retry. 429 release(); 430 } 431 } 432 433 void ReservedHeapSpace::try_reserve_range(char *highest_start, 434 char *lowest_start, 435 size_t attach_point_alignment, 436 char *aligned_heap_base_min_address, 437 char *upper_bound, 438 size_t size, 439 size_t alignment, 440 size_t page_size) { 441 const size_t attach_range = highest_start - lowest_start; 442 // Cap num_attempts at possible number. 443 // At least one is possible even for 0 sized attach range. 444 const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1; 445 const uint64_t num_attempts_to_try = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible); 446 447 const size_t stepsize = (attach_range == 0) ? // Only one try. 448 (size_t) highest_start : align_up(attach_range / num_attempts_to_try, attach_point_alignment); 449 450 // Try attach points from top to bottom. 451 char* attach_point = highest_start; 452 while (attach_point >= lowest_start && 453 attach_point <= highest_start && // Avoid wrap around. 454 ((_base == nullptr) || 455 (_base < aligned_heap_base_min_address || _base + size > upper_bound))) { 456 try_reserve_heap(size, alignment, page_size, attach_point); 457 attach_point -= stepsize; 458 } 459 } 460 461 #define SIZE_64K ((uint64_t) UCONST64( 0x10000)) 462 #define SIZE_256M ((uint64_t) UCONST64( 0x10000000)) 463 #define SIZE_32G ((uint64_t) UCONST64( 0x800000000)) 464 465 // Helper for heap allocation. Returns an array with addresses 466 // (OS-specific) which are suited for disjoint base mode. Array is 467 // null terminated. 468 static char** get_attach_addresses_for_disjoint_mode() { 469 static uint64_t addresses[] = { 470 2 * SIZE_32G, 471 3 * SIZE_32G, 472 4 * SIZE_32G, 473 8 * SIZE_32G, 474 10 * SIZE_32G, 475 1 * SIZE_64K * SIZE_32G, 476 2 * SIZE_64K * SIZE_32G, 477 3 * SIZE_64K * SIZE_32G, 478 4 * SIZE_64K * SIZE_32G, 479 16 * SIZE_64K * SIZE_32G, 480 32 * SIZE_64K * SIZE_32G, 481 34 * SIZE_64K * SIZE_32G, 482 0 483 }; 484 485 // Sort out addresses smaller than HeapBaseMinAddress. This assumes 486 // the array is sorted. 487 uint i = 0; 488 while (addresses[i] != 0 && 489 (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) { 490 i++; 491 } 492 uint start = i; 493 494 // Avoid more steps than requested. 495 i = 0; 496 while (addresses[start+i] != 0) { 497 if (i == HeapSearchSteps) { 498 addresses[start+i] = 0; 499 break; 500 } 501 i++; 502 } 503 504 return (char**) &addresses[start]; 505 } 506 507 void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, size_t page_size) { 508 guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax, 509 "can not allocate compressed oop heap for this size"); 510 guarantee(alignment == MAX2(alignment, os::vm_page_size()), "alignment too small"); 511 512 const size_t granularity = os::vm_allocation_granularity(); 513 assert((size & (granularity - 1)) == 0, 514 "size not aligned to os::vm_allocation_granularity()"); 515 assert((alignment & (granularity - 1)) == 0, 516 "alignment not aligned to os::vm_allocation_granularity()"); 517 assert(alignment == 0 || is_power_of_2((intptr_t)alignment), 518 "not a power of 2"); 519 520 // The necessary attach point alignment for generated wish addresses. 521 // This is needed to increase the chance of attaching for mmap and shmat. 522 // AIX is the only platform that uses System V shm for reserving virtual memory. 523 // In this case, the required alignment of the allocated size (64K) and the alignment 524 // of possible start points of the memory region (256M) differ. 525 // This is not reflected by os_allocation_granularity(). 526 // The logic here is dual to the one in pd_reserve_memory in os_aix.cpp 527 const size_t os_attach_point_alignment = 528 AIX_ONLY(os::vm_page_size() == 4*K ? 4*K : 256*M) 529 NOT_AIX(os::vm_allocation_granularity()); 530 531 const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment); 532 533 char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment); 534 size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ? 535 noaccess_prefix_size(alignment) : 0; 536 537 // Attempt to alloc at user-given address. 538 if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) { 539 try_reserve_heap(size + noaccess_prefix, alignment, page_size, aligned_heap_base_min_address); 540 if (_base != aligned_heap_base_min_address) { // Enforce this exact address. 541 release(); 542 } 543 } 544 545 // Keep heap at HeapBaseMinAddress. 546 if (_base == nullptr) { 547 548 // Try to allocate the heap at addresses that allow efficient oop compression. 549 // Different schemes are tried, in order of decreasing optimization potential. 550 // 551 // For this, try_reserve_heap() is called with the desired heap base addresses. 552 // A call into the os layer to allocate at a given address can return memory 553 // at a different address than requested. Still, this might be memory at a useful 554 // address. try_reserve_heap() always returns this allocated memory, as only here 555 // the criteria for a good heap are checked. 556 557 // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops). 558 // Give it several tries from top of range to bottom. 559 if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) { 560 561 // Calc address range within we try to attach (range of possible start addresses). 562 char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment); 563 char* const lowest_start = align_up(aligned_heap_base_min_address, attach_point_alignment); 564 try_reserve_range(highest_start, lowest_start, attach_point_alignment, 565 aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, page_size); 566 } 567 568 // zerobased: Attempt to allocate in the lower 32G. 569 char *zerobased_max = (char *)OopEncodingHeapMax; 570 571 // Give it several tries from top of range to bottom. 572 if (aligned_heap_base_min_address + size <= zerobased_max && // Zerobased theoretical possible. 573 ((_base == nullptr) || // No previous try succeeded. 574 (_base + size > zerobased_max))) { // Unscaled delivered an arbitrary address. 575 576 // Calc address range within we try to attach (range of possible start addresses). 577 char *const highest_start = align_down(zerobased_max - size, attach_point_alignment); 578 // Need to be careful about size being guaranteed to be less 579 // than UnscaledOopHeapMax due to type constraints. 580 char *lowest_start = aligned_heap_base_min_address; 581 uint64_t unscaled_end = UnscaledOopHeapMax - size; 582 if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large 583 lowest_start = MAX2(lowest_start, (char*)unscaled_end); 584 } 585 lowest_start = align_up(lowest_start, attach_point_alignment); 586 try_reserve_range(highest_start, lowest_start, attach_point_alignment, 587 aligned_heap_base_min_address, zerobased_max, size, alignment, page_size); 588 } 589 590 // Now we go for heaps with base != 0. We need a noaccess prefix to efficiently 591 // implement null checks. 592 noaccess_prefix = noaccess_prefix_size(alignment); 593 594 // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjointbase mode. 595 char** addresses = get_attach_addresses_for_disjoint_mode(); 596 int i = 0; 597 while (addresses[i] && // End of array not yet reached. 598 ((_base == nullptr) || // No previous try succeeded. 599 (_base + size > (char *)OopEncodingHeapMax && // Not zerobased or unscaled address. 600 !CompressedOops::is_disjoint_heap_base_address((address)_base)))) { // Not disjoint address. 601 char* const attach_point = addresses[i]; 602 assert(attach_point >= aligned_heap_base_min_address, "Flag support broken"); 603 try_reserve_heap(size + noaccess_prefix, alignment, page_size, attach_point); 604 i++; 605 } 606 607 // Last, desperate try without any placement. 608 if (_base == nullptr) { 609 log_trace(gc, heap, coops)("Trying to allocate at address null heap of size " SIZE_FORMAT_X, size + noaccess_prefix); 610 initialize(size + noaccess_prefix, alignment, page_size, nullptr, false); 611 } 612 } 613 } 614 615 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, size_t page_size, const char* heap_allocation_directory) : ReservedSpace() { 616 617 if (size == 0) { 618 return; 619 } 620 621 if (heap_allocation_directory != nullptr) { 622 _fd_for_heap = os::create_file_for_heap(heap_allocation_directory); 623 if (_fd_for_heap == -1) { 624 vm_exit_during_initialization( 625 err_msg("Could not create file for Heap at location %s", heap_allocation_directory)); 626 } 627 // When there is a backing file directory for this space then whether 628 // large pages are allocated is up to the filesystem of the backing file. 629 // If requested, let the user know that explicit large pages can't be used. 630 if (use_explicit_large_pages(page_size) && large_pages_requested()) { 631 log_debug(gc, heap)("Cannot allocate explicit large pages for Java Heap when AllocateHeapAt option is set."); 632 } 633 } 634 635 // Heap size should be aligned to alignment, too. 636 guarantee(is_aligned(size, alignment), "set by caller"); 637 638 if (UseCompressedOops) { 639 initialize_compressed_heap(size, alignment, page_size); 640 if (_size > size) { 641 // We allocated heap with noaccess prefix. 642 // It can happen we get a zerobased/unscaled heap with noaccess prefix, 643 // if we had to try at arbitrary address. 644 establish_noaccess_prefix(); 645 } 646 } else { 647 initialize(size, alignment, page_size, nullptr, false); 648 } 649 650 assert(markWord::encode_pointer_as_mark(_base).decode_pointer() == _base, 651 "area must be distinguishable from marks for mark-sweep"); 652 assert(markWord::encode_pointer_as_mark(&_base[size]).decode_pointer() == &_base[size], 653 "area must be distinguishable from marks for mark-sweep"); 654 655 if (base() != nullptr) { 656 MemTracker::record_virtual_memory_tag((address)base(), mtJavaHeap); 657 } 658 659 if (_fd_for_heap != -1) { 660 ::close(_fd_for_heap); 661 } 662 } 663 664 MemRegion ReservedHeapSpace::region() const { 665 return MemRegion((HeapWord*)base(), (HeapWord*)end()); 666 } 667 668 // Reserve space for code segment. Same as Java heap only we mark this as 669 // executable. 670 ReservedCodeSpace::ReservedCodeSpace(size_t r_size, 671 size_t rs_align, 672 size_t rs_page_size) : ReservedSpace() { 673 initialize(r_size, rs_align, rs_page_size, /*requested address*/ nullptr, /*executable*/ true); 674 MemTracker::record_virtual_memory_tag((address)base(), mtCode); 675 } 676 677 // VirtualSpace 678 679 VirtualSpace::VirtualSpace() { 680 _low_boundary = nullptr; 681 _high_boundary = nullptr; 682 _low = nullptr; 683 _high = nullptr; 684 _lower_high = nullptr; 685 _middle_high = nullptr; 686 _upper_high = nullptr; 687 _lower_high_boundary = nullptr; 688 _middle_high_boundary = nullptr; 689 _upper_high_boundary = nullptr; 690 _lower_alignment = 0; 691 _middle_alignment = 0; 692 _upper_alignment = 0; 693 _special = false; 694 _executable = false; 695 } 696 697 698 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) { 699 const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1); 700 return initialize_with_granularity(rs, committed_size, max_commit_granularity); 701 } 702 703 bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) { 704 if(!rs.is_reserved()) return false; // allocation failed. 705 assert(_low_boundary == nullptr, "VirtualSpace already initialized"); 706 assert(max_commit_granularity > 0, "Granularity must be non-zero."); 707 708 _low_boundary = rs.base(); 709 _high_boundary = low_boundary() + rs.size(); 710 711 _low = low_boundary(); 712 _high = low(); 713 714 _special = rs.special(); 715 _executable = rs.executable(); 716 717 // When a VirtualSpace begins life at a large size, make all future expansion 718 // and shrinking occur aligned to a granularity of large pages. This avoids 719 // fragmentation of physical addresses that inhibits the use of large pages 720 // by the OS virtual memory system. Empirically, we see that with a 4MB 721 // page size, the only spaces that get handled this way are codecache and 722 // the heap itself, both of which provide a substantial performance 723 // boost in many benchmarks when covered by large pages. 724 // 725 // No attempt is made to force large page alignment at the very top and 726 // bottom of the space if they are not aligned so already. 727 _lower_alignment = os::vm_page_size(); 728 _middle_alignment = max_commit_granularity; 729 _upper_alignment = os::vm_page_size(); 730 731 // End of each region 732 _lower_high_boundary = align_up(low_boundary(), middle_alignment()); 733 _middle_high_boundary = align_down(high_boundary(), middle_alignment()); 734 _upper_high_boundary = high_boundary(); 735 736 // High address of each region 737 _lower_high = low_boundary(); 738 _middle_high = lower_high_boundary(); 739 _upper_high = middle_high_boundary(); 740 741 // commit to initial size 742 if (committed_size > 0) { 743 if (!expand_by(committed_size)) { 744 return false; 745 } 746 } 747 return true; 748 } 749 750 751 VirtualSpace::~VirtualSpace() { 752 release(); 753 } 754 755 756 void VirtualSpace::release() { 757 // This does not release memory it reserved. 758 // Caller must release via rs.release(); 759 _low_boundary = nullptr; 760 _high_boundary = nullptr; 761 _low = nullptr; 762 _high = nullptr; 763 _lower_high = nullptr; 764 _middle_high = nullptr; 765 _upper_high = nullptr; 766 _lower_high_boundary = nullptr; 767 _middle_high_boundary = nullptr; 768 _upper_high_boundary = nullptr; 769 _lower_alignment = 0; 770 _middle_alignment = 0; 771 _upper_alignment = 0; 772 _special = false; 773 _executable = false; 774 } 775 776 777 size_t VirtualSpace::committed_size() const { 778 return pointer_delta(high(), low(), sizeof(char)); 779 } 780 781 782 size_t VirtualSpace::reserved_size() const { 783 return pointer_delta(high_boundary(), low_boundary(), sizeof(char)); 784 } 785 786 787 size_t VirtualSpace::uncommitted_size() const { 788 return reserved_size() - committed_size(); 789 } 790 791 size_t VirtualSpace::actual_committed_size() const { 792 // Special VirtualSpaces commit all reserved space up front. 793 if (special()) { 794 return reserved_size(); 795 } 796 797 size_t committed_low = pointer_delta(_lower_high, _low_boundary, sizeof(char)); 798 size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary, sizeof(char)); 799 size_t committed_high = pointer_delta(_upper_high, _middle_high_boundary, sizeof(char)); 800 801 #ifdef ASSERT 802 size_t lower = pointer_delta(_lower_high_boundary, _low_boundary, sizeof(char)); 803 size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary, sizeof(char)); 804 size_t upper = pointer_delta(_upper_high_boundary, _middle_high_boundary, sizeof(char)); 805 806 if (committed_high > 0) { 807 assert(committed_low == lower, "Must be"); 808 assert(committed_middle == middle, "Must be"); 809 } 810 811 if (committed_middle > 0) { 812 assert(committed_low == lower, "Must be"); 813 } 814 if (committed_middle < middle) { 815 assert(committed_high == 0, "Must be"); 816 } 817 818 if (committed_low < lower) { 819 assert(committed_high == 0, "Must be"); 820 assert(committed_middle == 0, "Must be"); 821 } 822 #endif 823 824 return committed_low + committed_middle + committed_high; 825 } 826 827 828 bool VirtualSpace::contains(const void* p) const { 829 return low() <= (const char*) p && (const char*) p < high(); 830 } 831 832 static void pretouch_expanded_memory(void* start, void* end) { 833 assert(is_aligned(start, os::vm_page_size()), "Unexpected alignment"); 834 assert(is_aligned(end, os::vm_page_size()), "Unexpected alignment"); 835 836 os::pretouch_memory(start, end); 837 } 838 839 static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) { 840 if (os::commit_memory(start, size, alignment, executable)) { 841 if (pre_touch || AlwaysPreTouch) { 842 pretouch_expanded_memory(start, start + size); 843 } 844 return true; 845 } 846 847 debug_only(warning( 848 "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT 849 " size=" SIZE_FORMAT ", executable=%d) failed", 850 p2i(start), p2i(start + size), size, executable);) 851 852 return false; 853 } 854 855 /* 856 First we need to determine if a particular virtual space is using large 857 pages. This is done at the initialize function and only virtual spaces 858 that are larger than LargePageSizeInBytes use large pages. Once we 859 have determined this, all expand_by and shrink_by calls must grow and 860 shrink by large page size chunks. If a particular request 861 is within the current large page, the call to commit and uncommit memory 862 can be ignored. In the case that the low and high boundaries of this 863 space is not large page aligned, the pages leading to the first large 864 page address and the pages after the last large page address must be 865 allocated with default pages. 866 */ 867 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) { 868 if (uncommitted_size() < bytes) { 869 return false; 870 } 871 872 if (special()) { 873 // don't commit memory if the entire space is pinned in memory 874 _high += bytes; 875 return true; 876 } 877 878 char* previous_high = high(); 879 char* unaligned_new_high = high() + bytes; 880 assert(unaligned_new_high <= high_boundary(), "cannot expand by more than upper boundary"); 881 882 // Calculate where the new high for each of the regions should be. If 883 // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned 884 // then the unaligned lower and upper new highs would be the 885 // lower_high() and upper_high() respectively. 886 char* unaligned_lower_new_high = MIN2(unaligned_new_high, lower_high_boundary()); 887 char* unaligned_middle_new_high = MIN2(unaligned_new_high, middle_high_boundary()); 888 char* unaligned_upper_new_high = MIN2(unaligned_new_high, upper_high_boundary()); 889 890 // Align the new highs based on the regions alignment. lower and upper 891 // alignment will always be default page size. middle alignment will be 892 // LargePageSizeInBytes if the actual size of the virtual space is in 893 // fact larger than LargePageSizeInBytes. 894 char* aligned_lower_new_high = align_up(unaligned_lower_new_high, lower_alignment()); 895 char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment()); 896 char* aligned_upper_new_high = align_up(unaligned_upper_new_high, upper_alignment()); 897 898 // Determine which regions need to grow in this expand_by call. 899 // If you are growing in the lower region, high() must be in that 900 // region so calculate the size based on high(). For the middle and 901 // upper regions, determine the starting point of growth based on the 902 // location of high(). By getting the MAX of the region's low address 903 // (or the previous region's high address) and high(), we can tell if it 904 // is an intra or inter region growth. 905 size_t lower_needs = 0; 906 if (aligned_lower_new_high > lower_high()) { 907 lower_needs = pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char)); 908 } 909 size_t middle_needs = 0; 910 if (aligned_middle_new_high > middle_high()) { 911 middle_needs = pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char)); 912 } 913 size_t upper_needs = 0; 914 if (aligned_upper_new_high > upper_high()) { 915 upper_needs = pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char)); 916 } 917 918 // Check contiguity. 919 assert(low_boundary() <= lower_high() && lower_high() <= lower_high_boundary(), 920 "high address must be contained within the region"); 921 assert(lower_high_boundary() <= middle_high() && middle_high() <= middle_high_boundary(), 922 "high address must be contained within the region"); 923 assert(middle_high_boundary() <= upper_high() && upper_high() <= upper_high_boundary(), 924 "high address must be contained within the region"); 925 926 // Commit regions 927 if (lower_needs > 0) { 928 assert(lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region"); 929 if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable)) { 930 return false; 931 } 932 _lower_high += lower_needs; 933 } 934 935 if (middle_needs > 0) { 936 assert(middle_high() + middle_needs <= middle_high_boundary(), "must not expand beyond region"); 937 if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable)) { 938 return false; 939 } 940 _middle_high += middle_needs; 941 } 942 943 if (upper_needs > 0) { 944 assert(upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region"); 945 if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable)) { 946 return false; 947 } 948 _upper_high += upper_needs; 949 } 950 951 _high += bytes; 952 return true; 953 } 954 955 // A page is uncommitted if the contents of the entire page is deemed unusable. 956 // Continue to decrement the high() pointer until it reaches a page boundary 957 // in which case that particular page can now be uncommitted. 958 void VirtualSpace::shrink_by(size_t size) { 959 if (committed_size() < size) 960 fatal("Cannot shrink virtual space to negative size"); 961 962 if (special()) { 963 // don't uncommit if the entire space is pinned in memory 964 _high -= size; 965 return; 966 } 967 968 char* unaligned_new_high = high() - size; 969 assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary"); 970 971 // Calculate new unaligned address 972 char* unaligned_upper_new_high = 973 MAX2(unaligned_new_high, middle_high_boundary()); 974 char* unaligned_middle_new_high = 975 MAX2(unaligned_new_high, lower_high_boundary()); 976 char* unaligned_lower_new_high = 977 MAX2(unaligned_new_high, low_boundary()); 978 979 // Align address to region's alignment 980 char* aligned_upper_new_high = align_up(unaligned_upper_new_high, upper_alignment()); 981 char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment()); 982 char* aligned_lower_new_high = align_up(unaligned_lower_new_high, lower_alignment()); 983 984 // Determine which regions need to shrink 985 size_t upper_needs = 0; 986 if (aligned_upper_new_high < upper_high()) { 987 upper_needs = 988 pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char)); 989 } 990 size_t middle_needs = 0; 991 if (aligned_middle_new_high < middle_high()) { 992 middle_needs = 993 pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char)); 994 } 995 size_t lower_needs = 0; 996 if (aligned_lower_new_high < lower_high()) { 997 lower_needs = 998 pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char)); 999 } 1000 1001 // Check contiguity. 1002 assert(middle_high_boundary() <= upper_high() && 1003 upper_high() <= upper_high_boundary(), 1004 "high address must be contained within the region"); 1005 assert(lower_high_boundary() <= middle_high() && 1006 middle_high() <= middle_high_boundary(), 1007 "high address must be contained within the region"); 1008 assert(low_boundary() <= lower_high() && 1009 lower_high() <= lower_high_boundary(), 1010 "high address must be contained within the region"); 1011 1012 // Uncommit 1013 if (upper_needs > 0) { 1014 assert(middle_high_boundary() <= aligned_upper_new_high && 1015 aligned_upper_new_high + upper_needs <= upper_high_boundary(), 1016 "must not shrink beyond region"); 1017 if (!os::uncommit_memory(aligned_upper_new_high, upper_needs, _executable)) { 1018 debug_only(warning("os::uncommit_memory failed")); 1019 return; 1020 } else { 1021 _upper_high -= upper_needs; 1022 } 1023 } 1024 if (middle_needs > 0) { 1025 assert(lower_high_boundary() <= aligned_middle_new_high && 1026 aligned_middle_new_high + middle_needs <= middle_high_boundary(), 1027 "must not shrink beyond region"); 1028 if (!os::uncommit_memory(aligned_middle_new_high, middle_needs, _executable)) { 1029 debug_only(warning("os::uncommit_memory failed")); 1030 return; 1031 } else { 1032 _middle_high -= middle_needs; 1033 } 1034 } 1035 if (lower_needs > 0) { 1036 assert(low_boundary() <= aligned_lower_new_high && 1037 aligned_lower_new_high + lower_needs <= lower_high_boundary(), 1038 "must not shrink beyond region"); 1039 if (!os::uncommit_memory(aligned_lower_new_high, lower_needs, _executable)) { 1040 debug_only(warning("os::uncommit_memory failed")); 1041 return; 1042 } else { 1043 _lower_high -= lower_needs; 1044 } 1045 } 1046 1047 _high -= size; 1048 } 1049 1050 #ifndef PRODUCT 1051 void VirtualSpace::check_for_contiguity() { 1052 // Check contiguity. 1053 assert(low_boundary() <= lower_high() && 1054 lower_high() <= lower_high_boundary(), 1055 "high address must be contained within the region"); 1056 assert(lower_high_boundary() <= middle_high() && 1057 middle_high() <= middle_high_boundary(), 1058 "high address must be contained within the region"); 1059 assert(middle_high_boundary() <= upper_high() && 1060 upper_high() <= upper_high_boundary(), 1061 "high address must be contained within the region"); 1062 assert(low() >= low_boundary(), "low"); 1063 assert(low_boundary() <= lower_high_boundary(), "lower high boundary"); 1064 assert(upper_high_boundary() <= high_boundary(), "upper high boundary"); 1065 assert(high() <= upper_high(), "upper high"); 1066 } 1067 1068 void VirtualSpace::print_on(outputStream* out) const { 1069 out->print ("Virtual space:"); 1070 if (special()) out->print(" (pinned in memory)"); 1071 out->cr(); 1072 out->print_cr(" - committed: " SIZE_FORMAT, committed_size()); 1073 out->print_cr(" - reserved: " SIZE_FORMAT, reserved_size()); 1074 out->print_cr(" - [low, high]: [" PTR_FORMAT ", " PTR_FORMAT "]", p2i(low()), p2i(high())); 1075 out->print_cr(" - [low_b, high_b]: [" PTR_FORMAT ", " PTR_FORMAT "]", p2i(low_boundary()), p2i(high_boundary())); 1076 } 1077 1078 void VirtualSpace::print() const { 1079 print_on(tty); 1080 } 1081 1082 #endif